Diffstat (limited to 'src/spdk/ocf')
-rw-r--r--  src/spdk/ocf/.pep8speaks.yml  13
-rw-r--r--  src/spdk/ocf/LICENSE  13
-rw-r--r--  src/spdk/ocf/Makefile  137
-rw-r--r--  src/spdk/ocf/README.md  121
-rw-r--r--  src/spdk/ocf/codecov.yml  25
-rw-r--r--  src/spdk/ocf/doc/.gitignore  1
-rw-r--r--  src/spdk/ocf/doc/HOME.md  292
-rw-r--r--  src/spdk/ocf/doc/doxygen.cfg  329
-rw-r--r--  src/spdk/ocf/doc/img/deployment-1.png  bin 0 -> 18524 bytes
-rw-r--r--  src/spdk/ocf/doc/img/deployment-2.png  bin 0 -> 19038 bytes
-rw-r--r--  src/spdk/ocf/doc/img/io-path.png  bin 0 -> 66489 bytes
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env.c  192
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env.h  642
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env_headers.h  22
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env_list.h  168
-rw-r--r--  src/spdk/ocf/example/simple/Makefile  37
-rw-r--r--  src/spdk/ocf/example/simple/src/ctx.c  303
-rw-r--r--  src/spdk/ocf/example/simple/src/ctx.h  19
-rw-r--r--  src/spdk/ocf/example/simple/src/data.h  14
-rw-r--r--  src/spdk/ocf/example/simple/src/main.c  380
-rw-r--r--  src/spdk/ocf/example/simple/src/volume.c  168
-rw-r--r--  src/spdk/ocf/example/simple/src/volume.h  27
-rw-r--r--  src/spdk/ocf/inc/cleaning/acp.h  49
-rw-r--r--  src/spdk/ocf/inc/cleaning/alru.h  74
-rw-r--r--  src/spdk/ocf/inc/ocf.h  37
-rw-r--r--  src/spdk/ocf/inc/ocf_cache.h  240
-rw-r--r--  src/spdk/ocf/inc/ocf_cfg.h  36
-rw-r--r--  src/spdk/ocf/inc/ocf_cleaner.h  67
-rw-r--r--  src/spdk/ocf/inc/ocf_core.h  251
-rw-r--r--  src/spdk/ocf/inc/ocf_ctx.h  310
-rw-r--r--  src/spdk/ocf/inc/ocf_def.h  357
-rw-r--r--  src/spdk/ocf/inc/ocf_err.h  130
-rw-r--r--  src/spdk/ocf/inc/ocf_io.h  238
-rw-r--r--  src/spdk/ocf/inc/ocf_io_class.h  109
-rw-r--r--  src/spdk/ocf/inc/ocf_logger.h  44
-rw-r--r--  src/spdk/ocf/inc/ocf_metadata.h  143
-rw-r--r--  src/spdk/ocf/inc/ocf_metadata_updater.h  50
-rw-r--r--  src/spdk/ocf/inc/ocf_mngt.h  1107
-rw-r--r--  src/spdk/ocf/inc/ocf_queue.h  129
-rw-r--r--  src/spdk/ocf/inc/ocf_stats.h  239
-rw-r--r--  src/spdk/ocf/inc/ocf_trace.h  185
-rw-r--r--  src/spdk/ocf/inc/ocf_types.h  95
-rw-r--r--  src/spdk/ocf/inc/ocf_volume.h  338
-rw-r--r--  src/spdk/ocf/inc/promotion/nhit.h  23
-rw-r--r--  src/spdk/ocf/src/cleaning/acp.c  738
-rw-r--r--  src/spdk/ocf/src/cleaning/acp.h  43
-rw-r--r--  src/spdk/ocf/src/cleaning/acp_structs.h  23
-rw-r--r--  src/spdk/ocf/src/cleaning/alru.c  845
-rw-r--r--  src/spdk/ocf/src/cleaning/alru.h  30
-rw-r--r--  src/spdk/ocf/src/cleaning/alru_structs.h  32
-rw-r--r--  src/spdk/ocf/src/cleaning/cleaning.c  156
-rw-r--r--  src/spdk/ocf/src/cleaning/cleaning.h  75
-rw-r--r--  src/spdk/ocf/src/cleaning/cleaning_priv.h  19
-rw-r--r--  src/spdk/ocf/src/cleaning/nop.c  13
-rw-r--r--  src/spdk/ocf/src/cleaning/nop.h  14
-rw-r--r--  src/spdk/ocf/src/cleaning/nop_structs.h  15
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.c  1159
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.h  164
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_concurrency.c  24
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_concurrency.h  43
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.c  388
-rw-r--r--  src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.h  137
-rw-r--r--  src/spdk/ocf/src/engine/cache_engine.c  350
-rw-r--r--  src/spdk/ocf/src/engine/cache_engine.h  88
-rw-r--r--  src/spdk/ocf/src/engine/engine_bf.c  102
-rw-r--r--  src/spdk/ocf/src/engine/engine_bf.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_common.c  697
-rw-r--r--  src/spdk/ocf/src/engine/engine_common.h  284
-rw-r--r--  src/spdk/ocf/src/engine/engine_d2c.c  58
-rw-r--r--  src/spdk/ocf/src/engine/engine_d2c.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_debug.h  48
-rw-r--r--  src/spdk/ocf/src/engine/engine_discard.c  284
-rw-r--r--  src/spdk/ocf/src/engine/engine_discard.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_fast.c  228
-rw-r--r--  src/spdk/ocf/src/engine/engine_fast.h  12
-rw-r--r--  src/spdk/ocf/src/engine/engine_inv.c  71
-rw-r--r--  src/spdk/ocf/src/engine/engine_inv.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_ops.c  63
-rw-r--r--  src/spdk/ocf/src/engine/engine_ops.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_pt.c  172
-rw-r--r--  src/spdk/ocf/src/engine/engine_pt.h  15
-rw-r--r--  src/spdk/ocf/src/engine/engine_rd.c  270
-rw-r--r--  src/spdk/ocf/src/engine/engine_rd.h  13
-rw-r--r--  src/spdk/ocf/src/engine/engine_wa.c  89
-rw-r--r--  src/spdk/ocf/src/engine/engine_wa.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_wb.c  214
-rw-r--r--  src/spdk/ocf/src/engine/engine_wb.h  12
-rw-r--r--  src/spdk/ocf/src/engine/engine_wi.c  181
-rw-r--r--  src/spdk/ocf/src/engine/engine_wi.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_wo.c  249
-rw-r--r--  src/spdk/ocf/src/engine/engine_wo.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_wt.c  208
-rw-r--r--  src/spdk/ocf/src/engine/engine_wt.h  11
-rw-r--r--  src/spdk/ocf/src/engine/engine_zero.c  166
-rw-r--r--  src/spdk/ocf/src/engine/engine_zero.h  11
-rw-r--r--  src/spdk/ocf/src/eviction/eviction.c  125
-rw-r--r--  src/spdk/ocf/src/eviction/eviction.h  69
-rw-r--r--  src/spdk/ocf/src/eviction/lru.c  522
-rw-r--r--  src/spdk/ocf/src/eviction/lru.h  21
-rw-r--r--  src/spdk/ocf/src/eviction/lru_structs.h  24
-rw-r--r--  src/spdk/ocf/src/eviction/ops.h  106
-rw-r--r--  src/spdk/ocf/src/metadata/metadata.c  395
-rw-r--r--  src/spdk/ocf/src/metadata/metadata.h  224
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_bit.h  240
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_cleaning_policy.h  29
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_collision.c  88
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_collision.h  120
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_common.h  12
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_core.h  45
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_eviction_policy.h  26
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_hash.c  2934
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_hash.h  51
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_io.c  463
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_io.h  157
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_misc.c  131
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_misc.h  38
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_partition.c  142
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_partition.h  84
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_partition_structs.h  44
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw.c  661
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw.h  345
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_atomic.c  259
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_atomic.h  15
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_dynamic.c  577
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_dynamic.h  95
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_volatile.c  64
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_raw_volatile.h  45
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_status.h  434
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_structs.h  469
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_superblock.h  86
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_updater.c  163
-rw-r--r--  src/spdk/ocf/src/metadata/metadata_updater_priv.h  32
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_cache.c  2557
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_common.c  464
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_common.h  33
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_core.c  969
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_core_pool.c  115
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_core_pool_priv.h  15
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_core_priv.h  15
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_flush.c  999
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_io_class.c  299
-rw-r--r--  src/spdk/ocf/src/mngt/ocf_mngt_misc.c  29
-rw-r--r--  src/spdk/ocf/src/ocf_cache.c  233
-rw-r--r--  src/spdk/ocf/src/ocf_cache_priv.h  220
-rw-r--r--  src/spdk/ocf/src/ocf_core.c  600
-rw-r--r--  src/spdk/ocf/src/ocf_core_priv.h  104
-rw-r--r--  src/spdk/ocf/src/ocf_ctx.c  241
-rw-r--r--  src/spdk/ocf/src/ocf_ctx_priv.h  181
-rw-r--r--  src/spdk/ocf/src/ocf_def_priv.h  52
-rw-r--r--  src/spdk/ocf/src/ocf_freelist.c  427
-rw-r--r--  src/spdk/ocf/src/ocf_freelist.h  34
-rw-r--r--  src/spdk/ocf/src/ocf_io.c  166
-rw-r--r--  src/spdk/ocf/src/ocf_io_class.c  72
-rw-r--r--  src/spdk/ocf/src/ocf_io_priv.h  52
-rw-r--r--  src/spdk/ocf/src/ocf_logger.c  86
-rw-r--r--  src/spdk/ocf/src/ocf_logger_priv.h  31
-rw-r--r--  src/spdk/ocf/src/ocf_metadata.c  104
-rw-r--r--  src/spdk/ocf/src/ocf_priv.h  18
-rw-r--r--  src/spdk/ocf/src/ocf_queue.c  141
-rw-r--r--  src/spdk/ocf/src/ocf_queue_priv.h  42
-rw-r--r--  src/spdk/ocf/src/ocf_request.c  333
-rw-r--r--  src/spdk/ocf/src/ocf_request.h  364
-rw-r--r--  src/spdk/ocf/src/ocf_stats.c  436
-rw-r--r--  src/spdk/ocf/src/ocf_stats_builder.c  451
-rw-r--r--  src/spdk/ocf/src/ocf_stats_priv.h  241
-rw-r--r--  src/spdk/ocf/src/ocf_trace.c  137
-rw-r--r--  src/spdk/ocf/src/ocf_trace_priv.h  134
-rw-r--r--  src/spdk/ocf/src/ocf_volume.c  351
-rw-r--r--  src/spdk/ocf/src/ocf_volume_priv.h  67
-rw-r--r--  src/spdk/ocf/src/promotion/nhit/nhit.c  249
-rw-r--r--  src/spdk/ocf/src/promotion/nhit/nhit.h  32
-rw-r--r--  src/spdk/ocf/src/promotion/nhit/nhit_hash.c  417
-rw-r--r--  src/spdk/ocf/src/promotion/nhit/nhit_hash.h  26
-rw-r--r--  src/spdk/ocf/src/promotion/nhit/nhit_structs.h  16
-rw-r--r--  src/spdk/ocf/src/promotion/ops.h  57
-rw-r--r--  src/spdk/ocf/src/promotion/promotion.c  174
-rw-r--r--  src/spdk/ocf/src/promotion/promotion.h  111
-rw-r--r--  src/spdk/ocf/src/utils/utils_async_lock.c  240
-rw-r--r--  src/spdk/ocf/src/utils/utils_async_lock.h  50
-rw-r--r--  src/spdk/ocf/src/utils/utils_cache_line.c  179
-rw-r--r--  src/spdk/ocf/src/utils/utils_cache_line.h  390
-rw-r--r--  src/spdk/ocf/src/utils/utils_cleaner.c  1058
-rw-r--r--  src/spdk/ocf/src/utils/utils_cleaner.h  160
-rw-r--r--  src/spdk/ocf/src/utils/utils_io.c  354
-rw-r--r--  src/spdk/ocf/src/utils/utils_io.h  84
-rw-r--r--  src/spdk/ocf/src/utils/utils_io_allocator.h  62
-rw-r--r--  src/spdk/ocf/src/utils/utils_list.c  64
-rw-r--r--  src/spdk/ocf/src/utils/utils_list.h  207
-rw-r--r--  src/spdk/ocf/src/utils/utils_part.c  192
-rw-r--r--  src/spdk/ocf/src/utils/utils_part.h  117
-rw-r--r--  src/spdk/ocf/src/utils/utils_pipeline.c  131
-rw-r--r--  src/spdk/ocf/src/utils/utils_pipeline.h  153
-rw-r--r--  src/spdk/ocf/src/utils/utils_realloc.c  117
-rw-r--r--  src/spdk/ocf/src/utils/utils_realloc.h  69
-rw-r--r--  src/spdk/ocf/src/utils/utils_refcnt.c  70
-rw-r--r--  src/spdk/ocf/src/utils/utils_refcnt.h  49
-rw-r--r--  src/spdk/ocf/src/utils/utils_stats.h  50
-rw-r--r--  src/spdk/ocf/tests/build/Makefile  40
-rw-r--r--  src/spdk/ocf/tests/functional/.gitignore  9
-rwxr-xr-x  src/spdk/ocf/tests/functional/Makefile  52
-rw-r--r--  src/spdk/ocf/tests/functional/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/config/random.cfg  2
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/ocf.py  30
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/cache.py  593
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/cleaner.py  43
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/core.py  227
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/ctx.py  122
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/data.py  225
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/io.py  118
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/logger.py  182
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py  102
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/queue.py  105
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/shared.py  160
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py  39
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/stats/core.py  21
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py  88
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/types/volume.py  361
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/utils.py  173
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c  36
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c  42
-rw-r--r--  src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c  12
-rw-r--r--  src/spdk/ocf/tests/functional/pytest.ini  2
-rw-r--r--  src/spdk/ocf/tests/functional/tests/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/basic/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py  86
-rw-r--r--  src/spdk/ocf/tests/functional/tests/conftest.py  39
-rw-r--r--  src/spdk/ocf/tests/functional/tests/engine/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/engine/test_pp.py  305
-rw-r--r--  src/spdk/ocf/tests/functional/tests/engine/test_wo.py  213
-rw-r--r--  src/spdk/ocf/tests/functional/tests/eviction/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py  80
-rw-r--r--  src/spdk/ocf/tests/functional/tests/flush/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/management/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/management/test_add_remove.py  278
-rw-r--r--  src/spdk/ocf/tests/functional/tests/management/test_change_params.py  135
-rw-r--r--  src/spdk/ocf/tests/functional/tests/management/test_start_stop.py  545
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/__init__.py  0
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/conftest.py  98
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py  315
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py  155
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/test_negative_io.py  205
-rw-r--r--  src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py  215
-rw-r--r--  src/spdk/ocf/tests/functional/tests/utils/random.py  95
-rwxr-xr-x  src/spdk/ocf/tests/functional/utils/configure_random.py  13
-rw-r--r--  src/spdk/ocf/tests/unit/framework/.gitignore  1
-rw-r--r--  src/spdk/ocf/tests/unit/framework/README  11
-rwxr-xr-x  src/spdk/ocf/tests/unit/framework/add_new_test_file.py  179
-rwxr-xr-x  src/spdk/ocf/tests/unit/framework/prepare_sources_for_testing.py  730
-rwxr-xr-x  src/spdk/ocf/tests/unit/framework/run_unit_tests.py  127
-rw-r--r--  src/spdk/ocf/tests/unit/framework/tests_config.py  47
-rw-r--r--  src/spdk/ocf/tests/unit/tests/.gitignore  6
-rwxr-xr-x  src/spdk/ocf/tests/unit/tests/add_new_test_file.py  34
-rw-r--r--  src/spdk/ocf/tests/unit/tests/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c  125
-rw-r--r--  src/spdk/ocf/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c  160
-rw-r--r--  src/spdk/ocf/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c  135
-rw-r--r--  src/spdk/ocf/tests/unit/tests/header.c  5
-rw-r--r--  src/spdk/ocf/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c  69
-rw-r--r--  src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/_cache_mngt_set_cache_mode_test.c  247
-rw-r--r--  src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c  189
-rw-r--r--  src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_io_class.c/ocf_mngt_io_class.c  249
-rw-r--r--  src/spdk/ocf/tests/unit/tests/ocf_env/CMakeLists.txt  3
-rw-r--r--  src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_get_put.c  382
-rw-r--r--  src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_init.c  68
-rw-r--r--  src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_locks.c  213
-rw-r--r--  src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_populate.c  138
-rw-r--r--  src/spdk/ocf/tests/unit/tests/print_desc.h  6
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_dec.c  63
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_freeze.c  117
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_inc.c  52
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_init.c  51
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_register_zero_cb.c  102
-rw-r--r--  src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_unfreeze.c  101
275 files changed, 49253 insertions, 0 deletions
diff --git a/src/spdk/ocf/.pep8speaks.yml b/src/spdk/ocf/.pep8speaks.yml
new file mode 100644
index 000000000..93ede8a78
--- /dev/null
+++ b/src/spdk/ocf/.pep8speaks.yml
@@ -0,0 +1,13 @@
+# File : .pep8speaks.yml
+
+scanner:
+ diff_only: True
+ linter: pycodestyle
+
+pycodestyle:
+ max-line-length: 100
+ ignore:
+ - E402 # module level import not at top of file
+ - W503 # line break before binary operator
+
+no_blank_comment: True
diff --git a/src/spdk/ocf/LICENSE b/src/spdk/ocf/LICENSE
new file mode 100644
index 000000000..8a69ec3d6
--- /dev/null
+++ b/src/spdk/ocf/LICENSE
@@ -0,0 +1,13 @@
+The Clear BSD License
+
+Copyright(c) 2019 Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/spdk/ocf/Makefile b/src/spdk/ocf/Makefile
new file mode 100644
index 000000000..5a65d20de
--- /dev/null
+++ b/src/spdk/ocf/Makefile
@@ -0,0 +1,137 @@
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+PWD:=$(shell pwd)
+
+ifneq ($(strip $(O)),)
+OUTDIR:=$(shell cd $(O) && pwd)
+endif
+
+validate:
+ifeq ($(strip $(OUTDIR)),)
+ $(error No output specified for installing sources or headers)
+endif
+
+ifeq ($(strip $(CMD)),)
+INSTALL=ln -fs
+else ifeq ($(strip $(CMD)),cp)
+INSTALL=cp
+else ifeq ($(strip $(CMD)),install)
+INSTALL=install
+else
+$(error Not allowed program command)
+endif
+
+ifneq ($(strip $(OCF_ENV)),)
+ifeq ($(strip $(OCF_ENV)),posix)
+OCF_ENV_DIR=$(PWD)/env/posix
+else
+$(error Invalid environment selected)
+endif
+endif
+
+#
+# Installing headers
+#
+INC_IN=$(shell find $(PWD)/inc -name '*.[h]' -type f)
+INC_OUT=$(patsubst $(PWD)/inc/%,$(OUTDIR)/include/ocf/%,$(INC_IN))
+INC_RM=$(shell find $(OUTDIR)/include/ocf -name '*.[h]' -xtype l 2>/dev/null)
+
+inc: $(INC_OUT) $(INC_RM)
+ @$(MAKE) distcleandir
+
+$(INC_OUT):
+ifeq ($(strip $(OUTDIR)),)
+ $(error No output specified for installing headers)
+endif
+ @echo " INSTALL $@"
+ @mkdir -p $(dir $@)
+ @$(INSTALL) $(subst $(OUTDIR)/include/ocf,$(PWD)/inc,$@) $@
+
+$(INC_RM): validate
+ $(if $(shell readlink $@ | grep $(PWD)/inc), \
+ @echo " RM $@"; rm $@,)
+
+#
+# Installing sources
+#
+SRC_IN=$(shell find $(PWD)/src -name '*.[c|h]' -type f)
+SRC_OUT=$(patsubst $(PWD)/src/%,$(OUTDIR)/src/ocf/%,$(SRC_IN))
+SRC_RM=$(shell find $(OUTDIR)/src/ocf -name '*.[c|h]' -xtype l 2>/dev/null)
+
+src: $(SRC_OUT) $(SRC_RM)
+ @$(MAKE) distcleandir
+
+$(SRC_OUT):
+ifeq ($(strip $(OUTDIR)),)
+ $(error No output specified for installing sources)
+endif
+ @echo " INSTALL $@"
+ @mkdir -p $(dir $@)
+ @$(INSTALL) $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$@) $@
+
+$(SRC_RM): validate
+ $(if $(shell readlink $@ | grep $(PWD)/src), \
+ @echo " RM $@"; rm $@,)
+
+#
+# Installing environment
+#
+OCF_ENV_IN=$(shell find $(OCF_ENV_DIR) -name '*.[c|h]' -type f)
+OCF_ENV_OUT=$(patsubst $(OCF_ENV_DIR)%,$(OUTDIR)/src/ocf/env/%,$(OCF_ENV_IN))
+OCF_ENV_RM=$(shell find $(OUTDIR)/src/ocf/env -name '*.[c|h]' -xtype l 2>/dev/null)
+
+env: | env_check env_dep
+ @$(MAKE) distcleandir
+
+env_check:
+ifeq ($(OCF_ENV_DIR),)
+ $(error No environment selected)
+endif
+
+env_dep: $(OCF_ENV_OUT) $(OCF_ENV_RM)
+
+$(OCF_ENV_OUT):
+ifeq ($(strip $(OUTDIR)),)
+ $(error No output specified for installing sources)
+endif
+ @echo " INSTALL $@"
+ @mkdir -p $(dir $@)
+ @$(INSTALL) $(subst $(OUTDIR)/src/ocf/env,$(OCF_ENV_DIR),$@) $@
+
+$(OCF_ENV_RM): validate
+ $(if $(shell readlink $@ | grep $(OCF_ENV_DIR)), \
+ @echo " RM $@"; rm $@,)
+
+#
+# Distclean
+#
+dist_dir=$(foreach dir,$(shell find $(OUTDIR) -type d -empty), \
+$(if $(wildcard $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$(dir))),$(dir),))
+
+distclean: validate
+ @rm -f $(SRC_OUT) $(INC_OUT)
+ @$(MAKE) distcleandir
+
+distcleandir:
+ $(if $(strip $(dist_dir)), rm -r $(dist_dir),)
+
+#
+# Printing help
+#
+help:
+ $(info Available targets:)
+ $(info inc O=<OUTDIR> [CMD=cp|install] Install include files into specified directory)
+ $(info src O=<OUTDIR> [CMD=cp|install] Install source files into specified directory)
+ $(info distclean O=<OUTDIR> Uninstall source and headers from specified directory)
+
+doc: validate
+ @cd doc && rm -rf html
+ @cd doc && doxygen doxygen.cfg
+ @mkdir -p $(OUTDIR)/doc
+ @cd doc && mv html $(OUTDIR)/doc/ocf
+
+.PHONY: inc src env env_check env_dep validate help distclean distcleandir doc \
+ $(INC_RM) $(SRC_RM) $(OCF_ENV_RM) $(DIST_DIR)
diff --git a/src/spdk/ocf/README.md b/src/spdk/ocf/README.md
new file mode 100644
index 000000000..cad0d7bea
--- /dev/null
+++ b/src/spdk/ocf/README.md
@@ -0,0 +1,121 @@
+# Open CAS Framework
+
+[![Build Status](https://open-cas-logs.s3.us-east-2.amazonaws.com/master-status/build/curr-badge.svg)](https://open-cas-logs.s3.us-east-2.amazonaws.com/master-status/build/build.html)
+[![Tests Status](https://open-cas-logs.s3.us-east-2.amazonaws.com/master-status/tests/curr-badge.svg)](https://open-cas-logs.s3.us-east-2.amazonaws.com/master-status/tests/index.html)
+[![Coverity status](https://scan.coverity.com/projects/19083/badge.svg)](https://scan.coverity.com/projects/open-cas-ocf)
+[![codecov](https://codecov.io/gh/Open-CAS/ocf/branch/master/graph/badge.svg)](https://codecov.io/gh/Open-CAS/ocf)
+[![License](https://open-cas-logs.s3.us-east-2.amazonaws.com/master-status/license-badge.svg)](LICENSE)
+
+Open CAS Framework (OCF) is a high performance block storage caching meta-library
+written in C. It is entirely platform and system independent, accessing the system API
+through a user-provided environment wrapper layer. OCF integrates tightly with the
+rest of the software stack, providing a high performance, low latency caching
+utility.
+
+# In this readme:
+
+* [Documentation](#documentation)
+* [Source Code](#source-code)
+* [Deployment](#deployment)
+* [Examples](#examples)
+* [Unit Tests](#unit-tests)
+* [Build Test](#build-test)
+* [Functional Tests](#functional-tests)
+* [Contributing](#contributing)
+* [Security](#security)
+
+## Documentation
+
+OCF documentation is available on [GitHub Pages](https://open-cas.github.io/getting_started_ocf.html).
+Doxygen API documentation is available [here](http://open-cas.github.io/doxygen/ocf).
+
+## Source Code
+
+Source code is available in the official OCF GitHub repository:
+
+~~~{.sh}
+git clone https://github.com/Open-CAS/ocf.git
+cd ocf
+~~~
+
+## Deployment
+
+OCF doesn't compile as a separate library. It's designed to be included into another
+software stack. For this purpose OCF provides a Makefile with two targets for
+deploying its sources into target directories. Assuming OCFDIR is the OCF directory, and
+SRCDIR and INCDIR are respectively your source and include directories, use the following
+commands to deploy OCF into your project:
+
+~~~{.sh}
+make -C $OCFDIR src O=$SRCDIR
+make -C $OCFDIR inc O=$INCDIR
+~~~
+
+By default this will not copy OCF source files but create symbolic links to them,
+to avoid source duplication and allow for easy OCF code modification. If you prefer
+to copy the OCF source files (e.g. you don't want to distribute the whole OCF repository
+as a submodule of your project) you can use the following commands:
+
+~~~{.sh}
+make -C $OCFDIR src O=$SRCDIR CMD=cp
+make -C $OCFDIR inc O=$INCDIR CMD=cp
+~~~
+
+## Examples
+
+OCF is shipped with examples, which are complete, compilable and working
+programs containing lots of comments that explain the basics of caching. They
+are a great starting point for everyone who wants to start working with OCF.
+
+Examples can be found in the `example/` directory.
+
+Each example contains a Makefile which can be used to compile it.
+
+## Unit Tests
+
+OCF is shipped with a dedicated unit test framework based on Cmocka.
+To run unit tests you need to install the following packages:
+- Cmake (>= 3.8.1)
+- Cmocka (>= 1.1.1)
+- ctags (>= 5.8)
+
+To run unit tests use the following command:
+
+~~~{.sh}
+./tests/unit/framework/run_unit_tests.py
+~~~
+
+## Build Test
+
+The OCF repository contains a basic build test. It uses the default POSIX environment.
+To run this test, use the following commands:
+
+~~~{.sh}
+cd tests/build/
+make
+~~~
+
+## Functional Tests
+
+The OCF repository contains a dedicated functional test framework written in Python and executed via pytest. Using ctypes it is possible to call and wrap OCF functions and to use C-compatible data types.
+To run functional tests you need to install the following:
+- python3 (>=3.6.7)
+- pytest (Install with `pip3 install pytest`)
+
+To run all functional tests (in compliance with the configuration file), first compile using the Makefile located in `./tests/functional/Makefile` and then use the following command:
+
+~~~{.sh}
+pytest
+~~~
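+
+For illustration only, a minimal sketch of how such a wrapper declares the C
+signature of an OCF function with ctypes (hypothetical library path and setup,
+not the actual pyocf code):
+
+~~~{.py}
+from ctypes import cdll, c_int, c_void_p
+
+# Load the shared library that the OCF sources were compiled into
+# (the path below is just an example).
+lib = cdll.LoadLibrary("./libocf.so")
+
+# Tell ctypes the argument and return types of a wrapped OCF call,
+# so it can be invoked from Python with C-compatible data types.
+lib.ocf_ctx_create.argtypes = [c_void_p, c_void_p]
+lib.ocf_ctx_create.restype = c_int
+~~~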
+
+## Contributing
+
+Feel like making OCF better? Don't hesitate to submit a pull request!
+You can find more information about our contribution process
+[here](https://open-cas.github.io/contributing.html).
+In case of any questions feel free to contact [maintainer](mailto:robert.baldyga@intel.com).
+
+## Security
+
+To report a potential security vulnerability please follow the instructions
+[here](https://open-cas.github.io/contributing.html#reporting-a-potential-security-vulnerability).
diff --git a/src/spdk/ocf/codecov.yml b/src/spdk/ocf/codecov.yml
new file mode 100644
index 000000000..dc421a311
--- /dev/null
+++ b/src/spdk/ocf/codecov.yml
@@ -0,0 +1,25 @@
+codecov:
+ require_ci_to_pass: yes
+
+coverage:
+ precision: 2
+ round: down
+ range: "60...100"
+
+ status:
+ project: yes
+ patch: yes
+ changes: no
+
+parsers:
+ gcov:
+ branch_detection:
+ conditional: yes
+ loop: yes
+ method: no
+ macro: no
+
+comment:
+ layout: "diff,flags,tree"
+ behavior: default
+ require_changes: no
diff --git a/src/spdk/ocf/doc/.gitignore b/src/spdk/ocf/doc/.gitignore
new file mode 100644
index 000000000..5ccff1a6b
--- /dev/null
+++ b/src/spdk/ocf/doc/.gitignore
@@ -0,0 +1 @@
+html/
diff --git a/src/spdk/ocf/doc/HOME.md b/src/spdk/ocf/doc/HOME.md
new file mode 100644
index 000000000..caad48211
--- /dev/null
+++ b/src/spdk/ocf/doc/HOME.md
@@ -0,0 +1,292 @@
+# Open CAS Framework
+
+# Content:
+- [Architecture overview](#architecture-overview)
+- [Management interface](#management-interface)
+- [IO path](#io-path)
+
+# Architecture overview
+
+Intel(R) Cache Acceleration Software (CAS) consists of:
+- A platform independent library called Open CAS Framework (OCF)
+- Platform dependent adaptation layers enabling OCF to work in different
+environments such as the Linux kernel
+
+An example usage for OCF is the Linux kernel (see picture below).
+In this case OCF operates as a block level cache for block devices.
+For this usage model OCF comes with the following adaptation layers:
+- <b>Library client (top adapter)</b> - its main responsibility is creating
+a cache volume representing the primary storage device. An application can
+read/write from/to the cache volume block device as to a regular primary
+storage device.
+- <b>Block device volume (bottom adapter)</b> - responsible for issuing
+IO operations to the underlying block device.
+
+A system administrator can manage cache instances via the Intel CAS CLI management
+utility called "casadm".
+
+![OCF Linux deployment view](img/deployment-1.png)
+
+Another example of OCF usage is a user space block level cache for QEMU
+(see picture below). In this example the following adaptation layers may exist:
+- <b>CAS virtIO-blk driver for QEMU (top adapter)</b> - exposes the
+primary storage device (another virtIO driver) to the guest OS via the OCF library
+- <b>virtIO-blk volume (bottom adapter)</b> - enables OCF to access
+data on the primary storage device or cache device via the original virtIO driver
+
+Please note that the actual adapters depend on the environment where OCF is
+meant to run. There can be different bottom adapters delivered for the cache
+device and the primary storage device. For example, the bottom adapter for the
+caching device may be implemented using kernel bypass techniques, providing
+low-latency access to the cache media.
+
+![OCF deployment in QEMU example](img/deployment-2.png)
+
+# Management interface
+The management interface delivered with Intel OCF enables a system administrator to:
+ - Configure the OCF caching library for the target environment, which includes
+installation of the required platform dependent adapters.
+ - Start/stop and manage existing cache instances.
+ - Perform observability functions (e.g. retrieving performance counters).
+
+For more details please see the examples below:
+
+## Library initialization example
+
+OCF can be used simultaneously by two independent libraries linked into the
+same executable thanks to its concept of contexts. Each context has its own
+set of operations which handle the specific data types used by volumes
+within that context.
+
+```c
+#include "ocf.h"
+
+/* Handle to library context */
+ocf_ctx_t ctx;
+
+/* Your context interface */
+const struct ocf_ctx_ops ctx_ops = {
+/* Fill your interface functions */
+};
+
+/* Your unique volume type IDs */
+enum my_volume_type {
+ my_volume_type_1,
+ my_volume_type_2
+};
+
+/* Your volumes interface declaration */
+const struct ocf_volume_ops my_volume_ops1 = {
+ .name = "My volume 1",
+ /* Fill your volume interface functions */
+};
+
+const struct ocf_volume_ops my_volume_ops2 = {
+ .name = "My volume 2"
+ /* Fill your volume interface functions */
+};
+
+int my_cache_init(void)
+{
+ int result;
+
+ result = ocf_ctx_create(&ctx, &ctx_ops);
+ if (result) {
+ /* Cannot initialize context of OCF library */
+ return result;
+ }
+ /* Initialization successful */
+
+ /* Now we can register volumes */
+ result |= ocf_ctx_register_volume_ops(ctx, &my_volume_ops1,
+ my_volume_type_1);
+ if (result) {
+ /* Cannot register volume interface */
+ goto err;
+ }
+
+ result |= ocf_ctx_register_volume_ops(ctx, &my_volume_ops2,
+ my_volume_type_2);
+ if (result) {
+ /* Cannot register volume interface */
+ goto err;
+ }
+
+ return 0;
+
+err:
+ /* In case of failure we destroy context and propagate error code */
+ ocf_ctx_put(ctx);
+ return result;
+}
+
+```
+
+## Cache management
+The OCF library API provides management functions (@ref ocf_mngt.h). This
+interface enables the user to manage cache instances. Examples:
+- Start cache
+```c
+int result;
+ocf_cache_t cache; /* Handle to your cache */
+struct ocf_mngt_cache_config cfg; /* Your cache configuration */
+
+/* Prepare your cache configuration */
+
+/* Configure cache mode */
+cfg.cache_mode = ocf_cache_mode_wt;
+
+/* Now tell how your cache will be initialized. Select warm or cold cache */
+cfg.init_mode = ocf_init_mode_init;
+
+cfg.uuid.data = "/path/to/your/cache/or/unique/id";
+
+/* Specify cache volume type */
+cfg.volume_type = my_volume_type_1;
+
+/* Other cache configuration */
+...
+
+/* Start cache. */
+result = ocf_mngt_cache_start(ctx, &cache, &cfg);
+if (!result) {
+ /* Your cache was created successfully */
+}
+```
+
+- Add core (primary storage device) to cache
+```c
+int result;
+ocf_core_t core; /* Handle to your core */
+struct ocf_mngt_core_config cfg; /* Your core configuration */
+
+/* Prepare core configuration */
+
+/* Select core volume type */
+cfg.volume_type = my_volume_type_2;
+/* Set UUID or path of your core */
+cfg.uuid.data = "/path/to/your/core/or/unique/id";
+
+result = ocf_mngt_cache_add_core(cache, &core, &cfg);
+if (!result) {
+ /* Your core was added successfully */
+}
+
+```
+
+## Management interface considerations
+Each device (cache or core) is assigned an ID, either automatically by OCF or
+explicitly specified by the user. It is possible to retrieve the ID of a cache
+instance via @ref ocf_cache_get_id. To get the ID of a core instance please
+use @ref ocf_core_get_id.
+
+Cache management operations are thread safe - it is possible to perform
+cache management from many threads at a time. There is a possibility to "batch"
+several cache management operations and execute them under the cache management
+lock. To do this the user needs to first obtain the cache management lock, perform
+the management operations and finally release the lock. For reference see the example below.
+
+```c
+int my_complex_work(ocf_cache_id_t cache_id,
+ ocf_core_id_t core_id)
+{
+ int result;
+ ocf_cache_t cache; /* Handle to your cache */
+ ocf_core_t core; /* Handle to your core */
+
+ /* Get cache handle */
+ result = ocf_mngt_cache_get(ctx, cache_id, &cache);
+ if (result)
+ return result;
+
+ /* Lock cache */
+ result = ocf_mngt_cache_lock(cache);
+ if (result) {
+ ocf_mngt_cache_put(cache);
+ return result;
+ }
+
+ /* Get core handle */
+ result = ocf_core_get(cache, core_id, &core);
+ if (result) {
+ result = -1;
+ goto END;
+ }
+
+ /* Cache is locked, you can perform your activities */
+
+ /* 1. Flush your core */
+ result = ocf_mngt_core_flush(cache, core_id, true);
+ if (result) {
+ goto END;
+ }
+
+ /* 2. Your other operations including internal actions */
+
+ /* 3. Removing core from cache */
+ result = ocf_mngt_cache_remove_core(cache, core_id, true);
+
+END:
+ ocf_mngt_cache_unlock(cache); /* Remember to unlock cache */
+ ocf_mngt_cache_put(cache); /* Release cache reference */
+
+ return result;
+}
+```
+
+# IO path
+Please refer to the sequence diagram below for the detailed IO flow. A typical
+IO path includes:
+ - <b>IO allocation</b> - creating a new IO instance that will be submitted to
+OCF for processing
+ - <b>IO configuration</b> - specifying address and length, IO class, flags and
+the completion function
+ - <b>IO submission</b> - actual IO submission to OCF. OCF will perform a cache
+lookup and based on its results will return data from the cache or the primary
+storage device
+ - <b>IO completion</b> - signalled by calling the completion function specified
+in the IO configuration phase
+
+![An example of IO flow](img/io-path.png)
+
+## IO submission example
+```c
+#include "ocf.h"
+
+void read_end(struct ocf_io *io, int error)
+{
+ /* Your IO has been finished. Check the result and inform upper
+ * layers.
+ */
+
+ /* Release IO */
+ ocf_io_put(io);
+}
+
+int read(ocf_core_t core, ocf_queue_t queue, void *data, uint64_t addr, uint32_t length)
+{
+ /* Allocate IO */
+ struct ocf_io *io;
+
+ io = ocf_core_new_io(core, queue, addr, length, OCF_READ, 0, 0);
+ if (!io) {
+ /* Cannot allocate IO */
+ return -ENOMEM;
+ }
+
+ /* Set completion context and function */
+ ocf_io_set_cmpl(io, NULL, NULL, read_end);
+
+ /* Set data */
+ if (ocf_io_set_data(io, data, 0)) {
+ ocf_io_put(io);
+ return -EINVAL;
+ }
+
+ /* Submit IO request to the cache */
+ ocf_core_submit_io(io);
+
+ /* That's it */
+ return 0;
+}
+```
diff --git a/src/spdk/ocf/doc/doxygen.cfg b/src/spdk/ocf/doc/doxygen.cfg
new file mode 100644
index 000000000..2a8f10652
--- /dev/null
+++ b/src/spdk/ocf/doc/doxygen.cfg
@@ -0,0 +1,329 @@
+# Doxyfile 1.8.6
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING = UTF-8
+PROJECT_NAME = "Open CAS Framework"
+PROJECT_NUMBER =
+PROJECT_BRIEF = "Open source framework of Cache Acceleration Software"
+PROJECT_LOGO =
+OUTPUT_DIRECTORY = .
+CREATE_SUBDIRS = NO
+ALLOW_UNICODE_NAMES = NO
+OUTPUT_LANGUAGE = English
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = NO
+STRIP_FROM_PATH =
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = NO
+QT_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 8
+ALIASES =
+TCL_SUBST =
+OPTIMIZE_OUTPUT_FOR_C = YES
+OPTIMIZE_OUTPUT_JAVA = NO
+OPTIMIZE_FOR_FORTRAN = NO
+OPTIMIZE_OUTPUT_VHDL = NO
+EXTENSION_MAPPING =
+MARKDOWN_SUPPORT = YES
+AUTOLINK_SUPPORT = YES
+BUILTIN_STL_SUPPORT = NO
+CPP_CLI_SUPPORT = NO
+SIP_SUPPORT = NO
+IDL_PROPERTY_SUPPORT = YES
+DISTRIBUTE_GROUP_DOC = NO
+GROUP_NESTED_COMPOUNDS = NO
+SUBGROUPING = YES
+INLINE_GROUPED_CLASSES = NO
+INLINE_SIMPLE_STRUCTS = NO
+TYPEDEF_HIDES_STRUCT = NO
+LOOKUP_CACHE_SIZE = 0
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_PACKAGE = NO
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = NO
+HIDE_SCOPE_NAMES = YES
+HIDE_COMPOUND_REFERENCE= NO
+SHOW_INCLUDE_FILES = YES
+SHOW_GROUPED_MEMB_INC = NO
+FORCE_LOCAL_INCLUDES = NO
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_MEMBERS_CTORS_1ST = NO
+SORT_GROUP_NAMES = NO
+SORT_BY_SCOPE_NAME = NO
+STRICT_PROTO_MATCHING = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = YES
+SHOW_FILES = YES
+SHOW_NAMESPACES = YES
+FILE_VERSION_FILTER =
+LAYOUT_FILE =
+CITE_BIB_FILES =
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_AS_ERROR = NO
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT = ../inc HOME.md
+INPUT_ENCODING = UTF-8
+FILE_PATTERNS = *.c \
+ *.h \
+ *.md
+RECURSIVE = YES
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXCLUDE_SYMBOLS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS = *
+EXAMPLE_RECURSIVE = NO
+IMAGE_PATH = ./img/
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+FILTER_SOURCE_PATTERNS =
+USE_MDFILE_AS_MAINPAGE = HOME.md
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+REFERENCES_LINK_SOURCE = YES
+SOURCE_TOOLTIPS = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = YES
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML = YES
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+#HTML_HEADER = header.html
+#HTML_FOOTER = footer.html
+HTML_STYLESHEET =
+HTML_EXTRA_STYLESHEET =
+HTML_EXTRA_FILES =
+HTML_COLORSTYLE_HUE = 220
+HTML_COLORSTYLE_SAT = 100
+HTML_COLORSTYLE_GAMMA = 80
+HTML_TIMESTAMP = NO
+HTML_DYNAMIC_SECTIONS = NO
+HTML_INDEX_NUM_ENTRIES = 100
+GENERATE_DOCSET = NO
+DOCSET_FEEDNAME = "Doxygen generated docs"
+DOCSET_BUNDLE_ID = org.doxygen.Project
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+DOCSET_PUBLISHER_NAME = Publisher
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+CHM_INDEX_ENCODING =
+BINARY_TOC = NO
+TOC_EXPAND = NO
+GENERATE_QHP = NO
+QCH_FILE =
+QHP_NAMESPACE = org.doxygen.Project
+QHP_VIRTUAL_FOLDER = doc
+QHP_CUST_FILTER_NAME =
+QHP_CUST_FILTER_ATTRS =
+QHP_SECT_FILTER_ATTRS =
+QHG_LOCATION =
+GENERATE_ECLIPSEHELP = NO
+ECLIPSE_DOC_ID = org.doxygen.Project
+DISABLE_INDEX = NO
+GENERATE_TREEVIEW = NO
+ENUM_VALUES_PER_LINE = 4
+TREEVIEW_WIDTH = 250
+EXT_LINKS_IN_WINDOW = NO
+FORMULA_FONTSIZE = 10
+FORMULA_TRANSPARENT = YES
+USE_MATHJAX = NO
+MATHJAX_FORMAT = HTML-CSS
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+MATHJAX_EXTENSIONS =
+MATHJAX_CODEFILE =
+SEARCHENGINE = YES
+SERVER_BASED_SEARCH = NO
+EXTERNAL_SEARCH = NO
+SEARCHENGINE_URL =
+SEARCHDATA_FILE = searchdata.xml
+EXTERNAL_SEARCH_ID =
+EXTRA_SEARCH_MAPPINGS =
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX = NO
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4
+EXTRA_PACKAGES =
+LATEX_HEADER =
+LATEX_FOOTER =
+LATEX_EXTRA_STYLESHEET =
+LATEX_EXTRA_FILES =
+PDF_HYPERLINKS = YES
+USE_PDFLATEX = YES
+LATEX_BATCHMODE = NO
+LATEX_HIDE_INDICES = NO
+LATEX_SOURCE_CODE = NO
+LATEX_BIB_STYLE = plain
+LATEX_TIMESTAMP = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+RTF_SOURCE_CODE = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_SUBDIR =
+MAN_LINKS = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML = NO
+XML_OUTPUT = xml
+XML_PROGRAMLISTING = YES
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+GENERATE_DOCBOOK = NO
+DOCBOOK_OUTPUT = docbook
+DOCBOOK_PROGRAMLISTING = NO
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+EXTERNAL_PAGES = YES
+PERL_PATH = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS = YES
+MSCGEN_PATH =
+DIA_PATH =
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+DOT_NUM_THREADS = 0
+DOT_FONTNAME = Helvetica
+DOT_FONTSIZE = 10
+DOT_FONTPATH =
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+UML_LOOK = NO
+UML_LIMIT_NUM_FIELDS = 10
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = YES
+INCLUDED_BY_GRAPH = YES
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = png
+INTERACTIVE_SVG = NO
+DOT_PATH =
+DOTFILE_DIRS =
+MSCFILE_DIRS =
+DIAFILE_DIRS =
+PLANTUML_JAR_PATH =
+PLANTUML_INCLUDE_PATH =
+DOT_GRAPH_MAX_NODES = 50
+MAX_DOT_GRAPH_DEPTH = 0
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
diff --git a/src/spdk/ocf/doc/img/deployment-1.png b/src/spdk/ocf/doc/img/deployment-1.png
new file mode 100644
index 000000000..ccf6769f7
--- /dev/null
+++ b/src/spdk/ocf/doc/img/deployment-1.png
Binary files differ
diff --git a/src/spdk/ocf/doc/img/deployment-2.png b/src/spdk/ocf/doc/img/deployment-2.png
new file mode 100644
index 000000000..f31df9e24
--- /dev/null
+++ b/src/spdk/ocf/doc/img/deployment-2.png
Binary files differ
diff --git a/src/spdk/ocf/doc/img/io-path.png b/src/spdk/ocf/doc/img/io-path.png
new file mode 100644
index 000000000..8c43fdca4
--- /dev/null
+++ b/src/spdk/ocf/doc/img/io-path.png
Binary files differ
diff --git a/src/spdk/ocf/env/posix/ocf_env.c b/src/spdk/ocf/env/posix/ocf_env.c
new file mode 100644
index 000000000..6b8a6d5da
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_env.h"
+#include <sched.h>
+#include <execinfo.h>
+
+/* ALLOCATOR */
+struct _env_allocator {
+ /*!< Memory pool ID unique name */
+ char *name;
+
+ /*!< Size of specific item of memory pool */
+ uint32_t item_size;
+
+ /*!< Number of currently allocated items in pool */
+ env_atomic count;
+};
+
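+/* Round the allocation size up to the next power of two
+ * (sizes smaller than or equal to 2 are returned unchanged). */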
+static inline size_t env_allocator_align(size_t size)
+{
+ if (size <= 2)
+ return size;
+ return (1ULL << 32) >> __builtin_clz(size - 1);
+}
+
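+/* Header prepended to every object handed out by the allocator;
+ * data[] is the part returned to the caller. */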
+struct _env_allocator_item {
+ uint32_t flags;
+ uint32_t cpu;
+ char data[];
+};
+
+void *env_allocator_new(env_allocator *allocator)
+{
+ struct _env_allocator_item *item = NULL;
+
+ item = calloc(1, allocator->item_size);
+
+ if (!item)
+ return NULL;
+
+ item->cpu = 0;
+ env_atomic_inc(&allocator->count);
+
+ return &item->data;
+}
+
+env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...)
+{
+ char name[OCF_ALLOCATOR_NAME_MAX] = { '\0' };
+ int result, error = -1;
+ va_list args;
+
+ env_allocator *allocator = calloc(1, sizeof(*allocator));
+ if (!allocator) {
+ error = __LINE__;
+ goto err;
+ }
+
+ allocator->item_size = size + sizeof(struct _env_allocator_item);
+
+ /* Format allocator name */
+ va_start(args, fmt_name);
+ result = vsnprintf(name, sizeof(name), fmt_name, args);
+ va_end(args);
+
+ if ((result > 0) && (result < sizeof(name))) {
+ allocator->name = strdup(name);
+
+ if (!allocator->name) {
+ error = __LINE__;
+ goto err;
+ }
+ } else {
+ /* Formatted name exceeds the maximum allowed size */
+ error = __LINE__;
+ goto err;
+ }
+
+ return allocator;
+
+err:
+ printf("Cannot create memory allocator, ERROR %d", error);
+ env_allocator_destroy(allocator);
+
+ return NULL;
+}
+
+void env_allocator_del(env_allocator *allocator, void *obj)
+{
+ struct _env_allocator_item *item =
+ container_of(obj, struct _env_allocator_item, data);
+
+ env_atomic_dec(&allocator->count);
+
+ free(item);
+}
+
+void env_allocator_destroy(env_allocator *allocator)
+{
+ if (allocator) {
+ if (env_atomic_read(&allocator->count)) {
+ printf("Not all objects deallocated\n");
+ ENV_WARN(true, OCF_PREFIX_SHORT" Cleanup problem\n");
+ }
+
+ free(allocator->name);
+ free(allocator);
+ }
+}
+
+/* DEBUGGING */
+#define ENV_TRACE_DEPTH 16
+
+void env_stack_trace(void)
+{
+ void *trace[ENV_TRACE_DEPTH];
+ char **messages = NULL;
+ int i, size;
+
+ size = backtrace(trace, ENV_TRACE_DEPTH);
+ messages = backtrace_symbols(trace, size);
+ printf("[stack trace]>>>\n");
+ for (i = 0; i < size; ++i)
+ printf("%s\n", messages[i]);
+ printf("<<<[stack trace]\n");
+ free(messages);
+}
+
+/* CRC */
+uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
+{
+ return crc32(crc, data, len);
+}
+
+/* EXECUTION CONTEXTS */
+pthread_mutex_t *exec_context_mutex;
+
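+/* Allocate one mutex per online CPU at library load time;
+ * released by the matching destructor below. */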
+static void __attribute__((constructor)) init_execution_context(void)
+{
+ unsigned count = env_get_execution_context_count();
+ unsigned i;
+
+ ENV_BUG_ON(count == 0);
+ exec_context_mutex = malloc(count * sizeof(exec_context_mutex[0]));
+ ENV_BUG_ON(exec_context_mutex == NULL);
+ for (i = 0; i < count; i++)
+ ENV_BUG_ON(pthread_mutex_init(&exec_context_mutex[i], NULL));
+}
+
+static void __attribute__((destructor)) deinit_execution_context(void)
+{
+ unsigned count = env_get_execution_context_count();
+ unsigned i;
+
+ ENV_BUG_ON(count == 0);
+ ENV_BUG_ON(exec_context_mutex == NULL);
+
+ for (i = 0; i < count; i++)
+ ENV_BUG_ON(pthread_mutex_destroy(&exec_context_mutex[i]));
+ free(exec_context_mutex);
+}
+
+/* env_get_execution_context must assure that after the call finishes, the caller
+ * will not get preempted from current execution context. For userspace env
+ * we simulate this behavior by acquiring per execution context mutex. As a
+ * result the caller might actually get preempted, but no other thread will
+ * execute in this context by the time the caller puts current execution ctx. */
+unsigned env_get_execution_context(void)
+{
+ unsigned cpu;
+
+ cpu = sched_getcpu();
+ cpu = (cpu == -1) ? 0 : cpu;
+
+ ENV_BUG_ON(pthread_mutex_lock(&exec_context_mutex[cpu]));
+
+ return cpu;
+}
+
+void env_put_execution_context(unsigned ctx)
+{
+ pthread_mutex_unlock(&exec_context_mutex[ctx]);
+}
+
+unsigned env_get_execution_context_count(void)
+{
+ int num = sysconf(_SC_NPROCESSORS_ONLN);
+
+ return (num == -1) ? 0 : num;
+}
diff --git a/src/spdk/ocf/env/posix/ocf_env.h b/src/spdk/ocf/env/posix/ocf_env.h
new file mode 100644
index 000000000..da9c6421a
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_H__
+#define __OCF_ENV_H__
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#ifndef __USE_GNU
+#define __USE_GNU
+#endif
+
+#include <linux/limits.h>
+#include <linux/stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <pthread.h>
+#include <assert.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <limits.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/time.h>
+#include <sys/param.h>
+#include <sys/mman.h>
+#include <zlib.h>
+
+#include "ocf_env_list.h"
+#include "ocf_env_headers.h"
+#include "ocf/ocf_err.h"
+
+/* linux sector 512-bytes */
+#define ENV_SECTOR_SHIFT 9
+
+#define OCF_ALLOCATOR_NAME_MAX 128
+
+#define PAGE_SIZE 4096
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define min(a,b) MIN(a,b)
+
+#define ENV_PRIu64 "lu"
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef uint64_t sector_t;
+
+#define __packed __attribute__((packed))
+
+#define likely(cond) __builtin_expect(!!(cond), 1)
+#define unlikely(cond) __builtin_expect(!!(cond), 0)
+
+/* MEMORY MANAGEMENT */
+#define ENV_MEM_NORMAL 0
+#define ENV_MEM_NOIO 0
+#define ENV_MEM_ATOMIC 0
+
+/* DEBUGGING */
+#define ENV_WARN(cond, fmt...) printf(fmt)
+#define ENV_WARN_ON(cond) ;
+#define ENV_WARN_ONCE(cond, fmt...) ENV_WARN(cond, fmt)
+
+#define ENV_BUG() assert(0)
+#define ENV_BUG_ON(cond) do { if (cond) ENV_BUG(); } while (0)
+#define ENV_BUILD_BUG_ON(cond) _Static_assert(!(cond), "static "\
+ "assertion failure")
+
+/* MISC UTILITIES */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member)*__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+/* STRING OPERATIONS */
+#define env_memcpy(dest, dmax, src, slen) ({ \
+ memcpy(dest, src, min(dmax, slen)); \
+ 0; \
+ })
+#define env_memset(dest, dmax, val) ({ \
+ memset(dest, val, dmax); \
+ 0; \
+ })
+#define env_memcmp(s1, s1max, s2, s2max, diff) ({ \
+ *diff = memcmp(s1, s2, min(s1max, s2max)); \
+ 0; \
+ })
+#define env_strdup strndup
+#define env_strnlen(s, smax) strnlen(s, smax)
+#define env_strncmp(s1, slen1, s2, slen2) strncmp(s1, s2, min(slen1, slen2))
+#define env_strncpy(dest, dmax, src, slen) ({ \
+ strncpy(dest, src, min(dmax - 1, slen)); \
+ dest[dmax - 1] = '\0'; \
+ 0; \
+ })
+
+/* MEMORY MANAGEMENT */
+static inline void *env_malloc(size_t size, int flags)
+{
+ return malloc(size);
+}
+
+static inline void *env_zalloc(size_t size, int flags)
+{
+ void *ptr = malloc(size);
+
+ if (ptr)
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+
+static inline void env_free(const void *ptr)
+{
+ free((void *)ptr);
+}
+
+static inline void *env_vmalloc_flags(size_t size, int flags)
+{
+ return malloc(size);
+}
+
+static inline void *env_vzalloc_flags(size_t size, int flags)
+{
+ return env_zalloc(size, 0);
+}
+
+static inline void *env_vmalloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void *env_vzalloc(size_t size)
+{
+ return env_zalloc(size, 0);
+}
+
+static inline void env_vfree(const void *ptr)
+{
+ free((void *)ptr);
+}
+
+/* SECURE MEMORY MANAGEMENT */
+/*
+ * OCF adapter can opt to take additional steps to securely allocate and free
+ * memory used by OCF to store cache metadata. This is to prevent other
+ * entities in the system from acquiring parts of OCF cache metadata via
+ * memory allocations. If this is not a concern in given product, secure
+ * alloc/free should default to vmalloc/vfree.
+ *
+ * Memory returned from secure alloc is not expected to be physically contiguous
+ * nor zeroed.
+ */
+
+/* default to standard memory allocations for secure allocations */
+#define SECURE_MEMORY_HANDLING 0
+
+static inline void *env_secure_alloc(size_t size)
+{
+ void *ptr = malloc(size);
+
+#if SECURE_MEMORY_HANDLING
+ if (ptr && mlock(ptr, size)) {
+ free(ptr);
+ ptr = NULL;
+ }
+#endif
+
+ return ptr;
+}
+
+static inline void env_secure_free(const void *ptr, size_t size)
+{
+ if (ptr) {
+#if SECURE_MEMORY_HANDLING
+ memset((void *)ptr, 0, size);
+ /* TODO: flush CPU caches ? */
+ ENV_BUG_ON(munlock(ptr, size));
+#endif
+ free((void*)ptr);
+ }
+}
+
+static inline uint64_t env_get_free_memory(void)
+{
+ return (uint64_t)(-1);
+}
+
+/* ALLOCATOR */
+typedef struct _env_allocator env_allocator;
+
+env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...);
+
+void env_allocator_destroy(env_allocator *allocator);
+
+void *env_allocator_new(env_allocator *allocator);
+
+void env_allocator_del(env_allocator *allocator, void *item);
+
+/* MUTEX */
+typedef struct {
+ pthread_mutex_t m;
+} env_mutex;
+
+#define env_cond_resched() ({})
+
+static inline int env_mutex_init(env_mutex *mutex)
+{
+ if(pthread_mutex_init(&mutex->m, NULL))
+ return 1;
+
+ return 0;
+}
+
+static inline void env_mutex_lock(env_mutex *mutex)
+{
+ ENV_BUG_ON(pthread_mutex_lock(&mutex->m));
+}
+
+static inline int env_mutex_trylock(env_mutex *mutex)
+{
+ return pthread_mutex_trylock(&mutex->m);
+}
+
+static inline int env_mutex_lock_interruptible(env_mutex *mutex)
+{
+ env_mutex_lock(mutex);
+ return 0;
+}
+
+static inline void env_mutex_unlock(env_mutex *mutex)
+{
+ ENV_BUG_ON(pthread_mutex_unlock(&mutex->m));
+}
+
+static inline int env_mutex_destroy(env_mutex *mutex)
+{
+ if(pthread_mutex_destroy(&mutex->m))
+ return 1;
+
+ return 0;
+}
+
+/* RECURSIVE MUTEX */
+typedef env_mutex env_rmutex;
+
+static inline int env_rmutex_init(env_rmutex *rmutex)
+{
+ pthread_mutexattr_t attr;
+
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&rmutex->m, &attr);
+
+ return 0;
+}
+
+static inline void env_rmutex_lock(env_rmutex *rmutex)
+{
+ env_mutex_lock(rmutex);
+}
+
+static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)
+{
+ return env_mutex_lock_interruptible(rmutex);
+}
+
+static inline void env_rmutex_unlock(env_rmutex *rmutex)
+{
+ env_mutex_unlock(rmutex);
+}
+
+static inline int env_rmutex_destroy(env_rmutex *rmutex)
+{
+ if(pthread_mutex_destroy(&rmutex->m))
+ return 1;
+
+ return 0;
+}
+
+/* RW SEMAPHORE */
+typedef struct {
+ pthread_rwlock_t lock;
+} env_rwsem;
+
+static inline int env_rwsem_init(env_rwsem *s)
+{
+ return pthread_rwlock_init(&s->lock, NULL);
+}
+
+static inline void env_rwsem_up_read(env_rwsem *s)
+{
+ pthread_rwlock_unlock(&s->lock);
+}
+
+static inline void env_rwsem_down_read(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_rdlock(&s->lock));
+}
+
+static inline int env_rwsem_down_read_trylock(env_rwsem *s)
+{
+ return pthread_rwlock_tryrdlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline void env_rwsem_up_write(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
+}
+
+static inline void env_rwsem_down_write(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_wrlock(&s->lock));
+}
+
+static inline int env_rwsem_down_write_trylock(env_rwsem *s)
+{
+ return pthread_rwlock_trywrlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline int env_rwsem_destroy(env_rwsem *s)
+{
+ return pthread_rwlock_destroy(&s->lock);
+}
+
+/* COMPLETION */
+struct completion {
+ sem_t sem;
+};
+
+typedef struct completion env_completion;
+
+static inline void env_completion_init(env_completion *completion)
+{
+ sem_init(&completion->sem, 0, 0);
+}
+
+static inline void env_completion_wait(env_completion *completion)
+{
+ sem_wait(&completion->sem);
+}
+
+static inline void env_completion_complete(env_completion *completion)
+{
+ sem_post(&completion->sem);
+}
+
+static inline void env_completion_destroy(env_completion *completion)
+{
+ sem_destroy(&completion->sem);
+}
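+
+/*
+ * Typical use (illustrative): one context waits until another signals
+ * completion.
+ *
+ *   env_completion cmpl;
+ *   env_completion_init(&cmpl);
+ *   ... start asynchronous work that calls env_completion_complete(&cmpl) ...
+ *   env_completion_wait(&cmpl);
+ *   env_completion_destroy(&cmpl);
+ */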
+
+/* ATOMIC VARIABLES */
+typedef struct {
+ volatile int counter;
+} env_atomic;
+
+typedef struct {
+ volatile long counter;
+} env_atomic64;
+
+static inline int env_atomic_read(const env_atomic *a)
+{
+ return a->counter; /* TODO */
+}
+
+static inline void env_atomic_set(env_atomic *a, int i)
+{
+ a->counter = i; /* TODO */
+}
+
+static inline void env_atomic_add(int i, env_atomic *a)
+{
+ __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic_sub(int i, env_atomic *a)
+{
+ __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic_inc(env_atomic *a)
+{
+ env_atomic_add(1, a);
+}
+
+static inline void env_atomic_dec(env_atomic *a)
+{
+ env_atomic_sub(1, a);
+}
+
+static inline bool env_atomic_dec_and_test(env_atomic *a)
+{
+ return __sync_sub_and_fetch(&a->counter, 1) == 0;
+}
+
+static inline int env_atomic_add_return(int i, env_atomic *a)
+{
+ return __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline int env_atomic_sub_return(int i, env_atomic *a)
+{
+ return __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline int env_atomic_inc_return(env_atomic *a)
+{
+ return env_atomic_add_return(1, a);
+}
+
+static inline int env_atomic_dec_return(env_atomic *a)
+{
+ return env_atomic_sub_return(1, a);
+}
+
+static inline int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
+{
+ return __sync_val_compare_and_swap(&a->counter, old, new_value);
+}
+
+static inline int env_atomic_add_unless(env_atomic *a, int i, int u)
+{
+ int c, old;
+ c = env_atomic_read(a);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = env_atomic_cmpxchg((a), c, c + (i));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
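+
+/*
+ * Example (illustrative, obj is hypothetical): take a reference only if the
+ * count has not already dropped to zero. The call returns non-zero when the
+ * addition was actually performed.
+ *
+ *   if (env_atomic_add_unless(&obj->refcnt, 1, 0)) {
+ *           ... use obj ...
+ *           env_atomic_dec(&obj->refcnt);
+ *   }
+ */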
+
+static inline long env_atomic64_read(const env_atomic64 *a)
+{
+ return a->counter; /* TODO */
+}
+
+static inline void env_atomic64_set(env_atomic64 *a, long i)
+{
+ a->counter = i; /* TODO */
+}
+
+static inline void env_atomic64_add(long i, env_atomic64 *a)
+{
+ __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic64_sub(long i, env_atomic64 *a)
+{
+ __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic64_inc(env_atomic64 *a)
+{
+ env_atomic64_add(1, a);
+}
+
+static inline void env_atomic64_dec(env_atomic64 *a)
+{
+ env_atomic64_sub(1, a);
+}
+
+static inline long env_atomic64_inc_return(env_atomic64 *a)
+{
+ return __sync_add_and_fetch(&a->counter, 1);
+}
+
+static inline long env_atomic64_cmpxchg(env_atomic64 *a, long old_v, long new_v)
+{
+ return __sync_val_compare_and_swap(&a->counter, old_v, new_v);
+}
+
+/* SPIN LOCKS */
+typedef struct {
+ pthread_spinlock_t lock;
+} env_spinlock;
+
+static inline int env_spinlock_init(env_spinlock *l)
+{
+ return pthread_spin_init(&l->lock, 0);
+}
+
+static inline int env_spinlock_trylock(env_spinlock *l)
+{
+ return pthread_spin_trylock(&l->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline void env_spinlock_lock(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_lock(&l->lock));
+}
+
+static inline void env_spinlock_unlock(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_unlock(&l->lock));
+}
+
+#define env_spinlock_lock_irqsave(l, flags) \
+ do { (void)(flags); env_spinlock_lock(l); } while (0)
+
+#define env_spinlock_unlock_irqrestore(l, flags) \
+ do { (void)(flags); env_spinlock_unlock(l); } while (0)
+
+static inline void env_spinlock_destroy(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_destroy(&l->lock));
+}
+
+/* RW LOCKS */
+typedef struct {
+ pthread_rwlock_t lock;
+} env_rwlock;
+
+static inline void env_rwlock_init(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_init(&l->lock, NULL));
+}
+
+static inline void env_rwlock_read_lock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_rdlock(&l->lock));
+}
+
+static inline void env_rwlock_read_unlock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
+}
+
+static inline void env_rwlock_write_lock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_wrlock(&l->lock));
+}
+
+static inline void env_rwlock_write_unlock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
+}
+
+static inline void env_rwlock_destroy(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_destroy(&l->lock));
+}
+
+/* BIT OPERATIONS */
+static inline void env_bit_set(int nr, volatile void *addr)
+{
+ char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ __sync_or_and_fetch(byte, mask);
+}
+
+static inline void env_bit_clear(int nr, volatile void *addr)
+{
+ char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ mask = ~mask;
+ __sync_and_and_fetch(byte, mask);
+}
+
+static inline bool env_bit_test(int nr, const volatile unsigned long *addr)
+{
+ const char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ return !!(*byte & mask);
+}
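+
+/*
+ * Example (illustrative): atomically set, test and clear bit 5 of a bitmap.
+ *
+ *   unsigned long bitmap[4] = { 0 };
+ *   env_bit_set(5, bitmap);
+ *   if (env_bit_test(5, bitmap))
+ *           env_bit_clear(5, bitmap);
+ */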
+
+/* SCHEDULING */
+static inline int env_in_interrupt(void)
+{
+ return 0;
+}
+
+static inline uint64_t env_get_tick_count(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static inline uint64_t env_ticks_to_nsecs(uint64_t j)
+{
+ return j * 1000;
+}
+
+static inline uint64_t env_ticks_to_msecs(uint64_t j)
+{
+ return j / 1000;
+}
+
+static inline uint64_t env_ticks_to_secs(uint64_t j)
+{
+ return j / 1000000;
+}
+
+static inline uint64_t env_secs_to_ticks(uint64_t j)
+{
+ return j * 1000000;
+}
+
+/* SORTING */
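+/* Note: the swap_fn argument is ignored by this implementation - qsort()
+ * performs its own element swapping. */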
+static inline void env_sort(void *base, size_t num, size_t size,
+ int (*cmp_fn)(const void *, const void *),
+ void (*swap_fn)(void *, void *, int size))
+{
+ qsort(base, num, size, cmp_fn);
+}
+
+/* TIME */
+static inline void env_msleep(uint64_t n)
+{
+ usleep(n * 1000);
+}
+
+struct env_timeval {
+ uint64_t sec, usec;
+};
+
+uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);
+
+unsigned env_get_execution_context(void);
+void env_put_execution_context(unsigned ctx);
+unsigned env_get_execution_context_count(void);
+
+#endif /* __OCF_ENV_H__ */
diff --git a/src/spdk/ocf/env/posix/ocf_env_headers.h b/src/spdk/ocf/env/posix/ocf_env_headers.h
new file mode 100644
index 000000000..2e4c9e182
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env_headers.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright(c) 2019-2020 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_HEADERS_H__
+#define __OCF_ENV_HEADERS_H__
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+/* TODO: Move prefix printing to context logger. */
+#define OCF_LOGO "OCF"
+#define OCF_PREFIX_SHORT "[" OCF_LOGO "] "
+#define OCF_PREFIX_LONG "Open CAS Framework"
+
+#define OCF_VERSION_MAIN 20
+#define OCF_VERSION_MAJOR 3
+#define OCF_VERSION_MINOR 0
+
+#endif /* __OCF_ENV_HEADERS_H__ */
diff --git a/src/spdk/ocf/env/posix/ocf_env_list.h b/src/spdk/ocf/env/posix/ocf_env_list.h
new file mode 100644
index 000000000..53ce53e46
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env_list.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_LIST__
+#define __OCF_ENV_LIST__
+
+#define LIST_POISON1 ((void *)0x101)
+#define LIST_POISON2 ((void *)0x202)
+
+/**
+ * List entry structure mimicking linux kernel based one.
+ */
+struct list_head {
+ struct list_head *next;
+ struct list_head *prev;
+};
+
+/**
+ * start an empty list
+ */
+#define INIT_LIST_HEAD(l) { (l)->prev = l; (l)->next = l; }
+
+/**
+ * Add item to list head.
+ * @param it list entry to be added
+ * @param l1 list main node (head)
+ */
+static inline void list_add(struct list_head *it, struct list_head *l1)
+{
+ it->prev = l1;
+ it->next = l1->next;
+
+ l1->next->prev = it;
+ l1->next = it;
+}
+
+/**
+ * Add item it to tail.
+ * @param it list entry to be added
+ * @param l1 list main node (head)
+ */
+static inline void list_add_tail(struct list_head *it, struct list_head *l1)
+{
+ it->prev = l1->prev;
+ it->next = l1;
+
+ l1->prev->next = it;
+ l1->prev = it;
+}
+
+/**
+ * Check if a list is empty (returns non-zero if empty).
+ * @param l1 list main node (head)
+ */
+static inline int list_empty(struct list_head *l1)
+{
+ return l1->next == l1;
+}
+
+/**
+ * delete an entry from a list
+ * @param it list entry to be deleted
+ */
+static inline void list_del(struct list_head *it)
+{
+ it->next->prev = it->prev;
+ it->prev->next = it->next;
+}
+
+/**
+ * Move element to list head.
+ * @param it list entry to be moved
+ * @param l1 list main node (head)
+ */
+static inline void list_move(struct list_head *it, struct list_head *l1)
+{
+ list_del(it);
+ list_add(it, l1);
+}
+
+/**
+ * Move element to list tail.
+ * @param it list entry to be moved
+ * @param l1 list main node (head)
+ */
+static inline void list_move_tail(struct list_head *it, struct list_head *l1)
+{
+ list_del(it);
+ list_add_tail(it, l1);
+}
+
+/**
+ * Extract an entry.
+ * @param list_head_i list head item, from which entry is extracted
+ * @param item_type type (struct) of list entry
+ * @param field_name name of list_head field within item_type
+ */
+#define list_entry(list_head_i, item_type, field_name) \
+ (item_type *)(((void*)(list_head_i)) - offsetof(item_type, field_name))
+
+#define list_first_entry(list_head_i, item_type, field_name) \
+ list_entry((list_head_i)->next, item_type, field_name)
+
+/**
+ * @param iterator uninitialized list_head pointer, to be used as iterator
+ * @param plist list head (main node)
+ */
+#define list_for_each(iterator, plist) \
+ for (iterator = (plist)->next; \
+ (iterator)->next != (plist)->next; \
+ iterator = (iterator)->next)
+
+/**
+ * Safe version of list_for_each which works even if entries are deleted during
+ * loop.
+ * @param iterator uninitialized list_head pointer, to be used as iterator
+ * @param q another uninitialized list_head, used as helper
+ * @param plist list head (main node)
+ */
+/*
+ * The algorithm handles the situation where q is deleted.
+ * Consider for example a 3-element list with header h:
+ *
+ *        h -> 1 -> 2 -> 3 ->
+ *  1.         i    q
+ *  2.              i    q
+ *  3.    q              i      (q has wrapped back to the head)
+ */
+#define list_for_each_safe(iterator, q, plist) \
+ for (iterator = (q = (plist)->next->next)->prev; \
+ (q) != (plist)->next; \
+ iterator = (q = (q)->next)->prev)
+
+#define _list_entry_helper(item, head, field_name) \
+ list_entry(head, typeof(*item), field_name)
+
+/**
+ * Iterate over list entries.
+ * @param item pointer to list entry (iterator)
+ * @param plist pointer to list_head item
+ * @param field_name name of list_head field in list entry
+ */
+#define list_for_each_entry(item, plist, field_name) \
+ for (item = _list_entry_helper(item, (plist)->next, field_name); \
+ _list_entry_helper(item, (item)->field_name.next, field_name) !=\
+ _list_entry_helper(item, (plist)->next, field_name); \
+ item = _list_entry_helper(item, (item)->field_name.next, field_name))
+
+/**
+ * Safe version of list_for_each_entry which works even if entries are deleted
+ * during loop.
+ * @param item pointer to list entry (iterator)
+ * @param q another pointer to list item, used as helper
+ * @param plist pointer to list_head item
+ * @param field_name name of list_head field in list entry
+ */
+#define list_for_each_entry_safe(item, q, plist, field_name) \
+ for (item = _list_entry_helper(item, (plist)->next, field_name), \
+ q = _list_entry_helper(item, (item)->field_name.next, field_name); \
+ _list_entry_helper(item, (item)->field_name.next, field_name) != \
+ _list_entry_helper(item, (plist)->next, field_name); \
+ item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))
+
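+/*
+ * Usage sketch (illustrative; struct item and its fields are hypothetical):
+ *
+ *   struct item {
+ *           int value;
+ *           struct list_head node;
+ *   };
+ *
+ *   struct list_head head;
+ *   struct item *it, *next;
+ *
+ *   INIT_LIST_HEAD(&head);
+ *   list_add_tail(&some_item->node, &head);
+ *
+ *   list_for_each_entry(it, &head, node)
+ *           printf("%d\n", it->value);
+ *
+ *   list_for_each_entry_safe(it, next, &head, node)
+ *           list_del(&it->node);
+ */
+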
+#endif // __OCF_ENV_LIST__
diff --git a/src/spdk/ocf/example/simple/Makefile b/src/spdk/ocf/example/simple/Makefile
new file mode 100644
index 000000000..f19cae0c5
--- /dev/null
+++ b/src/spdk/ocf/example/simple/Makefile
@@ -0,0 +1,37 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+OCFDIR=../../
+SRCDIR=src/
+INCDIR=include/
+
+SRC=$(shell find ${SRCDIR} -name \*.c)
+OBJS = $(patsubst %.c, %.o, $(SRC))
+PROGRAM=simple
+
+CC = gcc
+CFLAGS = -g -Wall -I${INCDIR} -I${SRCDIR}/ocf/env/
+LDFLAGS = -lm -lz -pthread
+
+all: sync
+ $(MAKE) $(PROGRAM)
+
+$(PROGRAM): $(OBJS)
+ $(CC) -o $@ $^ $(LDFLAGS)
+
+sync:
+ @$(MAKE) -C ${OCFDIR} inc O=$(PWD)
+ @$(MAKE) -C ${OCFDIR} src O=$(PWD)
+ @$(MAKE) -C ${OCFDIR} env O=$(PWD) OCF_ENV=posix
+
+clean:
+ @rm -rf $(PROGRAM) $(OBJS)
+
+distclean:
+ @rm -rf $(PROGRAM) $(OBJS)
+ @rm -rf src/ocf
+ @rm -rf include/ocf
+
+.PHONY: all sync clean distclean
diff --git a/src/spdk/ocf/example/simple/src/ctx.c b/src/spdk/ocf/example/simple/src/ctx.c
new file mode 100644
index 000000000..420ea63b0
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/ctx.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include <execinfo.h>
+#include <ocf/ocf.h>
+#include "ocf_env.h"
+#include "data.h"
+#include "volume.h"
+#include "ctx.h"
+
+#define PAGE_SIZE 4096
+
+/*
+ * Allocate structure representing data for io operations.
+ */
+ctx_data_t *ctx_data_alloc(uint32_t pages)
+{
+ struct volume_data *data;
+
+ data = malloc(sizeof(*data));
+ if (!data)
+   return NULL;
+
+ data->ptr = malloc(pages * PAGE_SIZE);
+ if (!data->ptr) {
+   free(data);
+   return NULL;
+ }
+
+ data->offset = 0;
+
+ return data;
+}
+
+/*
+ * Free data structure.
+ */
+void ctx_data_free(ctx_data_t *ctx_data)
+{
+ struct volume_data *data = ctx_data;
+
+ if (!data)
+ return;
+
+ free(data->ptr);
+ free(data);
+}
+
+/*
+ * This function is supposed to set protection of data pages against swapping.
+ * Can be non-implemented if not needed.
+ */
+static int ctx_data_mlock(ctx_data_t *ctx_data)
+{
+ return 0;
+}
+
+/*
+ * Stop protecting data pages against swapping.
+ */
+static void ctx_data_munlock(ctx_data_t *ctx_data)
+{
+}
+
+/*
+ * Read data into flat memory buffer.
+ */
+static uint32_t ctx_data_read(void *dst, ctx_data_t *src, uint32_t size)
+{
+ struct volume_data *data = src;
+
+ memcpy(dst, data->ptr + data->offset, size);
+
+ return size;
+}
+
+/*
+ * Write data from flat memory buffer.
+ */
+static uint32_t ctx_data_write(ctx_data_t *dst, const void *src, uint32_t size)
+{
+ struct volume_data *data = dst;
+
+ memcpy(data->ptr + data->offset, src, size);
+
+ return size;
+}
+
+/*
+ * Fill data with zeros.
+ */
+static uint32_t ctx_data_zero(ctx_data_t *dst, uint32_t size)
+{
+ struct volume_data *data = dst;
+
+ memset(data->ptr + data->offset, 0, size);
+
+ return size;
+}
+
+/*
+ * Perform seek operation on data.
+ */
+static uint32_t ctx_data_seek(ctx_data_t *dst, ctx_data_seek_t seek,
+ uint32_t offset)
+{
+ struct volume_data *data = dst;
+
+ switch (seek) {
+ case ctx_data_seek_begin:
+ data->offset = offset;
+ break;
+ case ctx_data_seek_current:
+ data->offset += offset;
+ break;
+ }
+
+ return offset;
+}
+
+/*
+ * Copy data from one structure to another.
+ */
+static uint64_t ctx_data_copy(ctx_data_t *dst, ctx_data_t *src,
+ uint64_t to, uint64_t from, uint64_t bytes)
+{
+ struct volume_data *data_dst = dst;
+ struct volume_data *data_src = src;
+
+ memcpy(data_dst->ptr + to, data_src->ptr + from, bytes);
+
+ return bytes;
+}
+
+/*
+ * Perform secure erase of data (e.g. fill pages with zeros).
+ * Can be left non-implemented if not needed.
+ */
+static void ctx_data_secure_erase(ctx_data_t *ctx_data)
+{
+}
+
+/*
+ * Initialize cleaner thread. Cleaner thread is left non-implemented,
+ * to keep this example as simple as possible.
+ */
+static int ctx_cleaner_init(ocf_cleaner_t c)
+{
+ return 0;
+}
+
+/*
+ * Kick cleaner thread. Cleaner thread is left non-implemented,
+ * to keep this example as simple as possible.
+ */
+static void ctx_cleaner_kick(ocf_cleaner_t c)
+{
+}
+
+/*
+ * Stop cleaner thread. Cleaner thread is left non-implemented, to keep
+ * this example as simple as possible.
+ */
+static void ctx_cleaner_stop(ocf_cleaner_t c)
+{
+}
+
+/*
+ * Initialize metadata updater thread. Metadata updater thread is left
+ * non-implemented to keep this example as simple as possible.
+ */
+static int ctx_metadata_updater_init(ocf_metadata_updater_t mu)
+{
+ return 0;
+}
+
+/*
+ * Kick metadata updater. There is no separate updater thread in this
+ * example, so the updater is simply run synchronously.
+ */
+static void ctx_metadata_updater_kick(ocf_metadata_updater_t mu)
+{
+ ocf_metadata_updater_run(mu);
+}
+
+/*
+ * Stop metadata updater thread. Metadata updater thread is left
+ * non-implemented to keep this example as simple as possible.
+ */
+static void ctx_metadata_updater_stop(ocf_metadata_updater_t mu)
+{
+}
+
+/*
+ * Function providing an interface for printing to the log used by OCF
+ * internals. It can handle messages at various log levels differently.
+ */
+static int ctx_logger_print(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, va_list args)
+{
+ FILE *lfile = stdout;
+
+ if (lvl > log_info)
+ return 0;
+
+ if (lvl <= log_warn)
+ lfile = stderr;
+
+ return vfprintf(lfile, fmt, args);
+}
+
+#define CTX_LOG_TRACE_DEPTH 16
+
+/*
+ * Function providing an interface for printing the current stack. Used for
+ * debugging and for providing additional information in the log in case of errors.
+ */
+static int ctx_logger_dump_stack(ocf_logger_t logger)
+{
+ void *trace[CTX_LOG_TRACE_DEPTH];
+ char **messages = NULL;
+ int i, size;
+
+ size = backtrace(trace, CTX_LOG_TRACE_DEPTH);
+ messages = backtrace_symbols(trace, size);
+ printf("[stack trace]>>>\n");
+ for (i = 0; i < size; ++i)
+ printf("%s\n", messages[i]);
+ printf("<<<[stack trace]\n");
+ free(messages);
+
+ return 0;
+}
+
+/*
+ * This structure describes context config, containing simple context info
+ * and pointers to ops callbacks. Ops are split into a few categories:
+ * - data ops, providing context specific data handling interface,
+ * - cleaner ops, providing interface to start and stop the cleaner thread,
+ * - metadata updater ops, providing interface for starting, stopping
+ * and kicking the metadata updater thread,
+ * - logger ops, providing interface for text message logging.
+ */
+static const struct ocf_ctx_config ctx_cfg = {
+ .name = "OCF Example",
+ .ops = {
+ .data = {
+ .alloc = ctx_data_alloc,
+ .free = ctx_data_free,
+ .mlock = ctx_data_mlock,
+ .munlock = ctx_data_munlock,
+ .read = ctx_data_read,
+ .write = ctx_data_write,
+ .zero = ctx_data_zero,
+ .seek = ctx_data_seek,
+ .copy = ctx_data_copy,
+ .secure_erase = ctx_data_secure_erase,
+ },
+
+ .cleaner = {
+ .init = ctx_cleaner_init,
+ .kick = ctx_cleaner_kick,
+ .stop = ctx_cleaner_stop,
+ },
+
+ .metadata_updater = {
+ .init = ctx_metadata_updater_init,
+ .kick = ctx_metadata_updater_kick,
+ .stop = ctx_metadata_updater_stop,
+ },
+
+ .logger = {
+ .print = ctx_logger_print,
+ .dump_stack = ctx_logger_dump_stack,
+ },
+ },
+};
+
+
+/*
+ * Function initializing context. Prepares context, sets logger and
+ * registers volume type.
+ */
+int ctx_init(ocf_ctx_t *ctx)
+{
+ int ret;
+
+ ret = ocf_ctx_create(ctx, &ctx_cfg);
+ if (ret)
+ return ret;
+
+ ret = volume_init(*ctx);
+ if (ret) {
+ ocf_ctx_put(*ctx);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function cleaning up context. Unregisters volume type and
+ * deinitializes context.
+ */
+void ctx_cleanup(ocf_ctx_t ctx)
+{
+ volume_cleanup(ctx);
+ ocf_ctx_put(ctx);
+}
diff --git a/src/spdk/ocf/example/simple/src/ctx.h b/src/spdk/ocf/example/simple/src/ctx.h
new file mode 100644
index 000000000..6f0360679
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/ctx.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __CTX_H__
+#define __CTX_H__
+
+#include <ocf/ocf.h>
+
+#define VOL_TYPE 1
+
+ctx_data_t *ctx_data_alloc(uint32_t pages);
+void ctx_data_free(ctx_data_t *ctx_data);
+
+int ctx_init(ocf_ctx_t *ocf_ctx);
+void ctx_cleanup(ocf_ctx_t ctx);
+
+#endif
diff --git a/src/spdk/ocf/example/simple/src/data.h b/src/spdk/ocf/example/simple/src/data.h
new file mode 100644
index 000000000..bbef4ec66
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/data.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __DATA_H__
+#define __DATA_H__
+
+struct volume_data {
+ void *ptr;
+ int offset;
+};
+
+#endif
diff --git a/src/spdk/ocf/example/simple/src/main.c b/src/spdk/ocf/example/simple/src/main.c
new file mode 100644
index 000000000..ef059fdd7
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/main.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <ocf/ocf.h>
+#include "data.h"
+#include "ctx.h"
+
+/*
+ * Cache private data. Used to share information between async contexts.
+ */
+struct cache_priv {
+ ocf_queue_t mngt_queue;
+ ocf_queue_t io_queue;
+};
+
+/*
+ * Helper function for error handling.
+ */
+void error(char *msg)
+{
+ printf("ERROR: %s", msg);
+ exit(1);
+}
+
+/*
+ * Trigger queue asynchronously. Made synchronous for simplicity.
+ * Notice that it makes all asynchronous calls synchronous, because
+ * asynchronism in OCF is achieved mostly by using queues.
+ */
+static inline void queue_kick_async(ocf_queue_t q)
+{
+ ocf_queue_run(q);
+}
+
+/*
+ * Trigger queue synchronously. May be implemented as asynchronous as well,
+ * but in some environments kicking queue synchronously may reduce latency,
+ * so to take advantage of such situations OCF calls the synchronous variant of
+ * the queue kick callback where possible.
+ */
+static void queue_kick_sync(ocf_queue_t q)
+{
+ ocf_queue_run(q);
+}
+
+/*
+ * Stop queue thread. To keep this example simple we handle queues
+ * synchronously, thus it's left non-implemented.
+ */
+static void queue_stop(ocf_queue_t q)
+{
+}
+
+/*
+ * Queue ops providing interface for running queue thread in both synchronous
+ * and asynchronous ways. The stop() operation is called just before the queue
+ * is destroyed.
+ */
+const struct ocf_queue_ops queue_ops = {
+ .kick_sync = queue_kick_sync,
+ .kick = queue_kick_async,
+ .stop = queue_stop,
+};
+
+/*
+ * Simple completion context. As lots of OCF API functions work asynchronously
+ * and call a completion callback when the job is done, we need some structure
+ * to share program state with the completion callback. In this case we have a
+ * single pointer variable to propagate the error code.
+ */
+struct simple_context {
+ int *error;
+};
+
+/*
+ * Basic asynchronous completion callback. Just propagate error code.
+ */
+static void simple_complete(ocf_cache_t cache, void *priv, int error)
+{
+ struct simple_context *context = priv;
+
+ *context->error = error;
+}
+
+/*
+ * Function starting cache and attaching cache device.
+ */
+int initialize_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
+{
+ struct ocf_mngt_cache_config cache_cfg = { .name = "cache1" };
+ struct ocf_mngt_cache_device_config device_cfg = { };
+ struct cache_priv *cache_priv;
+ struct simple_context context;
+ int ret;
+
+ /*
+ * Asynchronous callbacks will assign error code to ret. That
+ * way we have always the same variable holding last error code.
+ */
+ context.error = &ret;
+
+ /* Cache configuration */
+ ocf_mngt_cache_config_set_default(&cache_cfg);
+ cache_cfg.metadata_volatile = true;
+
+ /* Cache device (volume) configuration */
+ ocf_mngt_cache_device_config_set_default(&device_cfg);
+ device_cfg.volume_type = VOL_TYPE;
+ ret = ocf_uuid_set_str(&device_cfg.uuid, "cache");
+ if (ret)
+ return ret;
+
+ /*
+ * Allocate cache private structure. We cannot initialize it
+ * on the stack, as it may be used in various async contexts
+ * throughout the entire life span of the cache object.
+ */
+ cache_priv = malloc(sizeof(*cache_priv));
+ if (!cache_priv)
+ return -ENOMEM;
+
+ /* Start cache */
+ ret = ocf_mngt_cache_start(ctx, cache, &cache_cfg);
+ if (ret)
+ goto err_priv;
+
+ /* Assign cache priv structure to cache. */
+ ocf_cache_set_priv(*cache, cache_priv);
+
+ /*
+ * Create management queue. It will be used for performing various
+ * asynchronous management operations, such as attaching cache volume
+ * or adding core object.
+ */
+ ret = ocf_queue_create(*cache, &cache_priv->mngt_queue, &queue_ops);
+ if (ret) {
+ ocf_mngt_cache_stop(*cache, simple_complete, &context);
+ goto err_priv;
+ }
+
+ /*
+ * Assign management queue to cache. This has to be done before any
+ * other management operation. Management queue is treated specially,
+ * and it may not be used for submitting IO requests. It also will not
+ * be put on the cache stop - we have to put it manually at the end.
+ */
+ ocf_mngt_cache_set_mngt_queue(*cache, cache_priv->mngt_queue);
+
+ /* Create queue which will be used for IO submission. */
+ ret = ocf_queue_create(*cache, &cache_priv->io_queue, &queue_ops);
+ if (ret)
+ goto err_cache;
+
+ /* Attach volume to cache */
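+ /* (Queues in this example are kicked synchronously, so by the time the call
+  * below returns, simple_complete() has already run and set ret.) */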
+ ocf_mngt_cache_attach(*cache, &device_cfg, simple_complete, &context);
+ if (ret)
+ goto err_cache;
+
+ return 0;
+
+err_cache:
+ ocf_mngt_cache_stop(*cache, simple_complete, &context);
+ ocf_queue_put(cache_priv->mngt_queue);
+err_priv:
+ free(cache_priv);
+ return ret;
+}
+
+/*
+ * Add core completion callback context. We need this to propagate the error
+ * code and a handle to the freshly initialized core object.
+ */
+struct add_core_context {
+ ocf_core_t *core;
+ int *error;
+};
+
+/* Add core complete callback. Just rewrite args to context structure. */
+static void add_core_complete(ocf_cache_t cache, ocf_core_t core,
+ void *priv, int error)
+{
+ struct add_core_context *context = priv;
+
+ *context->core = core;
+ *context->error = error;
+}
+
+/*
+ * Function adding core to cache.
+ */
+int initialize_core(ocf_cache_t cache, ocf_core_t *core)
+{
+ struct ocf_mngt_core_config core_cfg = { };
+ struct add_core_context context;
+ int ret;
+
+ /*
+ * Asynchronous callback will assign the core handle to core,
+ * and the error code to ret.
+ */
+ context.core = core;
+ context.error = &ret;
+
+ /* Core configuration */
+ ocf_mngt_core_config_set_default(&core_cfg);
+ strcpy(core_cfg.name, "core1");
+ core_cfg.volume_type = VOL_TYPE;
+ ret = ocf_uuid_set_str(&core_cfg.uuid, "core");
+ if (ret)
+ return ret;
+
+ /* Add core to cache */
+ ocf_mngt_cache_add_core(cache, &core_cfg, add_core_complete, &context);
+
+ return ret;
+}
+
+/*
+ * Callback function called when write completes.
+ */
+void complete_write(struct ocf_io *io, int error)
+{
+ struct volume_data *data = ocf_io_get_data(io);
+
+ printf("WRITE COMPLETE: (error: %d)\n", error);
+
+ /* Free data buffer and io */
+ ctx_data_free(data);
+ ocf_io_put(io);
+}
+
+/*
+ * Callback function called when read completes.
+ */
+void complete_read(struct ocf_io *io, int error)
+{
+ struct volume_data *data = ocf_io_get_data(io);
+
+ printf("WRITE COMPLETE (error: %d)\n", error);
+ printf("DATA: \"%s\"\n", (char *)data->ptr);
+
+ /* Free data buffer and io */
+ ctx_data_free(data);
+ ocf_io_put(io);
+}
+
+/*
+ * Wrapper function for io submission.
+ */
+int submit_io(ocf_core_t core, struct volume_data *data,
+ uint64_t addr, uint64_t len, int dir, ocf_end_io_t cmpl)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
+ struct ocf_io *io;
+
+ /* Allocate new io */
+ io = ocf_core_new_io(core, cache_priv->io_queue, addr, len, dir, 0, 0);
+ if (!io)
+ return -ENOMEM;
+
+ /* Assign data to io */
+ ocf_io_set_data(io, data, 0);
+ /* Setup completion function */
+ ocf_io_set_cmpl(io, NULL, NULL, cmpl);
+ /* Submit io */
+ ocf_core_submit_io(io);
+
+ return 0;
+}
+
+/*
+ * This function simulates actual business logic.
+ *
+ * It performs the following steps:
+ * 1. Allocate data buffer for write and write it with example data.
+ * 2. Allocate new io, configure it for write, setup completion callback
+ * and perform write to the core.
+ * 3. Wait for write io completion (write is handled synchronously, so no
+ * actual wait is needed, but in real life we would need to use some
+ * synchronization to be sure, that completion function has been already
+ * called). Alternatively we could issue read io from write completion
+ * callback.
+ * 4. Allocate data buffer for read.
+ * 5. Allocate new io, configure it for read, setup completion callback
+ * and perform read from the core, from the same address where data
+ * was previously written.
+ * 6. Print example data in read completion callback.
+ *
+ * Data buffers and ios are freed in completion callbacks, so there is no
+ * need to handle freeing in this function.
+ */
+void perform_workload(ocf_core_t core)
+{
+ struct volume_data *data1, *data2;
+
+ /* Allocate data buffer and fill it with example data */
+ data1 = ctx_data_alloc(1);
+ if (!data1)
+ error("Unable to allocate data1\n");
+ strcpy(data1->ptr, "This is some test data");
+ /* Prepare and submit write IO to the core */
+ submit_io(core, data1, 0, 512, OCF_WRITE, complete_write);
+ /* After write completes, complete_write() callback will be called. */
+
+ /*
+ * Here we would need to wait until the write completes to be sure that
+ * the read retrieves the previously written data.
+ */
+
+ /* Allocate data buffer for read */
+ data2 = ctx_data_alloc(1);
+ if (!data2)
+ error("Unable to allocate data2\n");
+ /* Prepare and submit read IO to the core */
+ submit_io(core, data2, 0, 512, OCF_READ, complete_read);
+ /* After read completes, complete_read() callback will be called,
+ * where we print our example data to stdout.
+ */
+}
+
+static void remove_core_complete(void *priv, int error)
+{
+ struct simple_context *context = priv;
+
+ *context->error = error;
+}
+
+int main(int argc, char *argv[])
+{
+ struct cache_priv *cache_priv;
+ struct simple_context context;
+ ocf_ctx_t ctx;
+ ocf_cache_t cache1;
+ ocf_core_t core1;
+ int ret;
+
+ context.error = &ret;
+
+ /* Initialize OCF context */
+ if (ctx_init(&ctx))
+ error("Unable to initialize context\n");
+
+ /* Start cache */
+ if (initialize_cache(ctx, &cache1))
+ error("Unable to start cache\n");
+
+ /* Add core */
+ if (initialize_core(cache1, &core1))
+ error("Unable to add core\n");
+
+ /* Do some actual io operations */
+ perform_workload(core1);
+
+ /* Remove core from cache */
+ ocf_mngt_cache_remove_core(core1, remove_core_complete, &context);
+ if (ret)
+ error("Unable to remove core\n");
+
+ /* Stop cache */
+ ocf_mngt_cache_stop(cache1, simple_complete, &context);
+ if (ret)
+ error("Unable to stop cache\n");
+
+ cache_priv = ocf_cache_get_priv(cache1);
+
+ /* Put the management queue */
+ ocf_queue_put(cache_priv->mngt_queue);
+
+ free(cache_priv);
+
+ /* Deinitialize context */
+ ctx_cleanup(ctx);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/example/simple/src/volume.c b/src/spdk/ocf/example/simple/src/volume.c
new file mode 100644
index 000000000..1aae692bb
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/volume.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include <ocf/ocf.h>
+#include "volume.h"
+#include "data.h"
+#include "ctx.h"
+
+#define VOL_SIZE 200*1024*1024
+
+/*
+ * In open() function we store uuid data as volume name (for debug messages)
+ * and allocate 200 MiB of memory to simulate backend storage device.
+ */
+static int volume_open(ocf_volume_t volume, void *volume_params)
+{
+ const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
+ struct myvolume *myvolume = ocf_volume_get_priv(volume);
+
+ myvolume->name = ocf_uuid_to_str(uuid);
+ myvolume->mem = malloc(VOL_SIZE);
+
+ printf("VOL OPEN: (name: %s)\n", myvolume->name);
+
+ return 0;
+}
+
+/*
+ * In close() function we just free memory allocated in open().
+ */
+static void volume_close(ocf_volume_t volume)
+{
+ struct myvolume *myvolume = ocf_volume_get_priv(volume);
+
+ printf("VOL CLOSE: (name: %s)\n", myvolume->name);
+ free(myvolume->mem);
+}
+
+/*
+ * In submit_io() function we simulate read or write to backend storage device
+ * by doing memcpy() to or from previously allocated memory buffer.
+ */
+static void volume_submit_io(struct ocf_io *io)
+{
+ struct volume_data *data;
+ struct myvolume *myvolume;
+
+ data = ocf_io_get_data(io);
+ myvolume = ocf_volume_get_priv(ocf_io_get_volume(io));
+
+ if (io->dir == OCF_WRITE) {
+ memcpy(myvolume->mem + io->addr,
+ data->ptr + data->offset, io->bytes);
+ } else {
+ memcpy(data->ptr + data->offset,
+ myvolume->mem + io->addr, io->bytes);
+ }
+
+ printf("VOL: (name: %s), IO: (dir: %s, addr: %ld, bytes: %d)\n",
+ myvolume->name, io->dir == OCF_READ ? "read" : "write",
+ io->addr, io->bytes);
+
+ io->end(io, 0);
+}
+
+/*
+ * We don't need to implement submit_flush(). Just complete io with success.
+ */
+static void volume_submit_flush(struct ocf_io *io)
+{
+ io->end(io, 0);
+}
+
+/*
+ * We don't need to implement submit_discard(). Just complete io with success.
+ */
+static void volume_submit_discard(struct ocf_io *io)
+{
+ io->end(io, 0);
+}
+
+/*
+ * Let's set maximum io size to 128 KiB.
+ */
+static unsigned int volume_get_max_io_size(ocf_volume_t volume)
+{
+ return 128 * 1024;
+}
+
+/*
+ * Return volume size.
+ */
+static uint64_t volume_get_length(ocf_volume_t volume)
+{
+ return VOL_SIZE;
+}
+
+/*
+ * In set_data() we just assign data and offset to io.
+ */
+static int myvolume_io_set_data(struct ocf_io *io, ctx_data_t *data,
+ uint32_t offset)
+{
+ struct myvolume_io *myvolume_io = ocf_io_get_priv(io);
+
+ myvolume_io->data = data;
+ myvolume_io->offset = offset;
+
+ return 0;
+}
+
+/*
+ * In get_data() return data stored in io.
+ */
+static ctx_data_t *myvolume_io_get_data(struct ocf_io *io)
+{
+ struct myvolume_io *myvolume_io = ocf_io_get_priv(io);
+
+ return myvolume_io->data;
+}
+
+/*
+ * This structure contains volume properties. It describes volume
+ * type, which can be later instantiated as backend storage for cache
+ * or core.
+ */
+const struct ocf_volume_properties volume_properties = {
+ .name = "Example volume",
+ .io_priv_size = sizeof(struct myvolume_io),
+ .volume_priv_size = sizeof(struct myvolume),
+ .caps = {
+ .atomic_writes = 0,
+ },
+ .ops = {
+ .open = volume_open,
+ .close = volume_close,
+ .submit_io = volume_submit_io,
+ .submit_flush = volume_submit_flush,
+ .submit_discard = volume_submit_discard,
+ .get_max_io_size = volume_get_max_io_size,
+ .get_length = volume_get_length,
+ },
+ .io_ops = {
+ .set_data = myvolume_io_set_data,
+ .get_data = myvolume_io_get_data,
+ },
+};
+
+/*
+ * This function registers volume type in OCF context.
+ * It should be called just after context initialization.
+ */
+int volume_init(ocf_ctx_t ocf_ctx)
+{
+ return ocf_ctx_register_volume_type(ocf_ctx, VOL_TYPE,
+ &volume_properties);
+}
+
+/*
+ * This function unregisters volume type in OCF context.
+ * It should be called just before context cleanup.
+ */
+void volume_cleanup(ocf_ctx_t ocf_ctx)
+{
+ ocf_ctx_unregister_volume_type(ocf_ctx, VOL_TYPE);
+}
diff --git a/src/spdk/ocf/example/simple/src/volume.h b/src/spdk/ocf/example/simple/src/volume.h
new file mode 100644
index 000000000..83314b66b
--- /dev/null
+++ b/src/spdk/ocf/example/simple/src/volume.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __VOLUME_H__
+#define __VOLUME_H__
+
+#include <ocf/ocf.h>
+#include "ocf_env.h"
+#include "ctx.h"
+#include "data.h"
+
+struct myvolume_io {
+ struct volume_data *data;
+ uint32_t offset;
+};
+
+struct myvolume {
+ uint8_t *mem;
+ const char *name;
+};
+
+int volume_init(ocf_ctx_t ocf_ctx);
+void volume_cleanup(ocf_ctx_t ocf_ctx);
+
+#endif
diff --git a/src/spdk/ocf/inc/cleaning/acp.h b/src/spdk/ocf/inc/cleaning/acp.h
new file mode 100644
index 000000000..9ca121a84
--- /dev/null
+++ b/src/spdk/ocf/inc/cleaning/acp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __OCF_CLEANING_ACP_H__
+#define __OCF_CLEANING_ACP_H__
+
+/**
+ * @file
+ * @brief ACP cleaning policy API
+ */
+
+enum ocf_cleaning_acp_parameters {
+ ocf_acp_wake_up_time,
+ ocf_acp_flush_max_buffers,
+};
+
+/**
+ * @name ACP cleaning policy parameters
+ * @{
+ */
+
+/**
+ * ACP cleaning policy time between flushing cycles (in ms)
+ */
+
+/**< Wake up time minimum value */
+#define OCF_ACP_MIN_WAKE_UP 0
+/**< Wake up time maximum value */
+#define OCF_ACP_MAX_WAKE_UP 10000
+/**< Wake up time default value */
+#define OCF_ACP_DEFAULT_WAKE_UP 10
+
+/**
+ * ACP cleaning thread number of dirty cache lines to be flushed in one cycle
+ */
+
+/** Dirty cache lines to be flushed in one cycle minimum value */
+#define OCF_ACP_MIN_FLUSH_MAX_BUFFERS 1
+/** Dirty cache lines to be flushed in one cycle maximum value */
+#define OCF_ACP_MAX_FLUSH_MAX_BUFFERS 10000
+/** Dirty cache lines to be flushed in one cycle default value */
+#define OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS 128
+
+/**
+ * @}
+ */
+
+#endif /* __OCF_CLEANING_ACP_H__ */
diff --git a/src/spdk/ocf/inc/cleaning/alru.h b/src/spdk/ocf/inc/cleaning/alru.h
new file mode 100644
index 000000000..66548bbec
--- /dev/null
+++ b/src/spdk/ocf/inc/cleaning/alru.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __OCF_CLEANING_ALRU_H__
+#define __OCF_CLEANING_ALRU_H__
+
+/**
+ * @file
+ * @brief ALRU cleaning policy API
+ */
+
+enum ocf_cleaning_alru_parameters {
+ ocf_alru_wake_up_time,
+ ocf_alru_stale_buffer_time,
+ ocf_alru_flush_max_buffers,
+ ocf_alru_activity_threshold,
+};
+
+/**
+ * @name ALRU cleaning policy parameters
+ * @{
+ */
+
+/**
+ * ALRU cleaning thread wake up time
+ */
+
+/** Wake up time minimum value */
+#define OCF_ALRU_MIN_WAKE_UP 0
+/** Wake up time maximum value */
+#define OCF_ALRU_MAX_WAKE_UP 3600
+/** Wake up time default value */
+#define OCF_ALRU_DEFAULT_WAKE_UP 20
+
+/**
+ * ALRU cleaning thread staleness time
+ */
+
+/** Staleness time minimum value */
+#define OCF_ALRU_MIN_STALENESS_TIME 1
+/** Staleness time maximum value */
+#define OCF_ALRU_MAX_STALENESS_TIME 3600
+/** Staleness time default value*/
+#define OCF_ALRU_DEFAULT_STALENESS_TIME 120
+
+/**
+ * ALRU cleaning thread number of dirty cache lines to be flushed in one cycle
+ */
+
+/** Dirty cache lines to be flushed in one cycle minimum value */
+#define OCF_ALRU_MIN_FLUSH_MAX_BUFFERS 1
+/** Dirty cache lines to be flushed in one cycle maximum value */
+#define OCF_ALRU_MAX_FLUSH_MAX_BUFFERS 10000
+/** Dirty cache lines to be flushed in one cycle default value */
+#define OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS 100
+
+/**
+ * ALRU cleaning thread cache idle time before flushing thread can start
+ */
+
+/** Idle time before flushing thread can start minimum value */
+#define OCF_ALRU_MIN_ACTIVITY_THRESHOLD 0
+/** Idle time before flushing thread can start maximum value */
+#define OCF_ALRU_MAX_ACTIVITY_THRESHOLD 1000000
+/** Idle time before flushing thread can start default value */
+#define OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD 10000
+
+/**
+ * @}
+ */
+
+
+#endif /* __OCF_CLEANING_ALRU_H__ */
diff --git a/src/spdk/ocf/inc/ocf.h b/src/spdk/ocf/inc/ocf.h
new file mode 100644
index 000000000..7b014119f
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_H__
+#define __OCF_H__
+
+/**
+ * @file
+ * @brief Main OCF header
+ * This file doesn't contain any functions or structures.
+ * It's simply a collective include file to allow OCF users to include
+ * everything at once.
+ */
+
+#include "ocf_def.h"
+#include "ocf_types.h"
+#include "ocf_io.h"
+#include "ocf_volume.h"
+#include "ocf_cache.h"
+#include "ocf_core.h"
+#include "ocf_queue.h"
+#include "ocf_cleaner.h"
+#include "cleaning/alru.h"
+#include "cleaning/acp.h"
+#include "promotion/nhit.h"
+#include "ocf_metadata.h"
+#include "ocf_metadata_updater.h"
+#include "ocf_io_class.h"
+#include "ocf_stats.h"
+#include "ocf_mngt.h"
+#include "ocf_ctx.h"
+#include "ocf_err.h"
+#include "ocf_trace.h"
+
+#endif /* __OCF_H__ */
diff --git a/src/spdk/ocf/inc/ocf_cache.h b/src/spdk/ocf/inc/ocf_cache.h
new file mode 100644
index 000000000..80860dc81
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_cache.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+
+#ifndef __OCF_CACHE_H__
+#define __OCF_CACHE_H__
+
+/**
+ * @file
+ * @brief OCF cache API
+ */
+
+#include "ocf_volume.h"
+#include "ocf_ctx.h"
+#include "ocf_def.h"
+#include "ocf_stats.h"
+
+/**
+ * @brief Cache info: configuration, status
+ */
+struct ocf_cache_info {
+ bool attached;
+ /*!< True if caching cache is attached to cache */
+
+ uint8_t volume_type;
+ /*!< Cache volume type */
+
+ uint32_t size;
+ /*!< Actual cache size (in cache lines) */
+
+ /* Statistics of inactive cores */
+ struct {
+ struct ocf_stat occupancy;
+ /*!< Cache occupancy (in cache lines) */
+
+ struct ocf_stat clean;
+ /*!< Clean blocks within cache (in cache lines) */
+
+ struct ocf_stat dirty;
+ /*!< Dirty blocks within cache (in cache lines) */
+ } inactive;
+
+ uint32_t occupancy;
+ /*!< Actual cache occupancy (in cache lines) */
+
+ uint32_t dirty;
+ /*!< Dirty blocks within cache (in cache lines) */
+
+ uint32_t dirty_initial;
+ /*!< Dirty blocks within cache that where there when switching
+ * out of WB mode
+ */
+
+ uint32_t dirty_for;
+ /*!< How long there are dirty cache lines (in seconds) */
+
+ ocf_cache_mode_t cache_mode;
+ /*!< Current cache mode */
+
+ /* Statistics of fallback Pass Through */
+ struct {
+ int error_counter;
+ /*!< How many requests to cache failed because of IO error */
+
+ bool status;
+ /*!< Current cache mode is PT,
+ set as a result of reaching IO error threshold */
+ } fallback_pt;
+
+ uint8_t state;
+ /*!< Cache state (running/flushing/stopping etc...) */
+
+ ocf_eviction_t eviction_policy;
+ /*!< Eviction policy selected */
+
+ ocf_cleaning_t cleaning_policy;
+ /*!< Cleaning policy selected */
+
+ ocf_promotion_t promotion_policy;
+ /*!< Promotion policy selected */
+
+ ocf_cache_line_size_t cache_line_size;
+ /*!< Cache line size in KiB */
+
+ uint32_t flushed;
+ /*!< Number of block flushed in ongoing flush operation */
+
+ uint32_t core_count;
+ /*!< Number of core devices associated with this cache */
+
+ uint64_t metadata_footprint;
+ /*!< Metadata memory footprint (in bytes) */
+
+ uint32_t metadata_end_offset;
+ /*!< LBA offset where metadata ends (in 4KiB blocks) */
+};
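+
+/*
+ * Illustrative query of cache state using this structure (error handling
+ * omitted; cache is an already started ocf_cache_t handle):
+ *
+ *   struct ocf_cache_info info;
+ *   if (!ocf_cache_get_info(cache, &info))
+ *           printf("occupancy: %u of %u cache lines\n", info.occupancy, info.size);
+ */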
+
+/**
+ * @brief Obtain volume from cache
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Volume, NULL if detached.
+ */
+ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache);
+
+/**
+ * @brief Get name of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Cache name
+ */
+const char *ocf_cache_get_name(ocf_cache_t cache);
+
+/**
+ * @brief Check if cache is in incomplete state
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval 1 Cache is in incomplete state
+ * @retval 0 Cache is in complete state
+ */
+bool ocf_cache_is_incomplete(ocf_cache_t cache);
+
+/**
+ * @brief Check if caching device is attached
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval 1 Caching device is attached
+ * @retval 0 Caching device is detached
+ */
+bool ocf_cache_is_device_attached(ocf_cache_t cache);
+
+/**
+ * @brief Check if cache object is running
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval 1 Cache object is running
+ * @retval 0 Cache object is not running
+ */
+bool ocf_cache_is_running(ocf_cache_t cache);
+
+/**
+ * @brief Get cache mode of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Cache mode
+ */
+ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache);
+
+/**
+ * @brief Get cache line size of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Cache line size
+ */
+ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache);
+
+/**
+ * @brief Convert bytes to cache lines
+ *
+ * @param[in] cache Cache object
+ * @param[in] bytes Number of bytes
+ *
+ * @retval Cache lines count
+ */
+uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes);
+
+/**
+ * @brief Get core count of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Core count
+ */
+uint32_t ocf_cache_get_core_count(ocf_cache_t cache);
+
+/**
+ * @brief Get info of given cache object
+ *
+ * @param[in] cache Cache object
+ * @param[out] info Cache info structure
+ *
+ * @retval 0 Success
+ * @retval Non-zero Fail
+ */
+int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info);
+
+/**
+ * @brief Get UUID of volume associated with cache
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Volume UUID, NULL if detached.
+ */
+const struct ocf_volume_uuid *ocf_cache_get_uuid(ocf_cache_t cache);
+
+/**
+ * @brief Get OCF context of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval OCF context
+ */
+ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache);
+
+/**
+ * @brief Get volume type id of given cache object
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval volume type id, -1 if device detached
+ */
+uint8_t ocf_cache_get_type_id(ocf_cache_t cache);
+
+/**
+ * @brief Set cache private data
+ *
+ * @param[in] cache Cache object
+ * @param[in] priv Private data
+ */
+void ocf_cache_set_priv(ocf_cache_t cache, void *priv);
+
+/**
+ * @brief Get cache private data
+ *
+ * @param[in] cache Cache object
+ *
+ * @retval Private data
+ */
+void *ocf_cache_get_priv(ocf_cache_t cache);
+
+#endif /* __OCF_CACHE_H__ */
diff --git a/src/spdk/ocf/inc/ocf_cfg.h b/src/spdk/ocf/inc/ocf_cfg.h
new file mode 100644
index 000000000..266d6c968
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_cfg.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+
+#ifndef __OCF_CFG_H__
+#define __OCF_CFG_H__
+
+/**
+ * @file
+ * @brief OCF configuration file
+ */
+
+/**
+ * Configure maximum numbers of cores in cache instance
+ */
+#ifndef OCF_CONFIG_MAX_CORES
+#define OCF_CONFIG_MAX_CORES 4096
+#endif
+
+/** Maximum number of IO classes that can be configured */
+#ifndef OCF_CONFIG_MAX_IO_CLASSES
+#define OCF_CONFIG_MAX_IO_CLASSES 33
+#endif
+
+#if OCF_CONFIG_MAX_IO_CLASSES > 256
+#error "Limit of maximum number of IO classes exceeded"
+#endif
+
+/** Enabling debug statistics */
+#ifndef OCF_CONFIG_DEBUG_STATS
+#define OCF_CONFIG_DEBUG_STATS 0
+#endif
+
+#endif /* __OCF_CFG_H__ */
diff --git a/src/spdk/ocf/inc/ocf_cleaner.h b/src/spdk/ocf/inc/ocf_cleaner.h
new file mode 100644
index 000000000..43d6a30cc
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_cleaner.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef OCF_CLEANER_H_
+#define OCF_CLEANER_H_
+
+/**
+ * @file
+ * @brief OCF cleaner API for synchronizing dirty data
+ *
+ */
+
+/**
+ * @brief OCF Cleaner completion
+ *
+ * @note Completion function for cleaner
+ *
+ * @param[in] cleaner Cleaner instance
+ * @param[in] interval Time to sleep before next cleaner iteration
+ */
+typedef void (*ocf_cleaner_end_t)(ocf_cleaner_t cleaner, uint32_t interval);
+
+/**
+ * @brief Set cleaner completion function
+ *
+ * @param[in] cleaner Cleaner instance
+ * @param[in] fn Completion function
+ */
+void ocf_cleaner_set_cmpl(ocf_cleaner_t cleaner, ocf_cleaner_end_t fn);
+
+/**
+ * @brief Run cleaner
+ *
+ * @param[in] c Cleaner instance to run
+ * @param[in] queue IO queue handle
+ */
+void ocf_cleaner_run(ocf_cleaner_t c, ocf_queue_t queue);
+
+/**
+ * @brief Set cleaner private data
+ *
+ * @param[in] c Cleaner handle
+ * @param[in] priv Private data
+ */
+void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv);
+
+/**
+ * @brief Get cleaner private data
+ *
+ * @param[in] c Cleaner handle
+ *
+ * @retval Cleaner private data
+ */
+void *ocf_cleaner_get_priv(ocf_cleaner_t c);
+
+/**
+ * @brief Get cache instance to which cleaner belongs
+ *
+ * @param[in] c Cleaner handle
+ *
+ * @retval Cache instance
+ */
+ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c);
+
+#endif
diff --git a/src/spdk/ocf/inc/ocf_core.h b/src/spdk/ocf/inc/ocf_core.h
new file mode 100644
index 000000000..f9e8f6714
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_core.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/**
+ * @file
+ * @brief OCF core API
+ */
+
+#ifndef __OCF_CORE_H__
+#define __OCF_CORE_H__
+
+#include "ocf_volume.h"
+#include "ocf_io.h"
+#include "ocf_mngt.h"
+
+struct ocf_core_info {
+ /** Core size in cache line size unit */
+ uint64_t core_size;
+
+ /** Core size in bytes unit */
+ uint64_t core_size_bytes;
+
+ /** Fields referring to the ongoing flush operation */
+ struct {
+ /** Number of blocks flushed in ongoing flush operation */
+ uint32_t flushed;
+
+ /** Number of blocks left to flush in ongoing flush operation */
+ uint32_t dirty;
+ };
+
+ /** How long the core has been dirty (in seconds) */
+ uint32_t dirty_for;
+
+ /** Sequential cutoff threshold (in bytes) */
+ uint32_t seq_cutoff_threshold;
+
+ /** Sequential cutoff policy */
+ ocf_seq_cutoff_policy seq_cutoff_policy;
+};
+
+/**
+ * @brief Get OCF core by name
+ *
+ * @param[in] cache OCF cache
+ * @param[in] name Core name
+ * @param[in] name_len Core name length
+ * @param[out] core OCF core handle
+ *
+ * @retval 0 Get core successfully
+ * @retval -OCF_ERR_CORE_NOT_EXIST Core with given name doesn't exist
+ */
+int ocf_core_get_by_name(ocf_cache_t cache, const char *name, size_t name_len,
+ ocf_core_t *core);
+
+/**
+ * @brief Obtain cache object from core
+ *
+ * @param[in] core Core object
+ *
+ * @retval Cache object
+ */
+ocf_cache_t ocf_core_get_cache(ocf_core_t core);
+
+/**
+ * @brief Obtain volume associated with core
+ *
+ * @param[in] core Core object
+ *
+ * @retval Volume
+ */
+ocf_volume_t ocf_core_get_volume(ocf_core_t core);
+
+/**
+ * @brief Obtain front volume of the core
+ *
+ * @param[in] core Core object
+ *
+ * @retval Front volume
+ */
+ocf_volume_t ocf_core_get_front_volume(ocf_core_t core);
+
+/**
+ * @brief Get UUID of volume associated with core
+ *
+ * @param[in] core Core object
+ *
+ * @retval Volume UUID
+ */
+static inline const struct ocf_volume_uuid *ocf_core_get_uuid(ocf_core_t core)
+{
+ return ocf_volume_get_uuid(ocf_core_get_volume(core));
+}
+
+/**
+ * @brief Get sequential cutoff threshold of given core object
+ *
+ * @param[in] core Core object
+ *
+ * @retval Sequential cutoff threshold [B]
+ */
+uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core);
+
+/**
+ * @brief Get sequential cutoff policy of given core object
+ *
+ * @param[in] core Core object
+ *
+ * @retval Sequential cutoff policy
+ */
+ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core);
+
+/**
+ * @brief Get name of given core object
+ *
+ * @param[in] core Core object
+ *
+ * @retval Core name
+ */
+const char *ocf_core_get_name(ocf_core_t core);
+
+/**
+ * @brief Get core state
+ *
+ * @param[in] core Core object
+ *
+ * @retval Core state
+ */
+ocf_core_state_t ocf_core_get_state(ocf_core_t core);
+
+/**
+ * @brief Allocate new ocf_io
+ *
+ * @param[in] core Core object
+ * @param[in] queue IO queue handle
+ * @param[in] addr OCF IO destination address
+ * @param[in] bytes OCF IO size in bytes
+ * @param[in] dir OCF IO direction
+ * @param[in] io_class OCF IO destination class
+ * @param[in] flags OCF IO flags
+ *
+ * @retval ocf_io object
+ */
+static inline struct ocf_io *ocf_core_new_io(ocf_core_t core, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags)
+{
+ ocf_volume_t volume = ocf_core_get_front_volume(core);
+
+ return ocf_volume_new_io(volume, queue, addr, bytes, dir,
+ io_class, flags);
+}
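+
+/*
+ * Illustrative submission flow (queue, addr, bytes, data and end_fn are
+ * placeholders; ocf_io_set_data() and ocf_io_set_cmpl() come from ocf_io.h):
+ *
+ *   struct ocf_io *io = ocf_core_new_io(core, queue, addr, bytes,
+ *                   OCF_WRITE, 0, 0);
+ *   if (io) {
+ *           ocf_io_set_data(io, data, 0);
+ *           ocf_io_set_cmpl(io, NULL, NULL, end_fn);
+ *           ocf_core_submit_io(io);
+ *   }
+ */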
+
+/**
+ * @brief Submit ocf_io
+ *
+ * @param[in] io IO to be submitted
+ */
+static inline void ocf_core_submit_io(struct ocf_io *io)
+{
+ ocf_volume_submit_io(io);
+}
+
+/**
+ * @brief Fast path for submitting IO. If possible, request is processed
+ * immediately without adding to internal request queue
+ *
+ * @param[in] io IO to be submitted
+ *
+ * @retval 0 IO has been submitted successfully
+ * @retval Non-zero Fast submit failed. Try to submit IO with ocf_core_submit_io()
+ */
+int ocf_core_submit_io_fast(struct ocf_io *io);
+
+/**
+ * @brief Submit ocf_io with flush command
+ *
+ * @param[in] io IO to be submitted
+ */
+static inline void ocf_core_submit_flush(struct ocf_io *io)
+{
+ ocf_volume_submit_flush(io);
+}
+
+/**
+ * @brief Submit ocf_io with discard command
+ *
+ * @param[in] io IO to be submitted
+ */
+static inline void ocf_core_submit_discard(struct ocf_io *io)
+{
+ ocf_volume_submit_discard(io);
+}
+
+/**
+ * @brief Core visitor function type which is called back when iterating over
+ * cores.
+ *
+ * @param[in] core Core which is currently iterated (visited)
+ * @param[in] cntx Visitor context
+ *
+ * @retval 0 continue visiting cores
+ * @retval Non-zero stop iterating and return result
+ */
+typedef int (*ocf_core_visitor_t)(ocf_core_t core, void *cntx);
+
+/**
+ * @brief Run visitor function for each core of given cache
+ *
+ * @param[in] cache OCF cache instance
+ * @param[in] visitor Visitor function
+ * @param[in] cntx Visitor context
+ * @param[in] only_opened Visit only opened cores
+ *
+ * @retval 0 Success
+ * @retval Non-zero Fail
+ */
+int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
+ bool only_opened);
+
+/**
+ * @brief Get info of given core object
+ *
+ * @param[in] core Core object
+ * @param[out] info Core info structure
+ *
+ * @retval 0 Success
+ * @retval Non-zero Fail
+ */
+int ocf_core_get_info(ocf_core_t core, struct ocf_core_info *info);
+
+/**
+ * @brief Set core private data
+ *
+ * @param[in] core Core object
+ * @param[in] priv Private data
+ */
+void ocf_core_set_priv(ocf_core_t core, void *priv);
+
+/**
+ * @brief Get core private data
+ *
+ * @param[in] core Core object
+ *
+ * @retval Private data
+ */
+void *ocf_core_get_priv(ocf_core_t core);
+
+#endif /* __OCF_CORE_H__ */
diff --git a/src/spdk/ocf/inc/ocf_ctx.h b/src/spdk/ocf/inc/ocf_ctx.h
new file mode 100644
index 000000000..b12cc3cba
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_ctx.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_CTX_H__
+#define __OCF_CTX_H__
+
+/**
+ * @file
+ * @brief OCF library context API
+ */
+
+#include "ocf_volume.h"
+#include "ocf_logger.h"
+
+/**
+ * @brief Seeking start position in environment data buffer
+ */
+typedef enum {
+ ctx_data_seek_begin,
+ /*!< Seeking from the beginning of environment data buffer */
+ ctx_data_seek_current,
+ /*!< Seeking from current position in environment data buffer */
+} ctx_data_seek_t;
+
+/**
+ * @brief Context data representation ops
+ */
+struct ocf_data_ops {
+ /**
+ * @brief Allocate context data buffer
+ *
+ * @param[in] pages The size of data buffer in pages
+ *
+ * @return Context data buffer
+ */
+ ctx_data_t *(*alloc)(uint32_t pages);
+
+ /**
+ * @brief Free context data buffer
+ *
+ * @param[in] data Context data buffer which shall be freed
+ */
+ void (*free)(ctx_data_t *data);
+
+ /**
+ * @brief Lock context data buffer to disable swap-out
+ *
+ * @param[in] data Context data buffer which shall be locked
+ *
+ * @retval 0 Memory locked successfully
+ * @retval Non-zero Memory locking failure
+ */
+ int (*mlock)(ctx_data_t *data);
+
+ /**
+ * @brief Unlock context data buffer
+ *
+ * @param[in] data Context data buffer to be unlocked
+ */
+ void (*munlock)(ctx_data_t *data);
+
+ /**
+ * @brief Read from environment data buffer into raw data buffer
+ *
+ * @param[in,out] dst Destination raw memory buffer
+ * @param[in] src Source context data buffer
+ * @param[in] size Number of bytes to be read
+ *
+ * @return Number of read bytes
+ */
+ uint32_t (*read)(void *dst, ctx_data_t *src, uint32_t size);
+
+ /**
+ * @brief Write raw data buffer into context data buffer
+ *
+ * @param[in,out] dst Destination context data buffer
+ * @param[in] src Source raw memory buffer
+ * @param[in] size Number of bytes to be written
+ *
+ * @return Number of written bytes
+ */
+ uint32_t (*write)(ctx_data_t *dst, const void *src, uint32_t size);
+
+ /**
+ * @brief Zero context data buffer
+ *
+ * @param[in,out] dst Destination context data buffer to be zeroed
+ * @param[in] size Number of bytes to be zeroed
+ *
+ * @return Number of zeroed bytes
+ */
+ uint32_t (*zero)(ctx_data_t *dst, uint32_t size);
+
+ /**
+ * @brief Seek read/write head in context data buffer for specified
+ * offset
+ *
+ * @param[in,out] dst Destination context data buffer to seek within
+ * @param[in] seek Position to seek from (beginning or current)
+ * @param[in] size Number of bytes to seek by
+ *
+ * @return Number of bytes the position was moved by
+ */
+ uint32_t (*seek)(ctx_data_t *dst, ctx_data_seek_t seek, uint32_t size);
+
+ /**
+ * @brief Copy context data buffer content
+ *
+ * @param[in,out] dst Destination context data buffer
+ * @param[in] src Source context data buffer
+ * @param[in] to Starting offset in destination buffer
+ * @param[in] from Starting offset in source buffer
+ * @param[in] bytes Number of bytes to be copied
+ *
+ * @return Number of bytes copied
+ */
+ uint64_t (*copy)(ctx_data_t *dst, ctx_data_t *src,
+ uint64_t to, uint64_t from, uint64_t bytes);
+
+ /**
+ * @brief Erase content of data buffer
+ *
+ * @param[in] dst Context data buffer to be erased
+ */
+ void (*secure_erase)(ctx_data_t *dst);
+};
+
+/**
+ * @brief Cleaner operations
+ */
+struct ocf_cleaner_ops {
+ /**
+ * @brief Initialize cleaner.
+ *
+ * This function should create worker, thread, timer or any other
+ * mechanism responsible for calling cleaner routine.
+ *
+ * @param[in] c Descriptor of cleaner to be initialized
+ *
+ * @retval 0 Cleaner has been initialized successfully
+ * @retval Non-zero Cleaner initialization failure
+ */
+ int (*init)(ocf_cleaner_t c);
+
+ /**
+ * @brief Kick cleaner thread.
+ *
+ * @param[in] c Descriptor of cleaner to be kicked.
+ */
+ void (*kick)(ocf_cleaner_t c);
+
+ /**
+ * @brief Stop cleaner
+ *
+ * @param[in] c Descriptor of cleaner being stopped
+ */
+ void (*stop)(ocf_cleaner_t c);
+};
+
+/**
+ * @brief Metadata updater operations
+ */
+struct ocf_metadata_updater_ops {
+ /**
+ * @brief Initialize metadata updater.
+ *
+ * This function should create worker, thread, timer or any other
+ * mechanism responsible for calling metadata updater routine.
+ *
+ * @param[in] mu Handle to metadata updater to be initialized
+ *
+ * @retval 0 Metadata updater has been initialized successfully
+ * @retval Non-zero Metadata updater initialization failure
+ */
+ int (*init)(ocf_metadata_updater_t mu);
+
+ /**
+ * @brief Kick metadata updater processing
+ *
+ * This function should inform worker, thread or any other mechanism,
+ * that there are new metadata requests to be processed.
+ *
+ * @param[in] mu Metadata updater to be kicked
+ */
+ void (*kick)(ocf_metadata_updater_t mu);
+
+ /**
+ * @brief Stop metadata updater
+ *
+ * @param[in] mu Metadata updater being stopped
+ */
+ void (*stop)(ocf_metadata_updater_t mu);
+};
+
+/**
+ * @brief OCF context specific operation
+ */
+struct ocf_ctx_ops {
+ /* Context data operations */
+ struct ocf_data_ops data;
+
+ /* Cleaner operations */
+ struct ocf_cleaner_ops cleaner;
+
+ /* Metadata updater operations */
+ struct ocf_metadata_updater_ops metadata_updater;
+
+ /* Logger operations */
+ struct ocf_logger_ops logger;
+};
+
+struct ocf_ctx_config {
+ /* Context name */
+ const char *name;
+
+ /* Context operations */
+ const struct ocf_ctx_ops ops;
+
+ /* Context logger priv */
+ void *logger_priv;
+};
+
+/**
+ * @brief Register volume interface
+ *
+ * @note The type id of volume operations must be unique and cannot be reused.
+ *
+ * @param[in] ctx OCF context
+ * @param[in] properties Reference to volume properties
+ * @param[in] type_id Type id of volume operations
+ *
+ * @retval 0 Volume operations registered successfully
+ * @retval Non-zero Volume registration failure
+ */
+int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id,
+ const struct ocf_volume_properties *properties);
+
+/**
+ * @brief Unregister volume interface
+ *
+ * @param[in] ctx OCF context
+ * @param[in] type_id Type id of volume operations
+ */
+void ocf_ctx_unregister_volume_type(ocf_ctx_t ctx, uint8_t type_id);
+
+/**
+ * @brief Get volume type operations by type id
+ *
+ * @param[in] ctx OCF context
+ * @param[in] type_id Type id of volume operations which were registered
+ *
+ * @return Volume type
+ * @retval NULL When volume operations were not registered
+ * for requested type
+ */
+ocf_volume_type_t ocf_ctx_get_volume_type(ocf_ctx_t ctx, uint8_t type_id);
+
+/**
+ * @brief Get volume type id by type
+ *
+ * @param[in] ctx OCF context
+ * @param[in] type Type of volume operations which were registered
+ *
+ * @return Volume type id
+ * @retval -1 When volume operations were not registered
+ * for requested type
+ */
+int ocf_ctx_get_volume_type_id(ocf_ctx_t ctx, ocf_volume_type_t type);
+
+/**
+ * @brief Create volume of given type
+ *
+ * @param[in] ctx handle to object designating ocf context
+ * @param[out] volume volume handle
+ * @param[in] uuid OCF volume UUID
+ * @param[in] type_id cache/core volume type id
+ *
+ * @return Zero on success, otherwise an error
+ */
+int ocf_ctx_volume_create(ocf_ctx_t ctx, ocf_volume_t *volume,
+ struct ocf_volume_uuid *uuid, uint8_t type_id);
+
+/**
+ * @brief Create and initialize OCF context
+ *
+ * @param[out] ctx OCF context
+ * @param[in] cfg OCF context configuration
+ *
+ * @return Zero when success, otherwise an error
+ */
+int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg);
+
+/**
+ * @brief Increase reference counter of ctx
+ *
+ * @param[in] ctx OCF context
+ */
+void ocf_ctx_get(ocf_ctx_t ctx);
+
+/**
+ * @brief Decrease reference counter of ctx
+ *
+ * @param[in] ctx OCF context
+ */
+void ocf_ctx_put(ocf_ctx_t ctx);
+
+#endif /* __OCF_CTX_H__ */
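
A hedged sketch of the context lifecycle described above. The my_ctx_cfg and my_cache_volume_props objects are hypothetical, environment-provided definitions (filled with the data, cleaner, metadata updater and logger ops), and the type id 0 is used only as an example.

#include <ocf/ocf.h>	/* assumed umbrella header for the OCF API */

extern const struct ocf_ctx_config my_ctx_cfg;			/* hypothetical */
extern const struct ocf_volume_properties my_cache_volume_props;	/* hypothetical */

static int my_setup_ocf_context(ocf_ctx_t *ctx)
{
	int ret;

	/* Create the context from the environment-provided configuration. */
	ret = ocf_ctx_create(ctx, &my_ctx_cfg);
	if (ret)
		return ret;

	/* Register one volume type so caches/cores can be created later. */
	ret = ocf_ctx_register_volume_type(*ctx, 0, &my_cache_volume_props);
	if (ret)
		ocf_ctx_put(*ctx);	/* drop the initial reference on failure */

	return ret;
}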
diff --git a/src/spdk/ocf/inc/ocf_def.h b/src/spdk/ocf/inc/ocf_def.h
new file mode 100644
index 000000000..bdbe21105
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_def.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+
+#ifndef __OCF_DEF_H__
+#define __OCF_DEF_H__
+
+#include "ocf_cfg.h"
+/**
+ * @file
+ * @brief OCF definitions
+ */
+
+/**
+ * @name OCF cache definitions
+ */
+/**
+ * Minimum value of a valid cache ID
+ */
+#define OCF_CACHE_ID_MIN 1
+/**
+ * Maximum value of a valid cache ID
+ */
+#define OCF_CACHE_ID_MAX 16384
+/**
+ * Invalid value of cache id
+ */
+#define OCF_CACHE_ID_INVALID 0
+/**
+ * Minimum cache size in bytes
+ */
+#define OCF_CACHE_SIZE_MIN (20 * MiB)
+/**
+ * Size of cache name
+ */
+#define OCF_CACHE_NAME_SIZE 32
+/**
+ * Value to turn off fallback pass through
+ */
+#define OCF_CACHE_FALLBACK_PT_INACTIVE 0
+/**
+ * Minimum value of io error threshold
+ */
+#define OCF_CACHE_FALLBACK_PT_MIN_ERROR_THRESHOLD \
+ OCF_CACHE_FALLBACK_PT_INACTIVE
+/**
+ * Maximum value of io error threshold
+ */
+#define OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD 1000000
+/**
+ * @}
+ */
+
+/**
+ * @name OCF cores definitions
+ */
+/**
+ * Maximum numbers of cores per cache instance
+ */
+#define OCF_CORE_MAX OCF_CONFIG_MAX_CORES
+/**
+ * Minimum value of a valid core ID
+ */
+#define OCF_CORE_ID_MIN 0
+/**
+ * Maximum value of a valid core ID
+ */
+#define OCF_CORE_ID_MAX (OCF_CORE_MAX - 1)
+/**
+ * Invalid value of core id
+ */
+#define OCF_CORE_ID_INVALID OCF_CORE_MAX
+/**
+ * Size of core name
+ */
+#define OCF_CORE_NAME_SIZE 32
+/**
+ * Minimum value of valid core sequence number
+ */
+#define OCF_SEQ_NO_MIN 1
+/**
+ * Maximum value of a valid core sequence number
+ */
+#define OCF_SEQ_NO_MAX (65535UL)
+/**
+ * Invalid value of core sequence number
+ */
+#define OCF_SEQ_NO_INVALID 0
+/**
+ * @}
+ */
+
+/**
+ * @name Miscellaneous defines
+ * @{
+ */
+#define KiB (1ULL << 10)
+#define MiB (1ULL << 20)
+#define GiB (1ULL << 30)
+
+#if OCF_CONFIG_DEBUG_STATS == 1
+/** Macro which indicates that extended debug statistics shall be on */
+#define OCF_DEBUG_STATS
+#endif
+/**
+ * @}
+ */
+
+/**
+ * This Enumerator describes OCF cache instance state
+ */
+typedef enum {
+ ocf_cache_state_running = 0, //!< ocf_cache_state_running
+ /*!< OCF is currently running */
+
+ ocf_cache_state_stopping = 1, //!< ocf_cache_state_stopping
+ /*!< OCF cache instance is stopping */
+
+ ocf_cache_state_initializing = 2, //!< ocf_cache_state_initializing
+ /*!< OCF cache instance during initialization */
+
+ ocf_cache_state_incomplete = 3, //!< ocf_cache_state_incomplete
+ /*!< OCF cache has at least one inactive core */
+
+ ocf_cache_state_max //!< ocf_cache_state_max
+ /*!< Stopper of cache state enumerator */
+} ocf_cache_state_t;
+
+/**
+ * This Enumerator describes OCF core instance state
+ */
+typedef enum {
+ ocf_core_state_active = 0,
+ /*!< Core is active */
+
+ ocf_core_state_inactive,
+ /*!< Core is inactive (not attached) */
+
+ ocf_core_state_max,
+ /*!< Stopper of core state enumerator */
+} ocf_core_state_t;
+
+
+/**
+ * OCF supported cache modes
+ */
+typedef enum {
+ ocf_cache_mode_wt = 0,
+ /*!< Write-through cache mode */
+
+ ocf_cache_mode_wb,
+ /*!< Write-back cache mode */
+
+ ocf_cache_mode_wa,
+ /*!< Write-around cache mode */
+
+ ocf_cache_mode_pt,
+ /*!< Pass-through cache mode */
+
+ ocf_cache_mode_wi,
+ /*!< Write invalidate cache mode */
+
+ ocf_cache_mode_wo,
+ /*!< Write-only cache mode */
+
+ ocf_cache_mode_max,
+ /*!< Stopper of cache mode enumerator */
+
+ ocf_cache_mode_default = ocf_cache_mode_wt,
+ /*!< Default cache mode */
+
+ ocf_cache_mode_none = -1,
+ /*!< Current cache mode of given cache instance */
+} ocf_cache_mode_t;
+
+typedef enum {
+ ocf_seq_cutoff_policy_always = 0,
+ /*!< Sequential cutoff always on */
+
+ ocf_seq_cutoff_policy_full,
+ /*!< Sequential cutoff when occupancy is 100% */
+
+ ocf_seq_cutoff_policy_never,
+ /*!< Sequential cutoff disabled */
+
+ ocf_seq_cutoff_policy_max,
+ /*!< Stopper of sequential cutoff policy enumerator */
+
+ ocf_seq_cutoff_policy_default = ocf_seq_cutoff_policy_full,
+ /*!< Default sequential cutoff policy*/
+} ocf_seq_cutoff_policy;
+
+/**
+ * OCF supported eviction policy types
+ */
+typedef enum {
+ ocf_eviction_lru = 0,
+ /*!< Least recently used eviction policy */
+
+ ocf_eviction_max,
+ /*!< Stopper of enumerator */
+
+ ocf_eviction_default = ocf_eviction_lru,
+ /*!< Default eviction policy */
+} ocf_eviction_t;
+
+/**
+ * OCF supported promotion policy types
+ */
+typedef enum {
+ ocf_promotion_always = 0,
+ /*!< No promotion policy. Cache inserts are not filtered */
+
+ ocf_promotion_nhit,
+ /*!< Line can be inserted after N requests for it */
+
+ ocf_promotion_max,
+ /*!< Stopper of enumerator */
+
+ ocf_promotion_default = ocf_promotion_always,
+ /*!< Default promotion policy */
+} ocf_promotion_t;
+
+/**
+ * OCF supported Write-Back cleaning policies type
+ */
+typedef enum {
+ ocf_cleaning_nop = 0,
+ /*!< Cleaning won't happen in the background. Only on eviction or
+ * during cache stop
+ */
+
+ ocf_cleaning_alru,
+ /*!< Approximately least recently used. A background cleaning
+ * thread is enabled which cleans dirty data during IO
+ * inactivity.
+ */
+
+ ocf_cleaning_acp,
+ /*!< Cleaning algorithm attempts to reduce core device seek
+ * distance. Cleaning thread runs concurrently with I/O.
+ */
+
+ ocf_cleaning_max,
+ /*!< Stopper of enumerator */
+
+ ocf_cleaning_default = ocf_cleaning_alru,
+ /*!< Default cleaning policy type */
+} ocf_cleaning_t;
+
+/**
+ * OCF supported cache line sizes in bytes
+ */
+typedef enum {
+ ocf_cache_line_size_none = 0,
+ /*!< None */
+
+ ocf_cache_line_size_4 = 4 * KiB,
+ /*!< 4 kiB */
+
+ ocf_cache_line_size_8 = 8 * KiB,
+ /*!< 8 kiB */
+
+ ocf_cache_line_size_16 = 16 * KiB,
+ /*!< 16 kiB */
+
+ ocf_cache_line_size_32 = 32 * KiB,
+ /*!< 32 kiB */
+
+ ocf_cache_line_size_64 = 64 * KiB,
+ /*!< 64 kiB */
+
+ ocf_cache_line_size_default = ocf_cache_line_size_4,
+ /*!< Default cache line size */
+
+ ocf_cache_line_size_min = ocf_cache_line_size_4,
+ /*!< Minimum cache line size */
+
+ ocf_cache_line_size_max = ocf_cache_line_size_64,
+ /*!< Maximal cache line size */
+
+ ocf_cache_line_size_inf = ~0ULL,
+ /*!< Force enum to be 64-bit */
+} ocf_cache_line_size_t;
+
+/**
+ * Metadata layout
+ */
+typedef enum {
+ ocf_metadata_layout_striping = 0,
+ ocf_metadata_layout_seq = 1,
+ ocf_metadata_layout_max,
+ ocf_metadata_layout_default = ocf_metadata_layout_striping
+} ocf_metadata_layout_t;
+
+/**
+ * @name OCF IO class definitions
+ */
+/**
+ * Maximum numbers of IO classes per cache instance
+ */
+#define OCF_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
+/**
+ * Minimum value of a valid IO class ID
+ */
+#define OCF_IO_CLASS_ID_MIN 0
+/**
+ * Maximum value of a valid IO class ID
+ */
+#define OCF_IO_CLASS_ID_MAX (OCF_IO_CLASS_MAX - 1)
+/**
+ * Invalid value of IO class id
+ */
+#define OCF_IO_CLASS_INVALID OCF_IO_CLASS_MAX
+
+/** Maximum size of the IO class name */
+#define OCF_IO_CLASS_NAME_MAX 1024
+
+/** IO class priority which indicates pinning */
+#define OCF_IO_CLASS_PRIO_PINNED -1
+
+/** The highest IO class priority */
+#define OCF_IO_CLASS_PRIO_HIGHEST 0
+
+/** The lowest IO class priority */
+#define OCF_IO_CLASS_PRIO_LOWEST 255
+
+/** Default IO class priority */
+#define OCF_IO_CLASS_PRIO_DEFAULT OCF_IO_CLASS_PRIO_LOWEST
+/**
+ * @}
+ */
+
+/**
+ * @name I/O operations
+ * @{
+ */
+#define OCF_READ 0
+#define OCF_WRITE 1
+/**
+ * @}
+ */
+
+/**
+ * @name OCF cleaner definitions
+ * @{
+ */
+#define OCF_CLEANER_DISABLE ~0U
+/**
+ * @}
+ */
+
+#define MAX_TRIM_RQ_SIZE (512 * KiB)
+
+#endif /* __OCF_DEF_H__ */
diff --git a/src/spdk/ocf/inc/ocf_err.h b/src/spdk/ocf/inc/ocf_err.h
new file mode 100644
index 000000000..e4f71f2a3
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_err.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ERR_H__
+#define __OCF_ERR_H__
+
+/**
+ * @file
+ * @brief OCF error codes definitions
+ */
+
+/**
+ * @brief OCF error enumerator
+ */
+typedef enum {
+ /** Invalid input parameter value */
+ OCF_ERR_INVAL = 1000000,
+
+ /** Try again */
+ OCF_ERR_AGAIN,
+
+ /** Operation interrupted */
+ OCF_ERR_INTR,
+
+ /** Operation not supported */
+ OCF_ERR_NOT_SUPP,
+
+ /** Out of memory */
+ OCF_ERR_NO_MEM,
+
+ /** Lock not acquired */
+ OCF_ERR_NO_LOCK,
+
+ /** Metadata version mismatch */
+ OCF_ERR_METADATA_VER,
+
+ /** No metadata found on device */
+ OCF_ERR_NO_METADATA,
+
+ /** Cache metadata found on device */
+ OCF_ERR_METADATA_FOUND,
+
+ /** Invalid volume type */
+ OCF_ERR_INVAL_VOLUME_TYPE,
+
+ /** Unknown error occurred */
+ OCF_ERR_UNKNOWN,
+
+ /** Too many caches */
+ OCF_ERR_TOO_MANY_CACHES,
+
+ /** Not enough RAM to start cache */
+ OCF_ERR_NO_FREE_RAM,
+
+ /** Start cache failure */
+ OCF_ERR_START_CACHE_FAIL,
+
+ /** Cache ID/name does not exist */
+ OCF_ERR_CACHE_NOT_EXIST,
+
+ /** Core ID/name does not exist */
+ OCF_ERR_CORE_NOT_EXIST,
+
+ /** Cache ID/name already exists */
+ OCF_ERR_CACHE_EXIST,
+
+ /** Core ID/name already exists */
+ OCF_ERR_CORE_EXIST,
+
+ /** Too many core devices in cache */
+ OCF_ERR_TOO_MANY_CORES,
+
+ /** Core device not available */
+ OCF_ERR_CORE_NOT_AVAIL,
+
+ /** Cannot open device exclusively */
+ OCF_ERR_NOT_OPEN_EXC,
+
+ /** Cache device not available */
+ OCF_ERR_CACHE_NOT_AVAIL,
+
+ /** IO Class does not exist */
+ OCF_ERR_IO_CLASS_NOT_EXIST,
+
+ /** IO Error */
+ OCF_ERR_IO,
+
+ /** Error while writing to cache device */
+ OCF_ERR_WRITE_CACHE,
+
+ /** Error while writing to core device */
+ OCF_ERR_WRITE_CORE,
+
+ /** Dirty shutdown */
+ OCF_ERR_DIRTY_SHUTDOWN,
+
+ /** Cache contains dirty data */
+ OCF_ERR_DIRTY_EXISTS,
+
+ /** Flushing of core interrupted */
+ OCF_ERR_FLUSHING_INTERRUPTED,
+
+ /** Another flushing operation in progress */
+ OCF_ERR_FLUSH_IN_PROGRESS,
+
+ /** Adding core to core pool failed */
+ OCF_ERR_CANNOT_ADD_CORE_TO_POOL,
+
+ /** Cache is in incomplete state */
+ OCF_ERR_CACHE_IN_INCOMPLETE_STATE,
+
+ /** Core device is in inactive state */
+ OCF_ERR_CORE_IN_INACTIVE_STATE,
+
+ /** Invalid cache mode */
+ OCF_ERR_INVALID_CACHE_MODE,
+
+ /** Invalid cache line size */
+ OCF_ERR_INVALID_CACHE_LINE_SIZE,
+
+ /** Invalid cache name loaded */
+ OCF_ERR_CACHE_NAME_MISMATCH,
+
+ /** Device does not meet requirements */
+ OCF_ERR_INVAL_CACHE_DEV,
+} ocf_error_t;
+
+#endif /* __OCF_ERR_H__ */
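
Since most OCF calls report these codes as negated return values, a small illustrative helper (not part of OCF) can normalize and translate a few of them:

/* Illustrative only: map a handful of the codes above to messages. */
static const char *my_ocf_err_str(int error)
{
	int code = error < 0 ? -error : error;	/* OCF APIs usually return -OCF_ERR_* */

	switch (code) {
	case 0:				return "success";
	case OCF_ERR_NO_MEM:		return "out of memory";
	case OCF_ERR_NO_METADATA:	return "no metadata found on device";
	case OCF_ERR_TOO_MANY_CORES:	return "too many core devices in cache";
	case OCF_ERR_DIRTY_EXISTS:	return "cache contains dirty data";
	default:			return "unknown OCF error";
	}
}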
diff --git a/src/spdk/ocf/inc/ocf_io.h b/src/spdk/ocf/inc/ocf_io.h
new file mode 100644
index 000000000..f268320b8
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_io.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+
+#ifndef __OCF_IO_H__
+#define __OCF_IO_H__
+
+#include "ocf_types.h"
+
+/**
+ * @file
+ * @brief OCF IO definitions
+ */
+
+struct ocf_io;
+
+/**
+ * @brief OCF IO start
+ *
+ * @note OCF IO start notification callback
+ *
+ * @param[in] io OCF IO being started
+ */
+typedef void (*ocf_start_io_t)(struct ocf_io *io);
+
+/**
+ * @brief OCF IO handle
+ *
+ * @note OCF IO handle callback
+ *
+ * @param[in] io OCF IO to handle
+ * @param[in] opaque Opaque context passed to the handler
+ */
+typedef void (*ocf_handle_io_t)(struct ocf_io *io, void *opaque);
+
+/**
+ * @brief OCF IO completion
+ *
+ * @note Completion function for OCF IO
+ *
+ * @param[in] io OCF IO being completed
+ * @param[in] error Completion status code
+ */
+typedef void (*ocf_end_io_t)(struct ocf_io *io, int error);
+
+/**
+ * @brief OCF IO main structure
+ */
+struct ocf_io {
+ /**
+ * @brief OCF IO destination address
+ */
+ uint64_t addr;
+
+ /**
+ * @brief OCF IO flags
+ */
+ uint64_t flags;
+
+ /**
+ * @brief OCF IO size in bytes
+ */
+ uint32_t bytes;
+
+ /**
+ * @brief OCF IO destination class
+ */
+ uint32_t io_class;
+
+ /**
+ * @brief OCF IO direction
+ */
+ uint32_t dir;
+
+ /**
+ * @brief Queue handle
+ */
+ ocf_queue_t io_queue;
+
+ /**
+ * @brief OCF IO start function
+ */
+ ocf_start_io_t start;
+
+ /**
+ * @brief OCF IO handle function
+ */
+ ocf_handle_io_t handle;
+
+ /**
+ * @brief OCF IO completion function
+ */
+ ocf_end_io_t end;
+
+ /**
+ * @brief OCF IO private 1
+ */
+ void *priv1;
+
+ /**
+ * @brief OCF IO private 2
+ */
+ void *priv2;
+};
+
+/**
+ * @brief OCF IO operations set structure
+ */
+struct ocf_io_ops {
+ /**
+ * @brief Set up data vector in OCF IO
+ *
+ * @param[in] io OCF IO to set up
+ * @param[in] data Source context data
+ * @param[in] offset Data offset in source context data
+ *
+ * @retval 0 Data set up successfully
+ * @retval Non-zero Data set up failure
+ */
+ int (*set_data)(struct ocf_io *io, ctx_data_t *data,
+ uint32_t offset);
+
+ /**
+ * @brief Get context data from OCF IO
+ *
+ * @param[in] io OCF IO to get data
+ *
+ * @return Data vector from IO
+ */
+ ctx_data_t *(*get_data)(struct ocf_io *io);
+};
+
+/**
+ * @brief Get IO private context structure
+ *
+ * @param[in] io OCF IO
+ *
+ * @return IO private context structure
+ */
+void *ocf_io_get_priv(struct ocf_io *io);
+
+/**
+ * @brief Increase reference counter in OCF IO
+ *
+ * @note Wrapper for get IO operation
+ *
+ * @param[in] io OCF IO
+ */
+void ocf_io_get(struct ocf_io *io);
+
+/**
+ * @brief Decrease reference counter in OCF IO
+ *
+ * @note If the IO has no more references, it is deallocated
+ *
+ * @param[in] io OCF IO
+ */
+void ocf_io_put(struct ocf_io *io);
+
+/**
+ * @brief Set OCF IO completion function
+ *
+ * @param[in] io OCF IO
+ * @param[in] context Context for completion function
+ * @param[in] fn Completion function
+ */
+static inline void ocf_io_set_cmpl(struct ocf_io *io, void *context,
+ void *context2, ocf_end_io_t fn)
+{
+ io->priv1 = context;
+ io->priv2 = context2;
+ io->end = fn;
+}
+
+/**
+ * @brief Set OCF IO start function
+ *
+ * @param[in] io OCF IO
+ * @param[in] fn Start callback function
+ */
+static inline void ocf_io_set_start(struct ocf_io *io, ocf_start_io_t fn)
+{
+ io->start = fn;
+}
+
+/**
+ * @brief Set OCF IO handle function
+ *
+ * @param[in] io OCF IO
+ * @param[in] fn Handle callback function
+ */
+static inline void ocf_io_set_handle(struct ocf_io *io, ocf_handle_io_t fn)
+{
+ io->handle = fn;
+}
+
+/**
+ * @brief Set up data vector in OCF IO
+ *
+ * @note Wrapper for set up data vector function
+ *
+ * @param[in] io OCF IO to set up
+ * @param[in] data Source data vector
+ * @param[in] offset Data offset in source data vector
+ *
+ * @retval 0 Data set up successfully
+ * @retval Non-zero Data set up failure
+ */
+int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset);
+
+/**
+ * @brief Get data vector from OCF IO
+ *
+ * @note Wrapper for get data vector function
+ *
+ * @param[in] io OCF IO to get data
+ *
+ * @return Data vector from IO
+ */
+ctx_data_t *ocf_io_get_data(struct ocf_io *io);
+
+/**
+ * @brief Handle IO in cache engine
+ *
+ * @param[in] io OCF IO to be handled
+ * @param[in] opaque OCF opaque
+ */
+void ocf_io_handle(struct ocf_io *io, void *opaque);
+
+/**
+ * @brief Get volume associated with io
+ *
+ * @param[in] io OCF IO
+ *
+ * @return Volume to which the IO is being submitted
+ */
+ocf_volume_t ocf_io_get_volume(struct ocf_io *io);
+
+#endif /* __OCF_IO_H__ */
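
A sketch of how the setters above are typically combined before submission. Allocation of the IO and the data buffer is environment specific and assumed to have happened already (e.g. through a core new-IO helper); the lifetime handling in the completion is also only indicative.

static void my_io_end(struct ocf_io *io, int error)
{
	void *my_ctx = io->priv1;	/* stored by ocf_io_set_cmpl() below */

	(void)my_ctx;
	(void)error;
	ocf_io_put(io);	/* release our reference; exact lifetime rules depend on allocation */
}

static int my_prepare_and_submit(struct ocf_io *io, ctx_data_t *data, void *my_ctx)
{
	int ret;

	/* Completion contexts go to priv1/priv2, the callback to end. */
	ocf_io_set_cmpl(io, my_ctx, NULL, my_io_end);

	ret = ocf_io_set_data(io, data, 0);
	if (ret)
		return ret;

	ocf_core_submit_io(io);	/* declared in ocf_core.h above */
	return 0;
}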
diff --git a/src/spdk/ocf/inc/ocf_io_class.h b/src/spdk/ocf/inc/ocf_io_class.h
new file mode 100644
index 000000000..caab0db02
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_io_class.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/**
+ * @file
+ * @brief IO class API
+ *
+ * File contains structures and methods for handling IO Class
+ * differentiation features
+ */
+
+#ifndef __OCF_IO_CLASS_H__
+#define __OCF_IO_CLASS_H__
+
+/**
+ * @brief OCF IO class information
+ */
+struct ocf_io_class_info {
+ char name[OCF_IO_CLASS_NAME_MAX];
+ /*!< The name of the IO class */
+
+ ocf_cache_mode_t cache_mode;
+ /*!< Cache mode of the IO class */
+
+ int16_t priority;
+ /*!< IO class priority */
+
+ uint32_t curr_size;
+ /*!< Current size of the IO class - number of cache lines which
+ * were assigned into this IO class
+ */
+
+ uint32_t min_size;
+ /*!< Minimum number of cache lines guaranteed
+ * for the specified IO class. If the current size reaches the
+ * minimum size, no more eviction takes place
+ */
+
+ uint32_t max_size;
+ /*!< Maximum number of cache lines that might be assigned into
+ * this IO class. If the current size reaches the maximum size, no more
+ * allocation for this IO class takes place
+ */
+
+ uint8_t eviction_policy_type;
+ /*!< The type of eviction policy for given IO class */
+
+ ocf_cleaning_t cleaning_policy_type;
+ /*!< The type of cleaning policy for given IO class */
+};
+
+/**
+ * @brief retrieve io class info
+ *
+ * function meant to retrieve information pertaining to particular IO class,
+ * specifically to fill ocf_io_class_info structure based on input parameters.
+ *
+ * @param[in] cache cache handle, to which specified request pertains.
+ * @param[in] io_class id of the io class whose info shall be retrieved.
+ * @param[out] info io class info structures to be filled as a
+ * result of this function call.
+ *
+ * @return function returns 0 upon successful completion; appropriate error
+ * code is returned otherwise
+ */
+int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
+ struct ocf_io_class_info *info);
+
+/**
+ * @brief helper function for ocf_io_class_visit
+ *
+ * This function is called back from ocf_io_class_visit for each valid
+ * configured io class; hence all parameters are input parameters,
+ * no exceptions. It can be used to enumerate all the io classes.
+ *
+ * @param[in] cache handle of the cache for which data is being retrieved
+ * @param[in] io_class_id id of an io class for which callback herein
+ * is invoked.
+ * @param[in] cntx a context pointer passed herein from within
+ * ocf_io_class_visit down to this callback.
+ *
+ * @return 0 upon success; Nonzero upon failure (when nonzero is returned,
+ * this callback won't be invoked for any more io classes)
+ */
+typedef int (*ocf_io_class_visitor_t)(ocf_cache_t cache,
+ uint32_t io_class_id, void *cntx);
+
+/**
+ * @brief enumerate all of the available IO classes.
+ *
+ * This function allows enumeration and retrieval of all io class id's that
+ * are valid for the given cache, by visiting each of them with a callback
+ * function supplied by the caller.
+ *
+ * @param[in] cache cache handle to which given call pertains
+ * @param[in] visitor a callback function that will be issued for each and every
+ * IO class that is configured and valid within given cache instance
+ * @param[in] cntx a context variable - structure that shall be passed to a
+ * callback function for every call
+ *
+ * @return 0 upon successful completion of the function; otherwise nonzero result
+ * shall be returned
+ */
+int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
+ void *cntx);
+
+#endif /* __OCF_IO_CLASS_H__ */
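
A sketch of the visitor pattern above: enumerate the configured IO classes and print their name and occupancy. The printing helper is illustrative, not part of OCF.

#include <stdio.h>

static int my_io_class_printer(ocf_cache_t cache, uint32_t io_class_id,
		void *cntx)
{
	struct ocf_io_class_info info;
	int ret;

	(void)cntx;
	ret = ocf_cache_io_class_get_info(cache, io_class_id, &info);
	if (ret)
		return ret;	/* non-zero stops the iteration */

	printf("io class %u: %s, %u cache lines\n",
			io_class_id, info.name, info.curr_size);
	return 0;
}

/* Usage: ocf_io_class_visit(cache, my_io_class_printer, NULL); */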
diff --git a/src/spdk/ocf/inc/ocf_logger.h b/src/spdk/ocf/inc/ocf_logger.h
new file mode 100644
index 000000000..343a9759f
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_logger.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_LOGGER_H__
+#define __OCF_LOGGER_H__
+
+/**
+ * @file
+ * @brief Logger API
+ */
+
+#include <ocf/ocf_types.h>
+#include <stdarg.h>
+
+/**
+ * @brief Verbosity levels of context log
+ */
+typedef enum {
+ log_emerg,
+ log_alert,
+ log_crit,
+ log_err,
+ log_warn,
+ log_notice,
+ log_info,
+ log_debug,
+} ocf_logger_lvl_t;
+
+struct ocf_logger_ops {
+ int (*open)(ocf_logger_t logger);
+ void (*close)(ocf_logger_t logger);
+ int (*print)(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, va_list args);
+ int (*print_rl)(ocf_logger_t logger, const char *func_name);
+ int (*dump_stack)(ocf_logger_t logger);
+};
+
+void ocf_logger_set_priv(ocf_logger_t logger, void *priv);
+
+void *ocf_logger_get_priv(ocf_logger_t logger);
+
+#endif /* __OCF_LOGGER_H__ */
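
A minimal stderr-backed logger implementing the print callback above; whether the remaining callbacks may stay unset depends on the environment, so treat this purely as a sketch.

#include <stdio.h>

static int my_log_print(ocf_logger_t logger, ocf_logger_lvl_t lvl,
		const char *fmt, va_list args)
{
	(void)logger;
	/* Drop messages below 'info' verbosity in this example. */
	if (lvl > log_info)
		return 0;

	return vfprintf(stderr, fmt, args);
}

static const struct ocf_logger_ops my_logger_ops = {
	.print = my_log_print,	/* open/close/print_rl/dump_stack left unset here */
};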
diff --git a/src/spdk/ocf/inc/ocf_metadata.h b/src/spdk/ocf/inc/ocf_metadata.h
new file mode 100644
index 000000000..5ecfbcaef
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_metadata.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_METADATA_H__
+#define __OCF_METADATA_H__
+
+/**
+ * @file
+ * @brief OCF metadata helper function
+ *
+ * Those functions can be used by volume implementation.
+ */
+
+/**
+ * @brief Atomic metadata for extended sector
+ *
+ * @warning The size of this structure has to be exactly 8 bytes
+ */
+struct ocf_atomic_metadata {
+ /** Core line of core (in cache line size unit) which is cached */
+ uint64_t core_line : 46;
+
+ /** Core sequence number to which this line belongs */
+ uint32_t core_seq_no : 16;
+
+ /** Set bit indicates that given sector is valid (is cached) */
+ uint32_t valid : 1;
+
+ /** Set bit indicates that sector is dirty */
+ uint32_t dirty : 1;
+} __attribute__((packed));
+
+#define OCF_ATOMIC_METADATA_SIZE sizeof(struct ocf_atomic_metadata)
+
+/**
+ * @brief Get metadata entry (cache mapping) for specified sector of cache
+ * device
+ *
+ * Metadata has sector granularity. It might be used by volume which
+ * supports atomic writes (write of data and metadata in one buffer)
+ *
+ * @param[in] cache OCF cache instance
+ * @param[in] addr Sector address in bytes
+ * @param[out] entry Metadata entry
+ *
+ * @retval 0 Metadata retrieved successfully
+ * @retval Non-zero Error
+ */
+int ocf_metadata_get_atomic_entry(ocf_cache_t cache, uint64_t addr,
+ struct ocf_atomic_metadata *entry);
+
+/**
+ * @brief Metadata probe status
+ */
+struct ocf_metadata_probe_status {
+ /** Cache was gracefully stopped */
+ bool clean_shutdown;
+
+ /** Cache contains dirty data */
+ bool cache_dirty;
+
+ /** Loaded name of cache instance */
+ char cache_name[OCF_CACHE_NAME_SIZE];
+};
+
+/**
+ * @brief Metadata probe completion callback
+ *
+ * @param[in] priv Completion context
+ * @param[in] error Error code (zero on success)
+ * @param[in] status Structure describing metadata probe status
+ */
+typedef void (*ocf_metadata_probe_end_t)(void *priv, int error,
+ struct ocf_metadata_probe_status *status);
+
+/**
+ * @brief Probe cache device
+ *
+ * @param[in] ctx handle to object designating ocf context
+ * @param[in] volume Cache volume
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion context
+ */
+void ocf_metadata_probe(ocf_ctx_t ctx, ocf_volume_t volume,
+ ocf_metadata_probe_end_t cmpl, void *priv);
+
+/**
+ * @brief Metadata probe for cores completion callback
+ *
+ * @param[in] priv Completion context
+ * @param[in] error Error code (zero on success)
+ * @param[in] num_cores Number of cores in cache metadata
+ */
+typedef void (*ocf_metadata_probe_cores_end_t)(void *priv, int error,
+ unsigned int num_cores);
+
+/**
+ * @brief Probe cache device for associated cores
+ *
+ * @param[in] ctx handle to object designating ocf context
+ * @param[in] volume Cache volume
+ * @param[in,out] uuids Array of uuids
+ * @param[in] uuid_count Size of @p uuids array
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion context
+ */
+void ocf_metadata_probe_cores(ocf_ctx_t ctx, ocf_volume_t volume,
+ struct ocf_volume_uuid *uuids, uint32_t uuid_count,
+ ocf_metadata_probe_cores_end_t cmpl, void *priv);
+
+/**
+ * @brief Check if sectors in cache line before given address are invalid
+ *
+ * It might be used by volume which supports
+ * atomic writes (write of data and metadata in one buffer)
+ *
+ * @param[in] cache OCF cache instance
+ * @param[in] addr Sector address in bytes
+ *
+ * @retval 0 Not all sectors before given address are invalid
+ * @retval Non-zero Number of invalid sectors before given address
+ */
+int ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr);
+
+/**
+ * @brief Check if sectors in cache line after given end address are invalid
+ *
+ * It might be used by volume which supports
+ * atomic writes (write of data and metadata in one buffer)
+ *
+ * @param[in] cache OCF cache instance
+ * @param[in] addr Sector address in bytes
+ * @param[in] bytes IO size in bytes
+ *
+ * @retval 0 Not all sectors after given end address are invalid
+ * @retval Non-zero Number of invalid sectors after given end address
+ */
+int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr,
+ uint32_t bytes);
+
+#endif /* __OCF_METADATA_H__ */
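
A sketch of the asynchronous probe flow: the completion callback below simply reports what was found. Opening the cache volume beforehand is the caller's responsibility.

#include <stdio.h>

static void my_probe_end(void *priv, int error,
		struct ocf_metadata_probe_status *status)
{
	(void)priv;
	if (error) {
		/* e.g. -OCF_ERR_NO_METADATA when the device never held a cache */
		fprintf(stderr, "probe failed: %d\n", error);
		return;
	}

	printf("cache '%s': clean_shutdown=%d dirty=%d\n",
			status->cache_name, status->clean_shutdown,
			status->cache_dirty);
}

/* Usage: ocf_metadata_probe(ctx, volume, my_probe_end, NULL); */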
diff --git a/src/spdk/ocf/inc/ocf_metadata_updater.h b/src/spdk/ocf/inc/ocf_metadata_updater.h
new file mode 100644
index 000000000..7a5084f2d
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_metadata_updater.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_METADATA_UPDATER_H__
+#define __OCF_METADATA_UPDATER_H__
+
+/**
+ * @file
+ * @brief OCF metadata updater API
+ *
+ */
+
+/**
+ * @brief Run metadata updater
+ *
+ * @param[in] mu Metadata updater instance to run
+ *
+ * @return Hint whether there is a need to rerun without waiting.
+ */
+uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu);
+
+/**
+ * @brief Set metadata updater private data
+ *
+ * @param[in] mu Metadata updater handle
+ * @param[in] priv Private data
+ */
+void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv);
+
+/**
+ * @brief Get metadata updater private data
+ *
+ * @param[in] mu Metadata updater handle
+ *
+ * @retval Metadata updater private data
+ */
+void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu);
+
+/**
+ * @brief Get cache instance to which metadata updater belongs
+ *
+ * @param[in] mu Metadata updater handle
+ *
+ * @retval Cache instance
+ */
+ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu);
+
+#endif /* __OCF_METADATA_UPDATER_H__ */
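
The kick/run contract from ocf_ctx.h and this header, as a sketch: the environment's updater worker, once kicked, keeps calling ocf_metadata_updater_run() until it stops asking to be rerun.

/* Sketch: body of an environment-provided metadata updater worker. */
static void my_metadata_updater_process(ocf_metadata_updater_t mu)
{
	/* A non-zero return value is a hint to run again without waiting. */
	while (ocf_metadata_updater_run(mu))
		;
}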
diff --git a/src/spdk/ocf/inc/ocf_mngt.h b/src/spdk/ocf/inc/ocf_mngt.h
new file mode 100644
index 000000000..e4dd0cbc7
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_mngt.h
@@ -0,0 +1,1107 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_MNGT_H__
+#define __OCF_MNGT_H__
+
+#include "ocf_cache.h"
+#include "ocf_core.h"
+
+/**
+ * @file
+ * @brief OCF management operations definitions
+ */
+
+/**
+ * @brief Core start configuration
+ */
+struct ocf_mngt_core_config {
+ /**
+ * @brief OCF core name
+ */
+ char name[OCF_CORE_NAME_SIZE];
+
+ /**
+ * @brief OCF core volume UUID
+ */
+ struct ocf_volume_uuid uuid;
+
+ /**
+ * @brief OCF core volume type
+ */
+ uint8_t volume_type;
+
+ /**
+ * @brief Add core to pool if cache isn't present or add core to
+ * earlier loaded cache
+ */
+ bool try_add;
+
+ uint32_t seq_cutoff_threshold;
+ /*!< Sequential cutoff threshold (in bytes) */
+
+ struct {
+ void *data;
+ size_t size;
+ } user_metadata;
+};
+
+/**
+ * @brief Initialize core config to default values
+ *
+ * @note This function doesn't initialize name, uuid and volume_type fields
+ * which have no default values and are required to be set by user.
+ *
+ * @param[in] cfg Core config structure
+ */
+static inline void ocf_mngt_core_config_set_default(
+ struct ocf_mngt_core_config *cfg)
+{
+ cfg->try_add = false;
+ cfg->seq_cutoff_threshold = 1024;
+ cfg->user_metadata.data = NULL;
+ cfg->user_metadata.size = 0;
+}
+
+/**
+ * @brief Get number of OCF caches
+ *
+ * @param[in] ctx OCF context
+ *
+ * @retval Number of caches in given OCF instance
+ */
+uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx);
+
+/* Cache instances getters */
+
+/**
+ * @brief Get OCF cache by name
+ *
+ * @note This function on success also increases reference counter
+ * in given cache
+ *
+ * @param[in] ctx OCF context
+ * @param[in] name OCF cache name
+ * @param[in] name_len Cache name length
+ * @param[out] cache OCF cache handle
+ *
+ * @retval 0 Get cache successfully
+ * @retval -OCF_ERR_CACHE_NOT_EXIST Cache with given name doesn't exist
+ */
+int ocf_mngt_cache_get_by_name(ocf_ctx_t ctx, const char* name, size_t name_len,
+ ocf_cache_t *cache);
+
+/**
+ * @brief Increment reference counter of cache
+ *
+ * @param[in] cache OCF cache handle
+ *
+ * @retval 0 Reference counter incremented
+ * @retval -OCF_ERR_CACHE_NOT_AVAIL cache isn't initialized yet
+ */
+int ocf_mngt_cache_get(ocf_cache_t cache);
+
+/**
+ * @brief Decrease reference counter in cache
+ *
+ * @note If the cache has no more references, it is deallocated
+ *
+ * @param[in] cache Handle to cache
+ */
+void ocf_mngt_cache_put(ocf_cache_t cache);
+
+/**
+ * @brief Completion callback of cache lock operation
+ *
+ * @param[in] cache Handle to cache
+ * @param[in] priv Private context passed to the lock request
+ * @param[in] error Status error code. Can be one of the following:
+ * 0 Cache successfully locked
+ * -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already stopping
+ * -OCF_ERR_NO_MEM Cannot allocate needed memory
+ * -OCF_ERR_INTR Wait operation interrupted
+ */
+typedef void (*ocf_mngt_cache_lock_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Lock cache for management operations (write lock, exclusive)
+ *
+ * @param[in] cache Handle to cache
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Private context of completion callback
+ */
+void ocf_mngt_cache_lock(ocf_cache_t cache,
+ ocf_mngt_cache_lock_end_t cmpl, void *priv);
+
+/**
+ * @brief Lock cache for read - assures cache config does not change while
+ * lock is being held, while allowing other users to acquire
+ * read lock in parallel.
+ *
+ * @param[in] cache Handle to cache
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Private context of completion callback
+ */
+void ocf_mngt_cache_read_lock(ocf_cache_t cache,
+ ocf_mngt_cache_lock_end_t cmpl, void *priv);
+
+/**
+ * @brief Lock cache for management operations (write lock, exclusive)
+ *
+ * @param[in] cache Handle to cache
+ *
+ * @retval 0 Cache successfully locked
+ * @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
+ * stopping
+ * @retval -OCF_ERR_NO_LOCK Lock not acquired
+ */
+int ocf_mngt_cache_trylock(ocf_cache_t cache);
+
+/**
+ * @brief Lock cache for read - assures cache config does not change while
+ * lock is being held, while allowing other users to acquire
+ * read lock in parallel.
+ *
+ * @param[in] cache Handle to cache
+ *
+ * @retval 0 Cache successfully locked
+ * @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
+ * stopping
+ * @retval -OCF_ERR_NO_LOCK Lock not acquired
+ */
+int ocf_mngt_cache_read_trylock(ocf_cache_t cache);
+
+/**
+ * @brief Write-unlock cache
+ *
+ * @param[in] cache Handle to cache
+ */
+void ocf_mngt_cache_unlock(ocf_cache_t cache);
+
+/**
+ * @brief Read-unlock cache
+ *
+ * @param[in] cache Handle to cache
+ */
+void ocf_mngt_cache_read_unlock(ocf_cache_t cache);
+
+/**
+ * @brief Cache visitor function
+ *
+ * @param[in] cache Handle to cache
+ * @param[in] cntx Visitor function context
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+typedef int (*ocf_mngt_cache_visitor_t)(ocf_cache_t cache, void *cntx);
+
+/**
+ * @brief Loop for each cache
+ *
+ * @note Visitor function is called for each cache
+ *
+ * @param[in] ctx OCF context
+ * @param[in] visitor OCF cache visitor function
+ * @param[in] cntx Context for cache visitor function
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_mngt_cache_visit(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor,
+ void *cntx);
+
+/**
+ * @brief Loop for each cache reverse
+ *
+ * @note Visitor function is called for each cache
+ *
+ * @param[in] ctx OCF context
+ * @param[in] visitor OCF cache visitor function
+ * @param[in] cntx Context for cache visitor function
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_mngt_cache_visit_reverse(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor,
+ void *cntx);
+
+/**
+ * @brief Cache start configuration
+ */
+struct ocf_mngt_cache_config {
+ /**
+ * @brief Cache name
+ */
+ char name[OCF_CACHE_NAME_SIZE];
+
+ /**
+ * @brief Cache mode
+ */
+ ocf_cache_mode_t cache_mode;
+
+ /**
+ * @brief Eviction policy type
+ */
+ ocf_eviction_t eviction_policy;
+
+ /**
+ * @brief Promotion policy type
+ */
+ ocf_promotion_t promotion_policy;
+
+ /**
+ * @brief Cache line size
+ */
+ ocf_cache_line_size_t cache_line_size;
+
+ /**
+ * @brief Metadata layout (striping/sequential)
+ */
+ ocf_metadata_layout_t metadata_layout;
+
+ bool metadata_volatile;
+
+ /**
+ * @brief Backfill configuration
+ */
+ struct {
+ uint32_t max_queue_size;
+ uint32_t queue_unblock_size;
+ } backfill;
+
+ /**
+ * @brief Start cache and keep it locked
+ *
+ * @note In this case caller is able to perform additional activities
+ * and then shall unlock cache
+ */
+ bool locked;
+
+ /**
+ * @brief Use pass-through mode for I/O requests unaligned to 4KiB
+ */
+ bool pt_unaligned_io;
+
+ /**
+ * @brief If set, try to submit all I/O in fast path.
+ */
+ bool use_submit_io_fast;
+};
+
+/**
+ * @brief Initialize cache config to default values
+ *
+ * @note This function doesn't initialize name field which has no default
+ * value and is required to be set by user.
+ *
+ * @param[in] cfg Cache config stucture
+ */
+static inline void ocf_mngt_cache_config_set_default(
+ struct ocf_mngt_cache_config *cfg)
+{
+ cfg->cache_mode = ocf_cache_mode_default;
+ cfg->eviction_policy = ocf_eviction_default;
+ cfg->promotion_policy = ocf_promotion_default;
+ cfg->cache_line_size = ocf_cache_line_size_4;
+ cfg->metadata_layout = ocf_metadata_layout_default;
+ cfg->metadata_volatile = false;
+ cfg->backfill.max_queue_size = 65536;
+ cfg->backfill.queue_unblock_size = 60000;
+ cfg->locked = false;
+ cfg->pt_unaligned_io = false;
+ cfg->use_submit_io_fast = false;
+}
+
+/**
+ * @brief Start cache instance
+ *
+ * @param[in] ctx OCF context
+ * @param[out] cache Cache handle
+ * @param[in] cfg Starting cache configuration
+ *
+ * @retval 0 Cache started successfully
+ * @retval Non-zero Error occurred and starting cache failed
+ */
+int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
+ struct ocf_mngt_cache_config *cfg);
+
+/**
+ * @brief Set queue to be used during management operations
+ *
+ * @param[in] cache Cache object
+ * @param[in] queue Queue object
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue);
+
+/**
+ * @brief Completion callback of cache stop operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_stop_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Stop cache instance
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_stop(ocf_cache_t cache,
+ ocf_mngt_cache_stop_end_t cmpl, void *priv);
+
+/**
+ * @brief Cache attach configuration
+ */
+struct ocf_mngt_cache_device_config {
+ /**
+ * @brief Cache volume UUID
+ */
+ struct ocf_volume_uuid uuid;
+
+ /**
+ * @brief Cache volume type
+ */
+ uint8_t volume_type;
+
+ /**
+ * @brief Cache line size
+ */
+ ocf_cache_line_size_t cache_line_size;
+
+ /**
+ * @brief Automatically open core volumes when loading cache
+ *
+ * If set to false, cache load will not attempt to open core volumes,
+ * and so cores will be marked "inactive" unless their volumes were
+ * earlier added to the core pool. In that case the user is expected
+ * to add cores later using function ocf_mngt_cache_add_core().
+ *
+ * @note This option is meaningful only with ocf_mngt_cache_load().
+ * When used with ocf_mngt_cache_attach() it's ignored.
+ */
+ bool open_cores;
+
+ /**
+ * @brief Ignore warnings and start cache
+ *
+ * @note It forces cache start despite warnings, i.e. it will:
+ * - overwrite metadata of a previous cache that was shut down dirty
+ * - ignore a dirty shutdown and reinitialize the cache
+ */
+ bool force;
+
+ /**
+ * @brief If set, cache features (like discard) are tested
+ * before starting cache
+ */
+ bool perform_test;
+
+ /**
+ * @brief If set, cache device will be discarded on cache start
+ */
+ bool discard_on_start;
+
+ /**
+ * @brief Optional opaque volume parameters, passed down to cache volume
+ * open callback
+ */
+ void *volume_params;
+};
+
+/**
+ * @brief Initialize cache device config to default values
+ *
+ * @note This function doesn't initialize uuid and volume_type fields
+ * which have no default values and are required to be set by user.
+ *
+ * @param[in] cfg Cache device config structure
+ */
+static inline void ocf_mngt_cache_device_config_set_default(
+ struct ocf_mngt_cache_device_config *cfg)
+{
+ cfg->cache_line_size = ocf_cache_line_size_none;
+ cfg->open_cores = true;
+ cfg->force = false;
+ cfg->perform_test = true;
+ cfg->discard_on_start = true;
+ cfg->volume_params = NULL;
+}
+
+/**
+ * @brief Get amount of free RAM needed to attach cache volume
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cfg Caching device configuration
+ * @param[out] ram_needed Amount of RAM needed in bytes
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_get_ram_needed(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg, uint64_t *ram_needed);
+
+/**
+ * @brief Completion callback of cache attach operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_attach_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Attach caching device to cache instance
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cfg Caching device configuration
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_attach(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg,
+ ocf_mngt_cache_attach_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of cache detach operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_detach_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Detach caching device from cache instance
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_detach(ocf_cache_t cache,
+ ocf_mngt_cache_detach_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of cache load operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_load_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Load cache instance
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cfg Caching device configuration
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_load(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg,
+ ocf_mngt_cache_load_end_t cmpl, void *priv);
+
+/* Adding and removing cores */
+
+/**
+ * @brief Completion callback of add core operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] core Core handle on success or NULL on failure
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_add_core_end_t)(ocf_cache_t cache,
+ ocf_core_t core, void *priv, int error);
+
+/**
+ * @brief Add core to cache instance
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cfg Core configuration
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_add_core(ocf_cache_t cache,
+ struct ocf_mngt_core_config *cfg,
+ ocf_mngt_cache_add_core_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of remove core operation
+ *
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_remove_core_end_t)(void *priv, int error);
+
+/**
+ * @brief Remove core from cache instance
+ *
+ * @param[in] core Core handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_remove_core(ocf_core_t core,
+ ocf_mngt_cache_remove_core_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of detach core operation
+ *
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_detach_core_end_t)(void *priv, int error);
+
+/**
+ * @brief Detach core from cache instance
+ *
+ * @param[in] core Core handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_detach_core(ocf_core_t core,
+ ocf_mngt_cache_detach_core_end_t cmpl, void *priv);
+
+/* Flush operations */
+
+/**
+ * @brief Completion callback of cache flush operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_flush_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Flush data from given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_flush(ocf_cache_t cache,
+ ocf_mngt_cache_flush_end_t cmpl, void *priv);
+
+/**
+ * @brief Check if core is dirty
+ *
+ * @param[in] core Core handle
+ *
+ * @retval true if core is dirty, false otherwise
+ */
+bool ocf_mngt_core_is_dirty(ocf_core_t core);
+
+/**
+ * @brief Check if cache is dirty
+ *
+ * @param[in] cache Cache handle
+ *
+ * @retval true if cache is dirty, false otherwise
+ */
+bool ocf_mngt_cache_is_dirty(ocf_cache_t cache);
+
+/**
+ * @brief Completion callback of core flush operation
+ *
+ * @param[in] core Core handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_core_flush_end_t)(ocf_core_t core,
+ void *priv, int error);
+
+/**
+ * @brief Flush data to given core
+ *
+ * @param[in] core Core handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_core_flush(ocf_core_t core,
+ ocf_mngt_core_flush_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of cache purge operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_purge_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Purge data from given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_purge(ocf_cache_t cache,
+ ocf_mngt_cache_purge_end_t cmpl, void *priv);
+
+/**
+ * @brief Completion callback of core purge operation
+ *
+ * @param[in] core Core handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_core_purge_end_t)(ocf_core_t core,
+ void *priv, int error);
+
+/**
+ * @brief Purge data from given core
+ *
+ * @param[in] core Core handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_core_purge(ocf_core_t core,
+ ocf_mngt_core_purge_end_t cmpl, void *priv);
+
+/**
+ * @brief Interrupt existing flushing of cache or core
+ *
+ * @param[in] cache Cache instance
+ */
+void ocf_mngt_cache_flush_interrupt(ocf_cache_t cache);
+
+/**
+ * @brief Completion callback of save operation
+ *
+ * @param[in] cache Cache handle
+ * @param[in] priv Callback context
+ * @param[in] error Error code (zero on success)
+ */
+typedef void (*ocf_mngt_cache_save_end_t)(ocf_cache_t cache,
+ void *priv, int error);
+
+/**
+ * @brief Save cache configuration data on cache volume
+ *
+ * This function should be called after changing cache or core parameters
+ * in order to make changes persistent.
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cmpl Completion callback
+ * @param[in] priv Completion callback context
+ */
+void ocf_mngt_cache_save(ocf_cache_t cache,
+ ocf_mngt_cache_save_end_t cmpl, void *priv);
+
+/**
+ * @brief Determines whether given cache mode has write-back semantics, i.e. it
+ * allows for writes to be serviced in cache and lazily propagated to core.
+ *
+ * @param[in] mode input cache mode
+ */
+static inline bool ocf_mngt_cache_mode_has_lazy_write(ocf_cache_mode_t mode)
+{
+ return mode == ocf_cache_mode_wb || mode == ocf_cache_mode_wo;
+}
+
+/**
+ * @brief Set cache mode in given cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] mode Cache mode to set
+ *
+ * @retval 0 Cache mode has been set successfully
+ * @retval Non-zero Error occurred and cache mode has not been set
+ */
+int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode);
+
+/**
+ * @brief Set cleaning policy in given cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Cleaning policy type
+ *
+ * @retval 0 Policy has been set successfully
+ * @retval Non-zero Error occurred and policy has not been set
+ */
+int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type);
+
+/**
+ * @brief Get current cleaning policy from given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[out] type Variable to store current cleaning policy type
+ *
+ * @retval 0 Policy has been retrieved successfully
+ * @retval Non-zero Error occurred and policy has not been retrieved
+ */
+int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type);
+
+/**
+ * @brief Set cleaning parameter in given cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Cleaning policy type
+ * @param[in] param_id Cleaning policy parameter id
+ * @param[in] param_value Cleaning policy parameter value
+ *
+ * @retval 0 Parameter has been set successfully
+ * @retval Non-zero Error occurred and parameter has not been set
+ */
+int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type,
+ uint32_t param_id, uint32_t param_value);
+
+/**
+ * @brief Get cleaning parameter from given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Cleaning policy type
+ * @param[in] param_id Cleaning policy parameter id
+ * @param[out] param_value Variable to store parameter value
+ *
+ * @retval 0 Parameter has been retrieved successfully
+ * @retval Non-zero Error occurred and parameter has not been retrieved
+ */
+int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type,
+ uint32_t param_id, uint32_t *param_value);
+
+/**
+ * @brief Set promotion policy in given cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Promotion policy type
+ *
+ * @retval 0 Policy has been set successfully
+ * @retval Non-zero Error occurred and policy has not been set
+ */
+int ocf_mngt_cache_promotion_set_policy(ocf_cache_t cache, ocf_promotion_t type);
+
+/**
+ * @brief Get promotion policy in given cache
+ *
+ * @param[in] cache Cache handle
+ *
+ * @retval Currently set promotion policy type
+ */
+ocf_promotion_t ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache);
+
+/**
+ * @brief Set promotion policy parameter for given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Promotion policy type
+ * @param[in] param_id Promotion policy parameter id
+ * @param[in] param_value Promotion policy parameter value
+ *
+ * @retval 0 Parameter has been set successfully
+ * @retval Non-zero Error occurred and parameter has not been set
+ */
+int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t param_value);
+
+/**
+ * @brief Get promotion policy parameter for given cache
+ *
+ * @param[in] cache Cache handle
+ * @param[in] type Promotion policy type
+ * @param[in] param_id Promotion policy parameter id
+ * @param[out] param_value Variable to store parameter value
+ *
+ * @retval 0 Parameter has been retrieved successfully
+ * @retval Non-zero Error occurred and parameter has not been retrieved
+ */
+int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t *param_value);
+
+/**
+ * @brief IO class configuration
+ */
+struct ocf_mngt_io_class_config {
+ /**
+ * @brief IO class ID
+ */
+ uint32_t class_id;
+
+ /**
+ * @brief IO class name
+ */
+ const char *name;
+
+ /**
+ * @brief IO class eviction priority
+ */
+ int16_t prio;
+
+ /**
+ * @brief IO class cache mode
+ */
+ ocf_cache_mode_t cache_mode;
+
+ /**
+ * @brief IO class minimum size
+ */
+ uint32_t min_size;
+
+ /**
+ * @brief IO class maximum size
+ */
+ uint32_t max_size;
+};
+
+struct ocf_mngt_io_classes_config {
+ struct ocf_mngt_io_class_config config[OCF_IO_CLASS_MAX];
+};
+
+/**
+ * @brief Configure IO classes in given cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] cfg IO class configuration
+ *
+ * @retval 0 Configuration has been set successfully
+ * @retval Non-zero Error occurred and configuration has not been set
+ */
+int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
+ const struct ocf_mngt_io_classes_config *cfg);
+
+/**
+ * @brief Associate new UUID value with given core
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] core Core object
+ * @param[in] uuid new core uuid
+ *
+ * @retval 0 Success
+ * @retval Non-zero Fail
+ */
+int ocf_mngt_core_set_uuid(ocf_core_t core, const struct ocf_volume_uuid *uuid);
+
+/**
+ * @brief Set persistent user metadata for given core
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] core Core object
+ * @param[in] data User data buffer
+ * @param[in] size Size of user data buffer
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error occurred and metadata has not been set
+ */
+int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size);
+
+/**
+ * @brief Get persistent user metadata from given core
+ *
+ * @param[in] core Core object
+ * @param[out] data User data buffer
+ * @param[in] size Size of user data buffer
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error occurred and metadata has not been retrieved
+ */
+int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size);
+
+/**
+ * @brief Set core sequential cutoff threshold
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] core Core handle
+ * @param[in] thresh threshold in bytes for sequential cutoff
+ *
+ * @retval 0 Sequential cutoff threshold has been set successfully
+ * @retval Non-zero Error occurred and threshold hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_threshold(ocf_core_t core, uint32_t thresh);
+
+/**
+ * @brief Set sequential cutoff threshold for all cores in cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] thresh threshold in bytes for sequential cutoff
+ *
+ * @retval 0 Sequential cutoff threshold has been set successfully
+ * @retval Non-zero Error occurred and threshold hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_threshold_all(ocf_cache_t cache,
+ uint32_t thresh);
+
+/**
+ * @brief Get core sequential cutoff threshold
+ *
+ * @param[in] core Core handle
+ * @param[out] thresh Variable to store sequential cutoff threshold in bytes
+ *
+ * @retval 0 Sequential cutoff threshold has been retrieved successfully
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_core_get_seq_cutoff_threshold(ocf_core_t core, uint32_t *thresh);
+
+/**
+ * @brief Set core sequential cutoff policy
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] core Core handle
+ * @param[in] policy sequential cutoff policy
+ *
+ * @retval 0 Sequential cutoff policy has been set successfully
+ * @retval Non-zero Error occurred and policy hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_policy(ocf_core_t core,
+ ocf_seq_cutoff_policy policy);
+
+/**
+ * @brief Set sequential cutoff policy for all cores in cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ * use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] policy sequential cutoff policy
+ *
+ * @retval 0 Sequential cutoff policy has been set successfully
+ * @retval Non-zero Error occurred and policy hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_policy_all(ocf_cache_t cache,
+ ocf_seq_cutoff_policy policy);
+
+/**
+ * @brief Get core sequential cutoff policy
+ *
+ * @param[in] core Core handle
+ * @param[out] policy Variable to store sequential cutoff policy
+ *
+ * @retval 0 Sequential cutoff policy has been retrieved successfully
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core,
+ ocf_seq_cutoff_policy *policy);
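+
+/*
+ * Illustrative sketch (not part of the upstream header): switching a core to
+ * a 1 MiB sequential cutoff. The enumerator ocf_seq_cutoff_policy_full is an
+ * assumption here (the ocf_seq_cutoff_policy values are defined in ocf_def.h,
+ * not in this file).
+ *
+ * @code
+ * static int example_set_seq_cutoff(ocf_core_t core)
+ * {
+ *	int ret;
+ *
+ *	// Stop caching sequential streams longer than 1 MiB.
+ *	ret = ocf_mngt_core_set_seq_cutoff_threshold(core, 1024 * 1024);
+ *	if (ret)
+ *		return ret;
+ *
+ *	// Only apply the cutoff once the cache is full.
+ *	return ocf_mngt_core_set_seq_cutoff_policy(core,
+ *			ocf_seq_cutoff_policy_full);
+ * }
+ * @endcode
+ */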
+
+/**
+ * @brief Set cache fallback Pass Through error threshold
+ *
+ * @param[in] cache Cache handle
+ * @param[in] threshold Value to be set as threshold
+ *
+ * @retval 0 Fallback-PT threshold has been set successfully
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
+ uint32_t threshold);
+
+/**
+ * @brief Get cache fallback Pass Through error threshold
+ *
+ * @param[in] cache Cache handle
+ * @param[out] threshold Fallback-PT threshold
+ *
+ * @retval 0 Fallback-PT threshold has been retrieved successfully
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
+ uint32_t *threshold);
+
+/**
+ * @brief Reset cache fallback Pass Through error counter
+ *
+ * @param[in] cache Cache handle
+ *
+ * @retval 0 Error counter has been reset successfully
+ */
+int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache);
+
+/**
+ * @brief Get core pool count
+ *
+ * @param[in] ctx OCF context
+ *
+ * @retval Number of cores in core pool
+ */
+int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx);
+
+/**
+ * @brief Add core to pool
+ *
+ * @param[in] ctx OCF context
+ * @param[in] uuid Cache volume UUID
+ * @param[in] type OCF core volume type
+ *
+ * @retval 0 Core added to pool successfully
+ * @retval Non-zero Error occurred and adding core to pool failed
+ */
+int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type);
+
+/**
+ * @brief Lookup core volume in pool by UUID
+ *
+ * @param[in] ctx OCF context
+ * @param[in] uuid Cache volume UUID
+ * @param[in] type OCF core volume type
+ *
+ * @retval Handle to core volume with matching UUID
+ * @retval NULL No core volume with matching UUID found in pool
+ */
+ocf_volume_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid,
+ ocf_volume_type_t type);
+
+/**
+ * @brief Iterate over all objects in pool and call visitor callback
+ *
+ * @param[in] ctx OCF context
+ * @param[in] visitor Visitor callback
+ * @param[in] visitor_ctx Context for visitor callback
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_core_pool_visit(ocf_ctx_t ctx,
+ int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx);
+
+/**
+ * @brief Remove volume from pool
+ *
+ * Important: This function destroys volume instance but doesn't close it,
+ * so it should be either moved or closed before calling this function.
+ *
+ * @param[in] ctx OCF context
+ * @param[in] volume Core volume
+ */
+void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_volume_t volume);
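+
+/*
+ * Illustrative sketch (not part of the upstream header): walking the core
+ * pool with the visitor API above. ocf_uuid_to_str() is declared in
+ * ocf_volume.h; the printf() logging is just for illustration.
+ *
+ * @code
+ * static int example_print_pool_entry(ocf_uuid_t uuid, void *visitor_ctx)
+ * {
+ *	unsigned *count = visitor_ctx;
+ *
+ *	printf("core pool entry %u: %s\n", (*count)++, ocf_uuid_to_str(uuid));
+ *	return 0;
+ * }
+ *
+ * static int example_print_pool(ocf_ctx_t ctx)
+ * {
+ *	unsigned count = 0;
+ *
+ *	printf("%d cores in pool\n", ocf_mngt_core_pool_get_count(ctx));
+ *	return ocf_mngt_core_pool_visit(ctx, example_print_pool_entry, &count);
+ * }
+ * @endcode
+ */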
+
+#endif /* __OCF_MNGT_H__ */
diff --git a/src/spdk/ocf/inc/ocf_queue.h b/src/spdk/ocf/inc/ocf_queue.h
new file mode 100644
index 000000000..3d795cd4e
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_queue.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef OCF_QUEUE_H_
+#define OCF_QUEUE_H_
+
+/**
+ * @file
+ * @brief OCF queues API
+ */
+
+/**
+ * @brief I/O queue operations
+ */
+struct ocf_queue_ops {
+ /**
+ * @brief Kick I/O queue processing
+ *
+ * This function should inform worker, thread or any other queue
+ * processing mechanism, that there are new requests in queue to
+ * be processed. Processing requests synchronously in this function
+ * is not allowed.
+ *
+ * @param[in] q I/O queue to be kicked
+ */
+ void (*kick)(ocf_queue_t q);
+
+ /**
+ * @brief Kick I/O queue processing
+ *
+ * This function should inform worker, thread or any other queue
+ * processing mechanism, that there are new requests in queue to
+ * be processed. Function kick_sync is allowed to process requests
+ * synchronously without delegating them to the worker.
+ *
+ * @param[in] q I/O queue to be kicked
+ */
+ void (*kick_sync)(ocf_queue_t q);
+
+ /**
+ * @brief Stop I/O queue
+ *
+ * @param[in] q I/O queue being stopped
+ */
+ void (*stop)(ocf_queue_t q);
+};
+
+/**
+ * @brief Allocate IO queue and add it to list in cache
+ *
+ * @param[in] cache Handle to cache instance
+ * @param[out] queue Handle to created queue
+ * @param[in] ops Queue operations
+ *
+ * @return Zero on success, otherwise error code
+ */
+int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
+ const struct ocf_queue_ops *ops);
+
+/**
+ * @brief Increase reference counter in queue
+ *
+ * @param[in] queue Queue
+ *
+ */
+void ocf_queue_get(ocf_queue_t queue);
+
+/**
+ * @brief Decrease reference counter in queue
+ *
+ * @note If the queue has no more references, it is deallocated
+ *
+ * @param[in] queue Queue
+ *
+ */
+void ocf_queue_put(ocf_queue_t queue);
+
+/**
+ * @brief Process single request from queue
+ *
+ * @param[in] q Queue to run
+ */
+void ocf_queue_run_single(ocf_queue_t q);
+
+/**
+ * @brief Run queue processing
+ *
+ * @param[in] q Queue to run
+ */
+void ocf_queue_run(ocf_queue_t q);
+
+/**
+ * @brief Set queue private data
+ *
+ * @param[in] q I/O queue
+ * @param[in] priv Private data
+ */
+void ocf_queue_set_priv(ocf_queue_t q, void *priv);
+
+/**
+ * @brief Get queue private data
+ *
+ * @param[in] q I/O queue
+ *
+ * @retval I/O queue private data
+ */
+void *ocf_queue_get_priv(ocf_queue_t q);
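+
+/*
+ * Illustrative sketch (not part of the upstream header): a minimal queue
+ * implementation where kick_sync processes requests in place and kick defers
+ * them to a worker. The example_worker type and example_worker_wake()/
+ * example_worker_stop() helpers are hypothetical placeholders for the
+ * adapter's own threading primitives; only the OCF calls shown above are real.
+ *
+ * @code
+ * static void example_queue_kick_sync(ocf_queue_t q)
+ * {
+ *	ocf_queue_run(q);
+ * }
+ *
+ * static void example_queue_kick(ocf_queue_t q)
+ * {
+ *	// Wake a worker thread; it will call ocf_queue_run(q) outside of
+ *	// this context (synchronous processing is not allowed here).
+ *	example_worker_wake(ocf_queue_get_priv(q));
+ * }
+ *
+ * static void example_queue_stop(ocf_queue_t q)
+ * {
+ *	example_worker_stop(ocf_queue_get_priv(q));
+ * }
+ *
+ * static const struct ocf_queue_ops example_queue_ops = {
+ *	.kick_sync = example_queue_kick_sync,
+ *	.kick = example_queue_kick,
+ *	.stop = example_queue_stop,
+ * };
+ *
+ * static int example_create_queue(ocf_cache_t cache, struct example_worker *w,
+ *		ocf_queue_t *queue)
+ * {
+ *	int ret = ocf_queue_create(cache, queue, &example_queue_ops);
+ *
+ *	if (!ret)
+ *		ocf_queue_set_priv(*queue, w);
+ *	return ret;
+ * }
+ * @endcode
+ */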
+
+/**
+ * @brief Get number of pending requests in I/O queue
+ *
+ * @param[in] q I/O queue
+ *
+ * @retval Number of pending requests in I/O queue
+ */
+uint32_t ocf_queue_pending_io(ocf_queue_t q);
+
+/**
+ * @brief Get cache instance to which I/O queue belongs
+ *
+ * @param[in] q I/O queue
+ *
+ * @retval Cache instance
+ */
+ocf_cache_t ocf_queue_get_cache(ocf_queue_t q);
+
+#endif
diff --git a/src/spdk/ocf/inc/ocf_stats.h b/src/spdk/ocf/inc/ocf_stats.h
new file mode 100644
index 000000000..b326c7409
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_stats.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/**
+ * @file
+ * @brief OCF API for updating and resetting statistics
+ *
+ * This file contains routines pertaining to manipulation of OCF IO statistics.
+ */
+
+#ifndef __OCF_STATS_H__
+#define __OCF_STATS_H__
+
+/**
+ * Entire row of statistics
+ */
+struct ocf_stat {
+ /** Value */
+ uint64_t value;
+ /** percent x100 */
+ uint64_t fraction;
+};
+
+/**
+ * @brief Usage statistics in 4 KiB unit
+ *
+ * An example of presenting statistics:
+ * <pre>
+ * ╔══════════════════╤══════════╤═══════╤═════════════╗
+ * ║ Usage statistics │ Count │ % │ Units ║
+ * ╠══════════════════╪══════════╪═══════╪═════════════╣
+ * ║ Occupancy │ 20 │ 50.0 │ 4KiB blocks ║
+ * ║ Free │ 20 │ 50.0 │ 4KiB blocks ║
+ * ║ Clean │ 15 │ 75.0 │ 4KiB blocks ║
+ * ║ Dirty │ 5 │ 25.0 │ 4KiB blocks ║
+ * ╚══════════════════╧══════════╧═══════╧═════════════╝
+ * </pre>
+ */
+struct ocf_stats_usage {
+ struct ocf_stat occupancy;
+ struct ocf_stat free;
+ struct ocf_stat clean;
+ struct ocf_stat dirty;
+};
+
+/**
+ * @brief Request statistics
+ *
+ * An example of presenting statistics:
+ * <pre>
+ * ╔══════════════════════╤═══════╤═══════╤══════════╗
+ * ║ Request statistics │ Count │ % │ Units ║
+ * ╠══════════════════════╪═══════╪═══════╪══════════╣
+ * ║ Read hits │ 10 │ 4.5 │ Requests ║
+ * ║ Read partial misses │ 1 │ 0.5 │ Requests ║
+ * ║ Read full misses │ 211 │ 95.0 │ Requests ║
+ * ║ Read total │ 222 │ 100.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Write hits │ 0 │ 0.0 │ Requests ║
+ * ║ Write partial misses │ 0 │ 0.0 │ Requests ║
+ * ║ Write full misses │ 0 │ 0.0 │ Requests ║
+ * ║ Write total │ 0 │ 0.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Pass-Through reads │ 0 │ 0.0 │ Requests ║
+ * ║ Pass-Through writes │ 0 │ 0.0 │ Requests ║
+ * ║ Serviced requests │ 222 │ 100.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Total requests │ 222 │ 100.0 │ Requests ║
+ * ╚══════════════════════╧═══════╧═══════╧══════════╝
+ * </pre>
+ */
+struct ocf_stats_requests {
+ struct ocf_stat rd_hits;
+ struct ocf_stat rd_partial_misses;
+ struct ocf_stat rd_full_misses;
+ struct ocf_stat rd_total;
+ struct ocf_stat wr_hits;
+ struct ocf_stat wr_partial_misses;
+ struct ocf_stat wr_full_misses;
+ struct ocf_stat wr_total;
+ struct ocf_stat rd_pt;
+ struct ocf_stat wr_pt;
+ struct ocf_stat serviced;
+ struct ocf_stat total;
+};
+
+/**
+ * @brief Block statistics
+ *
+ * An example of presenting statistics:
+ * <pre>
+ * ╔════════════════════════════════════╤═══════╤═══════╤═════════════╗
+ * ║ Block statistics │ Count │ % │ Units ║
+ * ╠════════════════════════════════════╪═══════╪═══════╪═════════════╣
+ * ║ Reads from core volume(s) │ 426 │ 100.0 │ 4KiB blocks ║
+ * ║ Writes to core volume(s) │ 0 │ 0.0 │ 4KiB blocks ║
+ * ║ Total to/from core volume (s) │ 426 │ 100.0 │ 4KiB blocks ║
+ * ╟────────────────────────────────────┼───────┼───────┼─────────────╢
+ * ║ Reads from cache volume │ 13 │ 3.0 │ 4KiB blocks ║
+ * ║ Writes to cache volume │ 426 │ 97.0 │ 4KiB blocks ║
+ * ║ Total to/from cache volume │ 439 │ 100.0 │ 4KiB blocks ║
+ * ╟────────────────────────────────────┼───────┼───────┼─────────────╢
+ * ║ Reads from core(s) │ 439 │ 100.0 │ 4KiB blocks ║
+ * ║ Writes to core(s) │ 0 │ 0.0 │ 4KiB blocks ║
+ * ║ Total to/from core(s) │ 439 │ 100.0 │ 4KiB blocks ║
+ * ╚════════════════════════════════════╧═══════╧═══════╧═════════════╝
+ * </pre>
+ */
+struct ocf_stats_blocks {
+ struct ocf_stat core_volume_rd;
+ struct ocf_stat core_volume_wr;
+ struct ocf_stat core_volume_total;
+ struct ocf_stat cache_volume_rd;
+ struct ocf_stat cache_volume_wr;
+ struct ocf_stat cache_volume_total;
+ struct ocf_stat volume_rd;
+ struct ocf_stat volume_wr;
+ struct ocf_stat volume_total;
+};
+
+/**
+ * @brief Errors statistics
+ *
+ * An example of presenting statistics:
+ * <pre>
+ * ╔════════════════════╤═══════╤═════╤══════════╗
+ * ║ Error statistics │ Count │ % │ Units ║
+ * ╠════════════════════╪═══════╪═════╪══════════╣
+ * ║ Cache read errors │ 0 │ 0.0 │ Requests ║
+ * ║ Cache write errors │ 0 │ 0.0 │ Requests ║
+ * ║ Cache total errors │ 0 │ 0.0 │ Requests ║
+ * ╟────────────────────┼───────┼─────┼──────────╢
+ * ║ Core read errors │ 0 │ 0.0 │ Requests ║
+ * ║ Core write errors │ 0 │ 0.0 │ Requests ║
+ * ║ Core total errors │ 0 │ 0.0 │ Requests ║
+ * ╟────────────────────┼───────┼─────┼──────────╢
+ * ║ Total errors │ 0 │ 0.0 │ Requests ║
+ * ╚════════════════════╧═══════╧═════╧══════════╝
+ * </pre>
+ */
+struct ocf_stats_errors {
+ struct ocf_stat core_volume_rd;
+ struct ocf_stat core_volume_wr;
+ struct ocf_stat core_volume_total;
+ struct ocf_stat cache_volume_rd;
+ struct ocf_stat cache_volume_wr;
+ struct ocf_stat cache_volume_total;
+ struct ocf_stat total;
+};
+
+/**
+ * @brief Collect statistics for given cache
+ *
+ * @param cache Cache instance for which statistics will be collected
+ * @param usage Usage statistics
+ * @param req Request statistics
+ * @param blocks Blocks statistics
+ * @param errors Errors statistics
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_stats_collect_cache(ocf_cache_t cache,
+ struct ocf_stats_usage *usage,
+ struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks,
+ struct ocf_stats_errors *errors);
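+
+/*
+ * Illustrative sketch (not part of the upstream header): collecting cache
+ * statistics and printing occupancy. The fraction field is documented above
+ * as percent x100, hence the /100 and %100 split below; the printf() call is
+ * just for illustration.
+ *
+ * @code
+ * static int example_print_occupancy(ocf_cache_t cache)
+ * {
+ *	struct ocf_stats_usage usage;
+ *	struct ocf_stats_requests req;
+ *	struct ocf_stats_blocks blocks;
+ *	struct ocf_stats_errors errors;
+ *	int ret;
+ *
+ *	ret = ocf_stats_collect_cache(cache, &usage, &req, &blocks, &errors);
+ *	if (ret)
+ *		return ret;
+ *
+ *	printf("Occupancy: %llu 4KiB blocks (%llu.%02llu%%)\n",
+ *		(unsigned long long)usage.occupancy.value,
+ *		(unsigned long long)usage.occupancy.fraction / 100,
+ *		(unsigned long long)usage.occupancy.fraction % 100);
+ *	return 0;
+ * }
+ * @endcode
+ */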
+
+/**
+ * @brief Collect statistics for given core
+ *
+ * @param core Core for which statistics will be collected
+ * @param usage Usage statistics
+ * @param req Request statistics
+ * @param blocks Blocks statistics
+ * @param errors Errors statistics
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_stats_collect_core(ocf_core_t core,
+ struct ocf_stats_usage *usage,
+ struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks,
+ struct ocf_stats_errors *errors);
+
+/**
+ * @brief Collect statistics for given ioclass
+ *
+ * @param core Core handle for which statistics will be collected
+ * @param part_id Ioclass id for which statistics will be collected
+ * @param usage Usage statistics
+ * @param req Request statistics
+ * @param blocks Blocks statistics
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_stats_collect_part_core(ocf_core_t core, ocf_part_id_t part_id,
+ struct ocf_stats_usage *usage, struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks);
+
+/**
+ * @brief Collect statistics for given ioclass
+ *
+ * @param cache Cache instance for which statistics will be collected
+ * @param part_id Ioclass id for which statistics will be collected
+ * @param usage Usage statistics
+ * @param req Request statistics
+ * @param blocks Blocks statistics
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error
+ */
+int ocf_stats_collect_part_cache(ocf_cache_t cache, ocf_part_id_t part_id,
+ struct ocf_stats_usage *usage, struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks);
+
+/**
+ * @brief Initialize or reset core statistics
+ *
+ * Initialize or reset counters used for statistics.
+ *
+ * @param[in] core Core handle
+ */
+void ocf_core_stats_initialize(ocf_core_t core);
+
+/**
+ * @brief Initialize or reset statistics of all cores in cache
+ *
+ * Initialize or reset counters used for statistics.
+ *
+ * @param[in] cache Cache handle
+ */
+void ocf_core_stats_initialize_all(ocf_cache_t cache);
+
+#endif /* __OCF_STATS_H__ */
diff --git a/src/spdk/ocf/inc/ocf_trace.h b/src/spdk/ocf/inc/ocf_trace.h
new file mode 100644
index 000000000..4a0f61390
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_trace.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_TRACE_H__
+#define __OCF_TRACE_H__
+
+#include "ocf_def.h"
+#include "ocf_types.h"
+
+typedef uint64_t log_sid_t;
+
+#define OCF_EVENT_VERSION 1
+#define OCF_TRACING_STOP 1
+
+/**
+ * @brief OCF trace (event) type
+ */
+typedef enum {
+ /** Cache description; this event is pushed first and carries the trace
+ * format version, the number of cores and details about the cache */
+ ocf_event_type_cache_desc,
+
+ /** Event describing ocf core */
+ ocf_event_type_core_desc,
+
+ /** IO */
+ ocf_event_type_io,
+
+ /** IO completion */
+ ocf_event_type_io_cmpl,
+
+ /** IO in file domain */
+ ocf_event_type_io_file,
+} ocf_event_type;
+
+/**
+ * @brief Generic OCF trace event
+ */
+struct ocf_event_hdr {
+ /** Event sequence ID */
+ log_sid_t sid;
+
+ /** Time stamp */
+ uint64_t timestamp;
+
+ /** Trace event type */
+ ocf_event_type type;
+
+ /** Size of this event */
+ uint32_t size;
+};
+
+/**
+ * @brief Cache trace description
+*/
+struct ocf_event_cache_desc {
+ /** Event header */
+ struct ocf_event_hdr hdr;
+
+ /** Cache name */
+ const char *name;
+
+ /** Cache line size */
+ ocf_cache_line_size_t cache_line_size;
+
+ /** Cache mode */
+ ocf_cache_mode_t cache_mode;
+
+ /** Cache size in bytes */
+ uint64_t cache_size;
+
+ /** Number of cores */
+ uint32_t cores_no;
+
+ /** Trace version */
+ uint32_t version;
+};
+
+/**
+ * @brief Core trace description
+*/
+struct ocf_event_core_desc {
+ /** Event header */
+ struct ocf_event_hdr hdr;
+
+ /** Core name */
+ const char *name;
+
+ /** Core size in bytes */
+ uint64_t core_size;
+};
+
+/** @brief IO operation */
+typedef enum {
+ /** Read */
+ ocf_event_operation_rd = 'R',
+
+ /** Write */
+ ocf_event_operation_wr = 'W',
+
+ /** Flush */
+ ocf_event_operation_flush = 'F',
+
+ /** Discard */
+ ocf_event_operation_discard = 'D',
+} ocf_event_operation_t;
+
+/**
+ * @brief IO trace event
+ */
+struct ocf_event_io {
+ /** Trace event header */
+ struct ocf_event_hdr hdr;
+
+ /** Address of IO in bytes */
+ uint64_t addr;
+
+ /** Size of IO in bytes */
+ uint32_t len;
+
+ /** IO class of IO */
+ uint32_t io_class;
+
+ /** Core name */
+ const char *core_name;
+
+ /** Operation type: read, write, flush or discard */
+ ocf_event_operation_t operation;
+};
+
+/**
+ * @brief IO completion event
+ */
+struct ocf_event_io_cmpl {
+ /** Trace event header */
+ struct ocf_event_hdr hdr;
+
+ /** Reference event sequence ID */
+ log_sid_t rsid;
+
+ /** Was IO a cache hit or miss */
+ bool is_hit;
+};
+
+
+/** @brief Push log callback.
+ *
+ * @param[in] cache OCF cache
+ * @param[in] trace_ctx Tracing context
+ * @param[in] queue Queue handle
+ * @param[in] trace Event log
+ * @param[in] size Size of event log
+ */
+typedef void (*ocf_trace_callback_t)(ocf_cache_t cache, void *trace_ctx,
+ ocf_queue_t queue, const void* trace, const uint32_t size);
+
+/**
+ * @brief Start tracing
+ *
+ * @param[in] cache OCF cache
+ * @param[in] trace_ctx Tracing context
+ * @param[in] trace_callback Callback used for pushing logs
+ *
+ * @retval 0 Tracing started successfully
+ * @retval Non-zero Error
+ */
+int ocf_mngt_start_trace(ocf_cache_t cache, void *trace_ctx,
+ ocf_trace_callback_t trace_callback);
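+
+/*
+ * Illustrative sketch (not part of the upstream header): a trace callback
+ * that decodes IO events using the structures above. The fprintf() logging
+ * is just for illustration; a real consumer would typically copy the event
+ * into a lock-free buffer instead.
+ *
+ * @code
+ * static void example_trace_cb(ocf_cache_t cache, void *trace_ctx,
+ *		ocf_queue_t queue, const void *trace, const uint32_t size)
+ * {
+ *	const struct ocf_event_hdr *hdr = trace;
+ *
+ *	if (hdr->type == ocf_event_type_io) {
+ *		const struct ocf_event_io *io = trace;
+ *
+ *		fprintf(stderr, "sid=%llu %c addr=%llu len=%u class=%u\n",
+ *			(unsigned long long)io->hdr.sid, (char)io->operation,
+ *			(unsigned long long)io->addr, io->len, io->io_class);
+ *	}
+ * }
+ *
+ * // Registered with: ocf_mngt_start_trace(cache, NULL, example_trace_cb);
+ * @endcode
+ */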
+
+/**
+ * @brief Stop tracing
+ *
+ * @param[in] cache OCF cache
+ *
+ * @retval 0 Tracing stopped successfully
+ * @retval Non-zero Error
+ */
+int ocf_mngt_stop_trace(ocf_cache_t cache);
+
+#endif /* __OCF_TRACE_H__ */
diff --git a/src/spdk/ocf/inc/ocf_types.h b/src/spdk/ocf/inc/ocf_types.h
new file mode 100644
index 000000000..ead3b0d06
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_types.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/**
+ * @file
+ * @brief OCF types
+ */
+#ifndef __OCF_TYPES_H_
+#define __OCF_TYPES_H_
+
+#include "ocf_env_headers.h"
+
+/**
+ * @brief cache line type (by default designated as 32 bit unsigned integer)
+ */
+typedef uint32_t ocf_cache_line_t;
+
+/**
+ * @brief core id type (by default designated as 16 bit unsigned integer)
+ */
+typedef uint16_t ocf_core_id_t;
+
+/**
+ * @brief core sequence number type (by default designated as 16 bit unsigned integer)
+ */
+typedef uint16_t ocf_seq_no_t;
+
+/**
+ * @brief partition id type (by default designated as 16 bit unsigned integer)
+ */
+typedef uint16_t ocf_part_id_t;
+
+/**
+ * @brief handle to object designating ocf context
+ */
+typedef struct ocf_ctx *ocf_ctx_t;
+
+struct ocf_cache;
+/**
+ * @brief handle to object designating ocf cache device
+ */
+typedef struct ocf_cache *ocf_cache_t;
+
+struct ocf_core;
+/**
+ * @brief handle to object designating ocf core object
+ */
+typedef struct ocf_core *ocf_core_t;
+
+struct ocf_volume;
+/**
+ * @brief handle to object designating ocf volume
+ */
+typedef struct ocf_volume *ocf_volume_t;
+
+
+struct ocf_volume_type;
+/**
+ * @brief handle to volume type
+ */
+typedef struct ocf_volume_type *ocf_volume_type_t;
+
+/**
+ * @brief handle to volume uuid
+ */
+typedef struct ocf_volume_uuid *ocf_uuid_t;
+
+/**
+ * @brief handle to object designating ocf context object
+ */
+typedef void ctx_data_t;
+
+/**
+ * @brief handle to I/O queue
+ */
+typedef struct ocf_queue *ocf_queue_t;
+
+/**
+ * @brief handle to cleaner
+ */
+typedef struct ocf_cleaner *ocf_cleaner_t;
+
+/**
+ * @brief handle to metadata_updater
+ */
+typedef struct ocf_metadata_updater *ocf_metadata_updater_t;
+
+/**
+ * @brief handle to logger
+ */
+typedef struct ocf_logger *ocf_logger_t;
+
+#endif
diff --git a/src/spdk/ocf/inc/ocf_volume.h b/src/spdk/ocf/inc/ocf_volume.h
new file mode 100644
index 000000000..ca3f9c238
--- /dev/null
+++ b/src/spdk/ocf/inc/ocf_volume.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_VOLUME_H__
+#define __OCF_VOLUME_H__
+
+/**
+ * @file
+ * @brief OCF volume API
+ */
+
+#include "ocf_types.h"
+#include "ocf_env.h"
+#include "ocf/ocf_err.h"
+
+struct ocf_io;
+
+/**
+ * @brief OCF volume UUID maximum allowed size
+ */
+#define OCF_VOLUME_UUID_MAX_SIZE (4096UL - sizeof(uint32_t))
+
+/**
+ * @brief OCF volume UUID
+ */
+struct ocf_volume_uuid {
+ size_t size;
+ /*!< UUID data size */
+
+ void *data;
+ /*!< UUID data content */
+};
+
+/**
+ * @brief This structure describes volume capabilities
+ */
+struct ocf_volume_caps {
+ uint32_t atomic_writes : 1;
+ /*!< Volume supports atomic writes */
+};
+
+/**
+ * @brief OCF volume interface declaration
+ */
+struct ocf_volume_ops {
+ /**
+ * @brief Submit IO on this volume
+ *
+ * @param[in] io IO to be submitted
+ */
+ void (*submit_io)(struct ocf_io *io);
+
+ /**
+ * @brief Submit IO with flush command
+ *
+ * @param[in] io IO to be submitted
+ */
+ void (*submit_flush)(struct ocf_io *io);
+
+ /**
+ * @brief Submit IO with metadata
+ *
+ * @param[in] io IO to be submitted
+ */
+ void (*submit_metadata)(struct ocf_io *io);
+
+ /**
+ * @brief Submit IO with discard command
+ *
+ * @param[in] io IO to be submitted
+ */
+ void (*submit_discard)(struct ocf_io *io);
+
+ /**
+ * @brief Submit operation to write zeroes to target address (including
+ * metadata extended LBAs in atomic mode)
+ *
+ * @param[in] io IO description (addr, size)
+ */
+ void (*submit_write_zeroes)(struct ocf_io *io);
+
+ /**
+ * @brief Open volume
+ *
+ * @note This function performs volume initialization and should
+ * be called before any other operation on volume
+ *
+ * @param[in] volume Volume
+ * @param[in] volume_params optional volume parameters, opaque to OCF
+ *
+ * @return Zero on success, otherwise error code
+ */
+ int (*open)(ocf_volume_t volume, void *volume_params);
+
+ /**
+ * @brief Close volume
+ *
+ * @param[in] volume Volume
+ */
+ void (*close)(ocf_volume_t volume);
+
+ /**
+ * @brief Get maximum io size
+ *
+ * @param[in] volume Volume
+ *
+ * @return Maximum io size in bytes
+ */
+ unsigned int (*get_max_io_size)(ocf_volume_t volume);
+
+ /**
+ * @brief Get volume length
+ *
+ * @param[in] volume Volume
+ *
+ * @return Volume length in bytes
+ */
+ uint64_t (*get_length)(ocf_volume_t volume);
+};
+
+/**
+ * @brief This structure describes volume properties
+ */
+struct ocf_volume_properties {
+ const char *name;
+ /*!< The name of volume operations */
+
+ uint32_t io_priv_size;
+ /*!< Size of io private context structure */
+
+ uint32_t volume_priv_size;
+ /*!< Size of volume private context structure */
+
+ struct ocf_volume_caps caps;
+ /*!< Volume capabilities */
+
+ struct ocf_volume_ops ops;
+ /*!< Volume operations */
+
+ struct ocf_io_ops io_ops;
+ /*!< IO operations */
+
+ void (*deinit)(void);
+ /*!< Deinitialize volume type */
+};
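+
+/*
+ * Illustrative sketch (not part of the upstream header): the skeleton of a
+ * volume backend. The example_backend_*() helpers are hypothetical
+ * placeholders for the adapter's own storage stack; the .io_ops member
+ * (struct ocf_io_ops, declared in ocf_io.h) and type registration through
+ * the ocf_ctx API are left out for brevity.
+ *
+ * @code
+ * static void example_submit_io(struct ocf_io *io)
+ * {
+ *	// Hand the IO to the backing device; completion is signalled later
+ *	// through the ocf_io completion callback (see ocf_io.h).
+ *	example_backend_submit(io);
+ * }
+ *
+ * static int example_open(ocf_volume_t volume, void *volume_params)
+ * {
+ *	return example_backend_open(ocf_volume_get_uuid(volume),
+ *			ocf_volume_get_priv(volume));
+ * }
+ *
+ * static void example_close(ocf_volume_t volume)
+ * {
+ *	example_backend_close(ocf_volume_get_priv(volume));
+ * }
+ *
+ * static uint64_t example_get_length(ocf_volume_t volume)
+ * {
+ *	return example_backend_size_bytes(ocf_volume_get_priv(volume));
+ * }
+ *
+ * static unsigned int example_get_max_io_size(ocf_volume_t volume)
+ * {
+ *	return 128 * 1024;
+ * }
+ *
+ * static const struct ocf_volume_properties example_volume_properties = {
+ *	.name = "example_volume",
+ *	.io_priv_size = 0,
+ *	.volume_priv_size = sizeof(void *),
+ *	.caps = { .atomic_writes = 0 },
+ *	.ops = {
+ *		.submit_io = example_submit_io,
+ *		.open = example_open,
+ *		.close = example_close,
+ *		.get_length = example_get_length,
+ *		.get_max_io_size = example_get_max_io_size,
+ *	},
+ * };
+ * @endcode
+ */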
+
+/**
+ * @brief Initialize UUID from string
+ *
+ * @param[in] uuid UUID to be initialized
+ * @param[in] str NULL-terminated string
+ *
+ * @return Zero when success, otherwise error
+ */
+static inline int ocf_uuid_set_str(ocf_uuid_t uuid, char *str)
+{
+ size_t len = env_strnlen(str, OCF_VOLUME_UUID_MAX_SIZE);
+
+ if (len >= OCF_VOLUME_UUID_MAX_SIZE)
+ return -OCF_ERR_INVAL;
+
+ uuid->data = str;
+ uuid->size = len + 1;
+
+ return 0;
+}
+
+/**
+ * @brief Obtain string from UUID
+ * @param[in] uuid pointer to UUID
+ * @return String contained within UUID
+ */
+static inline const char *ocf_uuid_to_str(const struct ocf_volume_uuid *uuid)
+{
+ return (const char *)uuid->data;
+}
+
+/**
+ * @brief Initialize volume
+ *
+ * @param[in] volume volume handle
+ * @param[in] type cache/core volume type
+ * @param[in] uuid OCF volume UUID
+ * @param[in] uuid_copy Create a copy of UUID data
+ *
+ * @return Zero when success, otherwise error
+ */
+int ocf_volume_init(ocf_volume_t volume, ocf_volume_type_t type,
+ struct ocf_volume_uuid *uuid, bool uuid_copy);
+
+/**
+ * @brief Deinitialize volume
+ *
+ * @param[in] volume volume handle
+ */
+void ocf_volume_deinit(ocf_volume_t volume);
+
+/**
+ * @brief Allocate and initialize volume
+ *
+ * @param[out] volume pointer to volume handle
+ * @param[in] type cache/core volume type
+ * @param[in] uuid OCF volume UUID
+ *
+ * @return Zero when success, otherwise error
+ */
+int ocf_volume_create(ocf_volume_t *volume, ocf_volume_type_t type,
+ struct ocf_volume_uuid *uuid);
+
+/**
+ * @brief Deinitialize and free volume
+ *
+ * @param[in] volume volume handle
+ */
+void ocf_volume_destroy(ocf_volume_t volume);
+
+/**
+ * @brief Get volume type
+ *
+ * @param[in] volume Volume
+ *
+ * @return Volume type
+ */
+ocf_volume_type_t ocf_volume_get_type(ocf_volume_t volume);
+
+/**
+ * @brief Get volume UUID
+ *
+ * @param[in] volume Volume
+ *
+ * @return UUID of volume
+ */
+const struct ocf_volume_uuid *ocf_volume_get_uuid(ocf_volume_t volume);
+
+/**
+ * @brief Get private context of volume
+ *
+ * @param[in] volume Volume
+ *
+ * @return Volume private context
+ */
+void *ocf_volume_get_priv(ocf_volume_t volume);
+
+/**
+ * @brief Get cache handle for given volume
+ *
+ * @param volume volume handle
+ *
+ * @return Handle to cache to which the volume belongs
+ */
+ocf_cache_t ocf_volume_get_cache(ocf_volume_t volume);
+
+/**
+ * @brief Check if volume supports atomic mode
+ *
+ * @param[in] volume Volume
+ *
+ * @return Non-zero value if volume is atomic, otherwise zero
+ */
+int ocf_volume_is_atomic(ocf_volume_t volume);
+
+/**
+ * @brief Allocate new io
+ *
+ * @param[in] volume Volume
+ * @param[in] queue IO queue handle
+ * @param[in] addr OCF IO destination address
+ * @param[in] bytes OCF IO size in bytes
+ * @param[in] dir OCF IO direction
+ * @param[in] io_class OCF IO destination class
+ * @param[in] flags OCF IO flags
+ *
+ * @return Pointer to new IO on success, otherwise NULL
+ */
+struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags);
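+
+/*
+ * Illustrative sketch (not part of the upstream header): allocating and
+ * submitting a read IO. OCF_READ, ocf_io_set_data() and ocf_io_set_cmpl()
+ * come from ocf_def.h / ocf_io.h and are assumptions here; error handling
+ * (including releasing the IO on the failure path) is reduced to a minimum.
+ *
+ * @code
+ * static int example_read_4k(ocf_volume_t volume, ocf_queue_t queue,
+ *		uint64_t addr, ctx_data_t *data,
+ *		void (*end)(struct ocf_io *io, int error))
+ * {
+ *	struct ocf_io *io;
+ *	int ret;
+ *
+ *	io = ocf_volume_new_io(volume, queue, addr, 4096, OCF_READ, 0, 0);
+ *	if (!io)
+ *		return -OCF_ERR_NO_MEM;
+ *
+ *	ret = ocf_io_set_data(io, data, 0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ocf_io_set_cmpl(io, NULL, NULL, end);
+ *	ocf_volume_submit_io(io);
+ *	return 0;
+ * }
+ * @endcode
+ */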
+
+
+/**
+ * @brief Submit io to volume
+ *
+ * @param[in] io IO
+ */
+void ocf_volume_submit_io(struct ocf_io *io);
+
+/**
+ * @brief Submit flush to volume
+ *
+ * @param[in] io IO
+ */
+void ocf_volume_submit_flush(struct ocf_io *io);
+
+/**
+ * @brief Submit discard to volume
+ *
+ * @param[in] io IO
+ */
+void ocf_volume_submit_discard(struct ocf_io *io);
+
+/**
+ * @brief Open volume
+ *
+ * @param[in] volume Volume
+ * @param[in] volume_params Opaque volume params
+ *
+ * @return Zero when success, otherwise error
+ */
+int ocf_volume_open(ocf_volume_t volume, void *volume_params);
+
+/**
+ * @brief Close volume
+ *
+ * @param[in] volume Volume
+ */
+void ocf_volume_close(ocf_volume_t volume);
+
+/**
+ * @brief Get volume max io size
+ *
+ * @param[in] volume Volume
+ *
+ * @return Volume max io size in bytes
+ */
+unsigned int ocf_volume_get_max_io_size(ocf_volume_t volume);
+
+/**
+ * @brief Get volume length
+ *
+ * @param[in] volume Volume
+ *
+ * @return Length of volume in bytes
+ */
+uint64_t ocf_volume_get_length(ocf_volume_t volume);
+
+#endif /* __OCF_VOLUME_H__ */
diff --git a/src/spdk/ocf/inc/promotion/nhit.h b/src/spdk/ocf/inc/promotion/nhit.h
new file mode 100644
index 000000000..32f4dac63
--- /dev/null
+++ b/src/spdk/ocf/inc/promotion/nhit.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_PROMOTION_NHIT_H__
+#define __OCF_PROMOTION_NHIT_H__
+
+enum ocf_nhit_param {
+ ocf_nhit_insertion_threshold,
+ ocf_nhit_trigger_threshold,
+ ocf_nhit_param_max
+};
+
+#define OCF_NHIT_MIN_THRESHOLD 2
+#define OCF_NHIT_MAX_THRESHOLD 1000
+#define OCF_NHIT_THRESHOLD_DEFAULT 3
+
+#define OCF_NHIT_MIN_TRIGGER 0
+#define OCF_NHIT_MAX_TRIGGER 100
+#define OCF_NHIT_TRIGGER_DEFAULT 80
+
+#endif /* __OCF_PROMOTION_NHIT_H__ */
diff --git a/src/spdk/ocf/src/cleaning/acp.c b/src/spdk/ocf/src/cleaning/acp.c
new file mode 100644
index 000000000..85d7be907
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/acp.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cleaning.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_request.h"
+#include "../cleaning/acp.h"
+#include "../engine/engine_common.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+#include "cleaning_priv.h"
+
+#define OCF_ACP_DEBUG 0
+
+#if 1 == OCF_ACP_DEBUG
+
+#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
+
+#define OCF_DEBUG_LOG(cache, format, ...) \
+ ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
+ format"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
+
+#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
+ ##__VA_ARGS__)
+
+#define ACP_DEBUG_INIT(acp) acp->checksum = 0
+#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line
+#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line
+#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum)
+#else
+#define OCF_DEBUG_PREFIX
+#define OCF_DEBUG_LOG(cache, format, ...)
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#define ACP_DEBUG_INIT(acp)
+#define ACP_DEBUG_BEGIN(acp, cache_line)
+#define ACP_DEBUG_END(acp, cache_line)
+#define ACP_DEBUG_CHECK(acp)
+#endif
+
+#define ACP_CHUNK_SIZE (100 * MiB)
+
+/* minimal backoff before a chunk is cleaned again after an error, in seconds */
+#define ACP_CHUNK_CLEANING_BACKOFF_TIME 5
+
+/* time to sleep when nothing to clean in ms */
+#define ACP_BACKOFF_TIME_MS 1000
+
+#define ACP_MAX_BUCKETS 11
+
+/* Upper bucket thresholds, expressed as percent of dirty cache lines in a
+ * chunk. The first bucket must keep threshold=0 - it is never cleaned, so
+ * dirty chunks must not land there and stay dirty forever. The last bucket
+ * must stay at 100 so that every chunk falls into some bucket */
+static const uint16_t ACP_BUCKET_DEFAULTS[ACP_MAX_BUCKETS] = { 0, 10, 20, 30, 40,
+ 50, 60, 70, 80, 90, 100 };
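+
+/* Worked example of how these defaults translate into per-bucket thresholds
+ * (see cleaning_policy_acp_initialize() below): with a 4 KiB cache line a
+ * 100 MiB chunk holds 100 MiB / 4 KiB = 25600 lines, so the thresholds
+ * become 0, 2560, 5120, ... 25600 dirty lines. A chunk moves up a bucket in
+ * _acp_update_bucket() once its num_dirty exceeds the current threshold. */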
+
+struct acp_flush_context {
+ /* number of cache lines in flush */
+ uint64_t size;
+ /* chunk for error handling */
+ struct acp_chunk_info *chunk;
+ /* cache lines to flush */
+ struct flush_data data[OCF_ACP_MAX_FLUSH_MAX_BUFFERS];
+ /* flush error code */
+ int error;
+};
+
+struct acp_state {
+ /* currently cleaned chunk */
+ struct acp_chunk_info *chunk;
+
+ /* cache line iterator within current chunk */
+ unsigned iter;
+
+ /* true if there are cache lines left to process
+ * in the current chunk */
+ bool in_progress;
+};
+
+struct acp_chunk_info {
+ struct list_head list;
+ uint64_t chunk_id;
+ uint64_t next_cleaning_timestamp;
+ ocf_core_id_t core_id;
+ uint16_t num_dirty;
+ uint8_t bucket_id;
+};
+
+struct acp_bucket {
+ struct list_head chunk_list;
+ uint16_t threshold; /* threshold in clines */
+};
+
+struct acp_context {
+ env_rwsem chunks_lock;
+
+ /* number of chunks per core */
+ uint64_t num_chunks[OCF_CORE_MAX];
+
+ /* per core array of all chunks */
+ struct acp_chunk_info *chunk_info[OCF_CORE_MAX];
+
+ struct acp_bucket bucket_info[ACP_MAX_BUCKETS];
+
+ /* total number of chunks in cache */
+ uint64_t chunks_total;
+
+ /* structure to keep track of I/O in progress */
+ struct acp_flush_context flush;
+
+ /* cleaning state persistent over subsequent calls to
+ perform_cleaning */
+ struct acp_state state;
+
+ /* cache handle */
+ ocf_cache_t cache;
+
+ /* cleaner completion callback */
+ ocf_cleaner_end_t cmpl;
+
+#if 1 == OCF_ACP_DEBUG
+ /* debug only */
+ uint64_t checksum;
+#endif
+};
+
+struct acp_core_line_info
+{
+ ocf_cache_line_t cache_line;
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+};
+
+#define ACP_LOCK_CHUNKS_RD() env_rwsem_down_read(&acp->chunks_lock)
+
+#define ACP_UNLOCK_CHUNKS_RD() env_rwsem_up_read(&acp->chunks_lock)
+
+#define ACP_LOCK_CHUNKS_WR() env_rwsem_down_write(&acp->chunks_lock)
+
+#define ACP_UNLOCK_CHUNKS_WR() env_rwsem_up_write(&acp->chunks_lock)
+
+static struct acp_context *_acp_get_ctx_from_cache(struct ocf_cache *cache)
+{
+ return cache->cleaner.cleaning_policy_context;
+}
+
+static struct acp_cleaning_policy_meta* _acp_meta_get(
+ struct ocf_cache *cache, uint32_t cache_line,
+ struct cleaning_policy_meta *policy_meta)
+{
+ ocf_metadata_get_cleaning_policy(cache, cache_line, policy_meta);
+ return &policy_meta->meta.acp;
+}
+
+static void _acp_meta_set(struct ocf_cache *cache, uint32_t cache_line,
+ struct cleaning_policy_meta *policy_meta)
+{
+ ocf_metadata_set_cleaning_policy(cache, cache_line, policy_meta);
+}
+
+static struct acp_core_line_info _acp_core_line_info(struct ocf_cache *cache,
+ ocf_cache_line_t cache_line)
+{
+ struct acp_core_line_info acp_core_line_info = {.cache_line = cache_line, };
+ ocf_metadata_get_core_info(cache, cache_line, &acp_core_line_info.core_id,
+ &acp_core_line_info.core_line);
+ return acp_core_line_info;
+}
+
+static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ struct acp_core_line_info core_line =
+ _acp_core_line_info(cache, cache_line);
+ uint64_t chunk_id;
+
+ chunk_id = core_line.core_line * ocf_line_size(cache) / ACP_CHUNK_SIZE;
+
+ return &acp->chunk_info[core_line.core_id][chunk_id];
+}
+
+static void _acp_remove_cores(struct ocf_cache *cache)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ for_each_core(cache, core, core_id)
+ cleaning_policy_acp_remove_core(cache, core_id);
+}
+
+static int _acp_load_cores(struct ocf_cache *cache)
+{
+
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ int err = 0;
+
+ for_each_core(cache, core, core_id) {
+ OCF_DEBUG_PARAM(cache, "loading core %i\n", core_id);
+ err = cleaning_policy_acp_add_core(cache, core_id);
+ if (err)
+ break;
+ }
+
+ if (err)
+ _acp_remove_cores(cache);
+
+ return err;
+}
+
+void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct cleaning_policy_meta policy_meta;
+ struct acp_cleaning_policy_meta *acp_meta;
+
+ /* TODO: acp meta is going to be removed soon */
+ acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
+ acp_meta->dirty = 0;
+ _acp_meta_set(cache, cache_line, &policy_meta);
+}
+
+void cleaning_policy_acp_deinitialize(struct ocf_cache *cache)
+{
+ struct acp_context *acp;
+
+ _acp_remove_cores(cache);
+
+ acp = cache->cleaner.cleaning_policy_context;
+ env_rwsem_destroy(&acp->chunks_lock);
+
+ env_vfree(cache->cleaner.cleaning_policy_context);
+ cache->cleaner.cleaning_policy_context = NULL;
+}
+
+static void _acp_rebuild(struct ocf_cache *cache)
+{
+ ocf_cache_line_t cline;
+ ocf_core_id_t cline_core_id;
+ uint32_t step = 0;
+
+ for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
+ ocf_metadata_get_core_and_part_id(cache, cline, &cline_core_id,
+ NULL);
+
+ OCF_COND_RESCHED_DEFAULT(step);
+
+ if (cline_core_id == OCF_CORE_MAX)
+ continue;
+
+ cleaning_policy_acp_init_cache_block(cache, cline);
+
+ if (!metadata_test_dirty(cache, cline))
+ continue;
+
+ cleaning_policy_acp_set_hot_cache_line(cache, cline);
+ }
+
+ ocf_cache_log(cache, log_info, "Finished rebuilding ACP metadata\n");
+}
+
+void cleaning_policy_acp_setup(struct ocf_cache *cache)
+{
+ struct acp_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+ config->thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP;
+ config->flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS;
+}
+
+int cleaning_policy_acp_initialize(struct ocf_cache *cache,
+ int init_metadata)
+{
+ struct acp_context *acp;
+ int err, i;
+
+ /* bug if max chunk number would overflow dirty_no array type */
+#if defined (BUILD_BUG_ON)
+ BUILD_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
+ 1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
+#else
+ ENV_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
+ 1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
+#endif
+
+ ENV_BUG_ON(cache->cleaner.cleaning_policy_context);
+
+ acp = env_vzalloc(sizeof(*acp));
+ if (!acp) {
+ ocf_cache_log(cache, log_err, "acp context allocation error\n");
+ return -OCF_ERR_NO_MEM;
+ }
+
+ err = env_rwsem_init(&acp->chunks_lock);
+ if (err) {
+ env_vfree(acp);
+ return err;
+ }
+
+ cache->cleaner.cleaning_policy_context = acp;
+ acp->cache = cache;
+
+ for (i = 0; i < ACP_MAX_BUCKETS; i++) {
+ INIT_LIST_HEAD(&acp->bucket_info[i].chunk_list);
+ acp->bucket_info[i].threshold =
+ ((ACP_CHUNK_SIZE/ocf_line_size(cache)) *
+ ACP_BUCKET_DEFAULTS[i]) / 100;
+ }
+
+ if (cache->conf_meta->core_count > 0) {
+ err = _acp_load_cores(cache);
+ if (err) {
+ cleaning_policy_acp_deinitialize(cache);
+ return err;
+ }
+ }
+
+ _acp_rebuild(cache);
+ ocf_kick_cleaner(cache);
+
+ return 0;
+}
+
+int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t param_value)
+{
+ struct acp_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+ switch (param_id) {
+ case ocf_acp_wake_up_time:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ACP_MIN_WAKE_UP,
+ OCF_ACP_MAX_WAKE_UP,
+ "thread_wakeup_time");
+ config->thread_wakeup_time = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread "
+ "wake-up time: %d\n", config->thread_wakeup_time);
+ ocf_kick_cleaner(cache);
+ break;
+ case ocf_acp_flush_max_buffers:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ACP_MIN_FLUSH_MAX_BUFFERS,
+ OCF_ACP_MAX_FLUSH_MAX_BUFFERS,
+ "flush_max_buffers");
+ config->flush_max_buffers = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread max "
+ "buffers flushed per iteration: %d\n",
+ config->flush_max_buffers);
+ break;
+ default:
+ return -OCF_ERR_INVAL;
+ }
+
+ return 0;
+}
+
+int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t *param_value)
+{
+ struct acp_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+ switch (param_id) {
+ case ocf_acp_flush_max_buffers:
+ *param_value = config->flush_max_buffers;
+ break;
+ case ocf_acp_wake_up_time:
+ *param_value = config->thread_wakeup_time;
+ break;
+ default:
+ return -OCF_ERR_INVAL;
+ }
+
+ return 0;
+}
+
+
+/* attempt to lock cache line if it's dirty */
+static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
+ uint32_t core_id, uint64_t core_line)
+{
+ struct ocf_map_info info;
+ bool locked = false;
+
+ ocf_metadata_hash_lock_rd(&cache->metadata.lock, core_id, core_line);
+
+ ocf_engine_lookup_map_entry(cache, &info, core_id,
+ core_line);
+
+ if (info.status == LOOKUP_HIT &&
+ metadata_test_dirty(cache, info.coll_idx) &&
+ ocf_cache_line_try_lock_rd(cache, info.coll_idx)) {
+ locked = true;
+ }
+
+ ocf_metadata_hash_unlock_rd(&cache->metadata.lock, core_id, core_line);
+
+ return locked ? info.coll_idx : cache->device->collision_table_entries;
+}
+
+static void _acp_handle_flush_error(struct ocf_cache *cache,
+ struct acp_context *acp)
+{
+ struct acp_flush_context *flush = &acp->flush;
+
+ flush->chunk->next_cleaning_timestamp = env_get_tick_count() +
+ env_secs_to_ticks(ACP_CHUNK_CLEANING_BACKOFF_TIME);
+
+ if (ocf_cache_log_rl(cache)) {
+ ocf_core_log(&cache->core[flush->chunk->core_id],
+ log_err, "Cleaning error (%d) in range"
+ " <%llu; %llu) backing off for %u seconds\n",
+ flush->error,
+ flush->chunk->chunk_id * ACP_CHUNK_SIZE,
+ (flush->chunk->chunk_id * ACP_CHUNK_SIZE) +
+ ACP_CHUNK_SIZE,
+ ACP_CHUNK_CLEANING_BACKOFF_TIME);
+ }
+}
+
+static inline bool _acp_can_clean_chunk(struct ocf_cache *cache,
+ struct acp_chunk_info *chunk)
+{
+ /* Check if core device is opened and if timeout after cleaning error
+ * expired or wasn't set in the first place */
+ return (cache->core[chunk->core_id].opened &&
+ (chunk->next_cleaning_timestamp > env_get_tick_count() ||
+ !chunk->next_cleaning_timestamp));
+}
+
+static struct acp_chunk_info *_acp_get_cleaning_candidate(ocf_cache_t cache)
+{
+ int i;
+ struct acp_chunk_info *cur;
+ struct acp_context *acp = cache->cleaner.cleaning_policy_context;
+
+ ACP_LOCK_CHUNKS_RD();
+
+ /* go through all buckets in descending order, excluding bucket 0 which
+ * is supposed to contain all clean chunks */
+ for (i = ACP_MAX_BUCKETS - 1; i > 0; i--) {
+ list_for_each_entry(cur, &acp->bucket_info[i].chunk_list, list) {
+ if (_acp_can_clean_chunk(cache, cur)) {
+ ACP_UNLOCK_CHUNKS_RD();
+ return cur;
+ }
+ }
+ }
+
+ ACP_UNLOCK_CHUNKS_RD();
+ return NULL;
+}
+
+/* called after flush request completed */
+static void _acp_flush_end(void *priv, int error)
+{
+ struct acp_cleaning_policy_config *config;
+ struct acp_context *acp = priv;
+ struct acp_flush_context *flush = &acp->flush;
+ ocf_cache_t cache = acp->cache;
+ int i;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+ for (i = 0; i < flush->size; i++) {
+ ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line);
+ ACP_DEBUG_END(acp, flush->data[i].cache_line);
+ }
+
+ if (error) {
+ flush->error = error;
+ _acp_handle_flush_error(cache, acp);
+ }
+
+ ACP_DEBUG_CHECK(acp);
+
+ acp->cmpl(&cache->cleaner, config->thread_wakeup_time);
+}
+
+/* flush data */
+static void _acp_flush(struct acp_context *acp)
+{
+ ocf_cache_t cache = acp->cache;
+ struct ocf_cleaner_attribs attribs = {
+ .cmpl_context = acp,
+ .cmpl_fn = _acp_flush_end,
+ .cache_line_lock = false,
+ .do_sort = false,
+ .io_queue = cache->cleaner.io_queue,
+ };
+
+ ocf_cleaner_do_flush_data_async(cache, acp->flush.data,
+ acp->flush.size, &attribs);
+}
+
+static bool _acp_prepare_flush_data(struct acp_context *acp,
+ uint32_t flush_max_buffers)
+{
+ ocf_cache_t cache = acp->cache;
+ struct acp_state *state = &acp->state;
+ struct acp_chunk_info *chunk = state->chunk;
+ size_t lines_per_chunk = ACP_CHUNK_SIZE / ocf_line_size(cache);
+ uint64_t first_core_line = chunk->chunk_id * lines_per_chunk;
+
+ OCF_DEBUG_PARAM(cache, "lines per chunk %llu chunk %llu "
+ "first_core_line %llu\n", (uint64_t)lines_per_chunk,
+ chunk->chunk_id, first_core_line);
+
+ acp->flush.size = 0;
+ acp->flush.chunk = chunk;
+ for (; state->iter < lines_per_chunk &&
+ acp->flush.size < flush_max_buffers; state->iter++) {
+ uint64_t core_line = first_core_line + state->iter;
+ ocf_cache_line_t cache_line;
+
+ cache_line = _acp_trylock_dirty(cache, chunk->core_id, core_line);
+ if (cache_line == cache->device->collision_table_entries)
+ continue;
+
+ ACP_DEBUG_BEGIN(acp, cache_line);
+
+ acp->flush.data[acp->flush.size].core_id = chunk->core_id;
+ acp->flush.data[acp->flush.size].core_line = core_line;
+ acp->flush.data[acp->flush.size].cache_line = cache_line;
+ acp->flush.size++;
+ }
+
+ if (state->iter == lines_per_chunk) {
+ /* reached end of chunk - reset state */
+ state->in_progress = false;
+ }
+
+ return (acp->flush.size > 0);
+}
+
+/* Clean at most 'flush_max_buffers' cache lines from current or newly
+ * selected chunk */
+void cleaning_policy_acp_perform_cleaning(ocf_cache_t cache,
+ ocf_cleaner_end_t cmpl)
+{
+ struct acp_cleaning_policy_config *config;
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ struct acp_state *state = &acp->state;
+
+ acp->cmpl = cmpl;
+
+ if (!state->in_progress) {
+ /* get next chunk to clean */
+ state->chunk = _acp_get_cleaning_candidate(cache);
+
+ if (!state->chunk) {
+ /* nothing to clean */
+ cmpl(&cache->cleaner, ACP_BACKOFF_TIME_MS);
+ return;
+ }
+
+ /* new cleaning cycle - reset state */
+ state->iter = 0;
+ state->in_progress = true;
+ }
+
+ ACP_DEBUG_INIT(acp);
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+ if (_acp_prepare_flush_data(acp, config->flush_max_buffers))
+ _acp_flush(acp);
+ else
+ _acp_flush_end(acp, 0);
+}
+
+static void _acp_update_bucket(struct acp_context *acp,
+ struct acp_chunk_info *chunk)
+{
+ struct acp_bucket *bucket = &acp->bucket_info[chunk->bucket_id];
+
+ if (chunk->num_dirty > bucket->threshold) {
+ ENV_BUG_ON(chunk->bucket_id == ACP_MAX_BUCKETS - 1);
+
+ chunk->bucket_id++;
+ /* buckets are stored in an array, so move up one bucket.
+ * No overflow here - the ENV_BUG_ON above guarantees we never
+ * increment past the last bucket */
+ bucket++;
+
+ list_move_tail(&chunk->list, &bucket->chunk_list);
+ } else if (chunk->bucket_id &&
+ chunk->num_dirty <= (bucket - 1)->threshold) {
+ chunk->bucket_id--;
+ /* move down one bucket, we made sure we won't underflow */
+ bucket--;
+
+ list_move(&chunk->list, &bucket->chunk_list);
+ }
+}
+
+void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ struct cleaning_policy_meta policy_meta;
+ struct acp_cleaning_policy_meta *acp_meta;
+ struct acp_chunk_info *chunk;
+
+ ACP_LOCK_CHUNKS_WR();
+
+ acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
+ chunk = _acp_get_chunk(cache, cache_line);
+
+ if (!acp_meta->dirty) {
+ acp_meta->dirty = 1;
+ _acp_meta_set(cache, cache_line, &policy_meta);
+ chunk->num_dirty++;
+ }
+
+ _acp_update_bucket(acp, chunk);
+
+ ACP_UNLOCK_CHUNKS_WR();
+}
+
+void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ struct cleaning_policy_meta policy_meta;
+ struct acp_cleaning_policy_meta *acp_meta;
+ struct acp_chunk_info *chunk;
+
+ ACP_LOCK_CHUNKS_WR();
+
+ acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
+ chunk = _acp_get_chunk(cache, cache_line);
+
+ if (acp_meta->dirty) {
+ acp_meta->dirty = 0;
+ _acp_meta_set(cache, cache_line, &policy_meta);
+ chunk->num_dirty--;
+ }
+
+ _acp_update_bucket(acp, chunk);
+
+ ACP_UNLOCK_CHUNKS_WR();
+}
+
+int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
+ int core_id, uint64_t start_byte, uint64_t end_byte)
+{
+ return ocf_metadata_actor(cache, PARTITION_INVALID,
+ core_id, start_byte, end_byte,
+ cleaning_policy_acp_purge_block);
+}
+
+void cleaning_policy_acp_remove_core(ocf_cache_t cache,
+ ocf_core_id_t core_id)
+{
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ uint64_t i;
+
+ ENV_BUG_ON(acp->chunks_total < acp->num_chunks[core_id]);
+
+ if (acp->state.in_progress && acp->state.chunk->core_id == core_id) {
+ acp->state.in_progress = false;
+ acp->state.iter = 0;
+ acp->state.chunk = NULL;
+ }
+
+ ACP_LOCK_CHUNKS_WR();
+
+ for (i = 0; i < acp->num_chunks[core_id]; i++)
+ list_del(&acp->chunk_info[core_id][i].list);
+
+ acp->chunks_total -= acp->num_chunks[core_id];
+ acp->num_chunks[core_id] = 0;
+
+ env_vfree(acp->chunk_info[core_id]);
+ acp->chunk_info[core_id] = NULL;
+
+ ACP_UNLOCK_CHUNKS_WR();
+}
+
+int cleaning_policy_acp_add_core(ocf_cache_t cache,
+ ocf_core_id_t core_id)
+{
+ ocf_core_t core = ocf_cache_get_core(cache, core_id);
+ uint64_t core_size = core->conf_meta->length;
+ uint64_t num_chunks = OCF_DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
+ struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+ int i;
+
+ OCF_DEBUG_PARAM(cache, "%s core_id %llu num_chunks %llu\n",
+ __func__, (uint64_t)core_id, (uint64_t) num_chunks);
+
+ ACP_LOCK_CHUNKS_WR();
+
+ ENV_BUG_ON(acp->chunk_info[core_id]);
+
+ acp->chunk_info[core_id] =
+ env_vzalloc(num_chunks * sizeof(acp->chunk_info[0][0]));
+
+ if (!acp->chunk_info[core_id]) {
+ ACP_UNLOCK_CHUNKS_WR();
+ OCF_DEBUG_PARAM(cache, "failed to allocate acp tables\n");
+ return -OCF_ERR_NO_MEM;
+ }
+
+ OCF_DEBUG_PARAM(cache, "successfully allocated acp tables\n");
+
+ /* increment counters */
+ acp->num_chunks[core_id] = num_chunks;
+ acp->chunks_total += num_chunks;
+
+ for (i = 0; i < acp->num_chunks[core_id]; i++) {
+ /* fill in chunk metadata and add to the clean bucket */
+ acp->chunk_info[core_id][i].core_id = core_id;
+ acp->chunk_info[core_id][i].chunk_id = i;
+ list_add(&acp->chunk_info[core_id][i].list,
+ &acp->bucket_info[0].chunk_list);
+ }
+
+ ACP_UNLOCK_CHUNKS_WR();
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/cleaning/acp.h b/src/spdk/ocf/src/cleaning/acp.h
new file mode 100644
index 000000000..1693b93a0
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/acp.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
+
+#define __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
+
+#include "cleaning.h"
+
+void cleaning_policy_acp_setup(ocf_cache_t cache);
+
+int cleaning_policy_acp_initialize(ocf_cache_t cache, int init_metadata);
+
+void cleaning_policy_acp_deinitialize(ocf_cache_t cache);
+
+void cleaning_policy_acp_perform_cleaning(ocf_cache_t cache,
+ ocf_cleaner_end_t cmpl);
+
+void cleaning_policy_acp_init_cache_block(ocf_cache_t cache,
+ uint32_t cache_line);
+
+void cleaning_policy_acp_set_hot_cache_line(ocf_cache_t cache,
+ uint32_t cache_line);
+
+void cleaning_policy_acp_purge_block(ocf_cache_t cache, uint32_t cache_line);
+
+int cleaning_policy_acp_purge_range(ocf_cache_t cache,
+ int core_id, uint64_t start_byte, uint64_t end_byte);
+
+int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t param_value);
+
+int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t *param_value);
+
+int cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id);
+
+void cleaning_policy_acp_remove_core(ocf_cache_t cache,
+ ocf_core_id_t core_id);
+
+#endif
+
diff --git a/src/spdk/ocf/src/cleaning/acp_structs.h b/src/spdk/ocf/src/cleaning/acp_structs.h
new file mode 100644
index 000000000..1bd940630
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/acp_structs.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __CLEANING_AGGRESSIVE_STRUCTS_H__
+#define __CLEANING_AGGRESSIVE_STRUCTS_H__
+
+#include "../utils/utils_cleaner.h"
+
+/* TODO: remove acp metadata */
+struct acp_cleaning_policy_meta {
+ uint8_t dirty : 1;
+};
+
+/* cleaning policy per partition metadata */
+struct acp_cleaning_policy_config {
+ uint32_t thread_wakeup_time; /* in milliseconds*/
+ uint32_t flush_max_buffers; /* in lines */
+};
+
+#endif
+
+
diff --git a/src/spdk/ocf/src/cleaning/alru.c b/src/spdk/ocf/src/cleaning/alru.c
new file mode 100644
index 000000000..91208bce9
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/alru.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cleaning.h"
+#include "alru.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_realloc.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+#include "../ocf_def_priv.h"
+#include "cleaning_priv.h"
+
+#define is_alru_head(x) (x == collision_table_entries)
+#define is_alru_tail(x) (x == collision_table_entries)
+
+#define OCF_CLEANING_DEBUG 0
+
+#if 1 == OCF_CLEANING_DEBUG
+
+#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
+
+#define OCF_DEBUG_LOG(cache, format, ...) \
+ ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
+ format"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
+
+#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
+ ##__VA_ARGS__)
+
+#else
+#define OCF_DEBUG_PREFIX
+#define OCF_DEBUG_LOG(cache, format, ...)
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+struct alru_flush_ctx {
+ struct ocf_cleaner_attribs attribs;
+ bool flush_perfomed;
+ uint32_t clines_no;
+ ocf_cache_t cache;
+ ocf_cleaner_end_t cmpl;
+ struct flush_data *flush_data;
+ size_t flush_data_limit;
+};
+
+struct alru_context {
+ struct alru_flush_ctx flush_ctx;
+ env_spinlock list_lock[OCF_IO_CLASS_MAX];
+};
+
+
+/* -- Start of ALRU functions -- */
+
+
+/* Sets the given collision_index as the new _head_ of the ALRU list. */
+static inline void update_alru_head(struct ocf_cache *cache,
+ int partition_id, unsigned int collision_index)
+{
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+
+ part->runtime->cleaning.policy.alru.lru_head = collision_index;
+}
+
+/* Sets the given collision_index as the new _tail_ of the ALRU list. */
+static inline void update_alru_tail(struct ocf_cache *cache,
+ int partition_id, unsigned int collision_index)
+{
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+
+ part->runtime->cleaning.policy.alru.lru_tail = collision_index;
+}
+
+/* Sets the given collision_index as the new _head_ and _tail_
+ * of the ALRU list.
+ */
+static inline void update_alru_head_tail(struct ocf_cache *cache,
+ int partition_id, unsigned int collision_index)
+{
+ update_alru_head(cache, partition_id, collision_index);
+ update_alru_tail(cache, partition_id, collision_index);
+}
+
+
+/* Adds the given collision_index to the _head_ of the ALRU list */
+static void add_alru_head(struct ocf_cache *cache, int partition_id,
+ unsigned int collision_index)
+{
+ unsigned int curr_head_index;
+ unsigned int collision_table_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+ struct cleaning_policy_meta policy;
+
+ ENV_BUG_ON(!(collision_index < collision_table_entries));
+
+ ENV_BUG_ON(env_atomic_read(
+ &part->runtime->cleaning.policy.alru.size) < 0);
+
+ ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
+ ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));
+
+ /* First node to be added. */
+ if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
+ update_alru_head_tail(cache, partition_id, collision_index);
+
+ ocf_metadata_get_cleaning_policy(cache, collision_index,
+ &policy);
+ policy.meta.alru.lru_next = collision_table_entries;
+ policy.meta.alru.lru_prev = collision_table_entries;
+ policy.meta.alru.timestamp = env_ticks_to_secs(
+ env_get_tick_count());
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+ } else {
+ /* Not the first node to be added. */
+
+ curr_head_index = part->runtime->cleaning.policy.alru.lru_head;
+
+ ENV_BUG_ON(!(curr_head_index < collision_table_entries));
+
+ ocf_metadata_get_cleaning_policy(cache, collision_index,
+ &policy);
+ policy.meta.alru.lru_next = curr_head_index;
+ policy.meta.alru.lru_prev = collision_table_entries;
+ policy.meta.alru.timestamp = env_ticks_to_secs(
+ env_get_tick_count());
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+
+ ocf_metadata_get_cleaning_policy(cache, curr_head_index,
+ &policy);
+ policy.meta.alru.lru_prev = collision_index;
+ ocf_metadata_set_cleaning_policy(cache, curr_head_index,
+ &policy);
+
+ update_alru_head(cache, partition_id, collision_index);
+ }
+
+ env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
+}
+
+/* Deletes the node with the given collision_index from the ALRU list */
+static void remove_alru_list(struct ocf_cache *cache, int partition_id,
+ unsigned int collision_index)
+{
+ uint32_t prev_lru_node, next_lru_node;
+ uint32_t collision_table_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+ struct alru_cleaning_policy *cleaning_policy =
+ &part->runtime->cleaning.policy.alru;
+ struct cleaning_policy_meta policy;
+
+ ENV_BUG_ON(!(collision_index < collision_table_entries));
+
+ if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
+ ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
+ "from empty ALRU Cleaning Policy queue!\n");
+ ENV_BUG();
+ }
+
+ ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
+
+ /* Get prev and next (even if they do not exist) */
+ next_lru_node = policy.meta.alru.lru_next;
+ prev_lru_node = policy.meta.alru.lru_prev;
+
+ /* Check if entry is not part of the ALRU list */
+ if ((next_lru_node == collision_table_entries) &&
+ (prev_lru_node == collision_table_entries) &&
+ (cleaning_policy->lru_head != collision_index) &&
+ (cleaning_policy->lru_tail != collision_index)) {
+ return;
+ }
+
+ /* Case 0: If we are both head AND tail, there is only one node. Unlink
+ * it and mark the list as empty.
+ */
+ if (cleaning_policy->lru_head == collision_index &&
+ cleaning_policy->lru_tail == collision_index) {
+ policy.meta.alru.lru_next = collision_table_entries;
+ policy.meta.alru.lru_prev = collision_table_entries;
+
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+
+ update_alru_head_tail(cache, partition_id,
+ collision_table_entries);
+ }
+
+ /* Case 1: else if this collision_index is ALRU head, but not tail,
+ * update head and return
+ */
+ else if ((cleaning_policy->lru_tail != collision_index) &&
+ (cleaning_policy->lru_head == collision_index)) {
+ struct cleaning_policy_meta next_policy;
+
+ ENV_BUG_ON(!(next_lru_node < collision_table_entries));
+
+ ocf_metadata_get_cleaning_policy(cache, next_lru_node,
+ &next_policy);
+
+ update_alru_head(cache, partition_id, next_lru_node);
+
+ policy.meta.alru.lru_next = collision_table_entries;
+ next_policy.meta.alru.lru_prev = collision_table_entries;
+
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+ ocf_metadata_set_cleaning_policy(cache, next_lru_node,
+ &next_policy);
+ }
+
+ /* Case 2: else if this collision_index is ALRU tail, but not head,
+ * update tail and return
+ */
+ else if ((cleaning_policy->lru_head != collision_index) &&
+ (cleaning_policy->lru_tail == collision_index)) {
+ struct cleaning_policy_meta prev_policy;
+
+ ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
+
+ ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
+ &prev_policy);
+
+ update_alru_tail(cache, partition_id, prev_lru_node);
+
+ policy.meta.alru.lru_prev = collision_table_entries;
+ prev_policy.meta.alru.lru_next = collision_table_entries;
+
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+ ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
+ &prev_policy);
+ }
+
+ /* Case 3: else this collision_index is a middle node. There is no
+ * change to the head and the tail pointers.
+ */
+ else {
+ struct cleaning_policy_meta next_policy;
+ struct cleaning_policy_meta prev_policy;
+
+ ENV_BUG_ON(!(next_lru_node < collision_table_entries));
+ ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
+
+ ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
+ &prev_policy);
+ ocf_metadata_get_cleaning_policy(cache, next_lru_node,
+ &next_policy);
+
+ /* Update prev and next nodes */
+ prev_policy.meta.alru.lru_next = policy.meta.alru.lru_next;
+ next_policy.meta.alru.lru_prev = policy.meta.alru.lru_prev;
+
+ /* Update the given node */
+ policy.meta.alru.lru_next = collision_table_entries;
+ policy.meta.alru.lru_prev = collision_table_entries;
+
+ ocf_metadata_set_cleaning_policy(cache, collision_index,
+ &policy);
+ ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
+ &prev_policy);
+ ocf_metadata_set_cleaning_policy(cache, next_lru_node,
+ &next_policy);
+ }
+
+ env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
+}
+
+static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
+ unsigned int collision_index)
+{
+ uint32_t prev_lru_node, next_lru_node;
+ uint32_t collision_table_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+ struct alru_cleaning_policy *cleaning_policy =
+ &part->runtime->cleaning.policy.alru;
+ struct cleaning_policy_meta policy;
+
+ ENV_BUG_ON(!(collision_index < collision_table_entries));
+
+ ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
+
+ next_lru_node = policy.meta.alru.lru_next;
+ prev_lru_node = policy.meta.alru.lru_prev;
+
+ return cleaning_policy->lru_tail == collision_index ||
+ cleaning_policy->lru_head == collision_index ||
+ next_lru_node != collision_table_entries ||
+ prev_lru_node != collision_table_entries;
+}
+
+
+/* -- End of ALRU functions -- */
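
Note: the ALRU list above is a doubly-linked list threaded through the per-cache-line
cleaning metadata; collision_table_entries doubles as the NULL sentinel, lru_head holds
the most recently dirtied line and lru_tail the oldest one (flushed first). A minimal
standalone sketch of the same layout, with simplified types (ENTRIES, NIL and struct node
are illustrative, not OCF definitions):

#include <stdint.h>
#include <stdio.h>

#define ENTRIES 8U          /* stands in for collision_table_entries */
#define NIL     ENTRIES     /* out-of-range index used as the NULL sentinel */

struct node { uint32_t prev, next, timestamp; };

static struct node meta[ENTRIES];
static uint32_t head = NIL, tail = NIL;

/* Push a line at the head (most recently dirtied), like add_alru_head(). */
static void push_head(uint32_t line, uint32_t now)
{
	meta[line] = (struct node){ .prev = NIL, .next = head, .timestamp = now };
	if (head != NIL)
		meta[head].prev = line;
	else
		tail = line;    /* first element: head == tail */
	head = line;
}

int main(void)
{
	push_head(3, 10);
	push_head(5, 20);
	/* Walk from tail (oldest) towards head, like get_data_to_flush(). */
	for (uint32_t i = tail; i != NIL; i = meta[i].prev)
		printf("line %u, timestamp %u\n", (unsigned)i, (unsigned)meta[i].timestamp);
	return 0;
}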
+
+void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct cleaning_policy_meta policy;
+
+ ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
+
+ policy.meta.alru.timestamp = 0;
+ policy.meta.alru.lru_prev = cache->device->collision_table_entries;
+ policy.meta.alru.lru_next = cache->device->collision_table_entries;
+
+ ocf_metadata_set_cleaning_policy(cache, cache_line, &policy);
+}
+
+void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct alru_context *alru = cache->cleaner.cleaning_policy_context;
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
+ cache_line);
+
+ env_spinlock_lock(&alru->list_lock[part_id]);
+ remove_alru_list(cache, part_id, cache_line);
+ env_spinlock_unlock(&alru->list_lock[part_id]);
+}
+
+static void __cleaning_policy_alru_purge_cache_block_any(
+ struct ocf_cache *cache, uint32_t cache_line)
+{
+ struct alru_context *alru = cache->cleaner.cleaning_policy_context;
+
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
+ cache_line);
+
+ env_spinlock_lock(&alru->list_lock[part_id]);
+
+ if (is_on_alru_list(cache, part_id, cache_line))
+ remove_alru_list(cache, part_id, cache_line);
+
+ env_spinlock_unlock(&alru->list_lock[part_id]);
+}
+
+int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
+ uint64_t start_byte, uint64_t end_byte) {
+ struct ocf_user_part *part;
+ ocf_part_id_t part_id;
+ int ret = 0;
+
+ for_each_part(cache, part, part_id) {
+ if (env_atomic_read(&part->runtime->cleaning.
+ policy.alru.size) == 0)
+ continue;
+
+ ret |= ocf_metadata_actor(cache, part_id,
+ core_id, start_byte, end_byte,
+ __cleaning_policy_alru_purge_cache_block_any);
+ }
+
+ return ret;
+}
+
+void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ struct alru_context *alru = cache->cleaner.cleaning_policy_context;
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
+ cache_line);
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ uint32_t prev_lru_node, next_lru_node;
+ uint32_t collision_table_entries = cache->device->collision_table_entries;
+ struct cleaning_policy_meta policy;
+
+ ENV_WARN_ON(!metadata_test_dirty(cache, cache_line));
+ ENV_WARN_ON(!metadata_test_valid_any(cache, cache_line));
+
+ env_spinlock_lock(&alru->list_lock[part_id]);
+
+ ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
+ next_lru_node = policy.meta.alru.lru_next;
+ prev_lru_node = policy.meta.alru.lru_prev;
+
+ if ((next_lru_node != collision_table_entries) ||
+ (prev_lru_node != collision_table_entries) ||
+ ((part->runtime->cleaning.policy.
+ alru.lru_head == cache_line) &&
+ (part->runtime->cleaning.policy.
+ alru.lru_tail == cache_line)))
+ remove_alru_list(cache, part_id, cache_line);
+
+ add_alru_head(cache, part_id, cache_line);
+
+ env_spinlock_unlock(&alru->list_lock[part_id]);
+}
+
+static void _alru_rebuild(struct ocf_cache *cache)
+{
+ struct ocf_user_part *part;
+ ocf_part_id_t part_id;
+ ocf_core_id_t core_id;
+ ocf_cache_line_t cline;
+ uint32_t step = 0;
+
+ for_each_part(cache, part, part_id) {
+ /* ALRU initialization */
+ env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
+ part->runtime->cleaning.policy.alru.lru_head =
+ cache->device->collision_table_entries;
+ part->runtime->cleaning.policy.alru.lru_tail =
+ cache->device->collision_table_entries;
+ cache->device->runtime_meta->cleaning_thread_access = 0;
+ }
+
+ for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
+ ocf_metadata_get_core_and_part_id(cache, cline, &core_id,
+ NULL);
+
+ OCF_COND_RESCHED_DEFAULT(step);
+
+ if (core_id == OCF_CORE_MAX)
+ continue;
+
+ cleaning_policy_alru_init_cache_block(cache, cline);
+
+ if (!metadata_test_dirty(cache, cline))
+ continue;
+
+ cleaning_policy_alru_set_hot_cache_line(cache, cline);
+ }
+}
+
+static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
+ struct ocf_user_part *part, int init_metadata)
+{
+ if (init_metadata) {
+ /* ALRU initialization */
+ env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
+ part->runtime->cleaning.policy.alru.lru_head =
+ cache->device->collision_table_entries;
+ part->runtime->cleaning.policy.alru.lru_tail =
+ cache->device->collision_table_entries;
+ }
+
+ cache->device->runtime_meta->cleaning_thread_access = 0;
+
+ return 0;
+}
+
+void cleaning_policy_alru_setup(struct ocf_cache *cache)
+{
+ struct alru_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ config->thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP;
+ config->stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME;
+ config->flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS;
+ config->activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD;
+}
+
+int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
+{
+ struct ocf_user_part *part;
+ ocf_part_id_t part_id;
+ struct alru_context *alru;
+ int error = 0;
+ unsigned i;
+
+ alru = env_vzalloc(sizeof(*alru));
+ if (!alru) {
+ ocf_cache_log(cache, log_err, "alru context allocation error\n");
+ return -OCF_ERR_NO_MEM;
+ }
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ error = env_spinlock_init(&alru->list_lock[i]);
+ if (error)
+ break;
+ }
+
+ if (error) {
+ while (i--)
+ env_spinlock_destroy(&alru->list_lock[i]);
+ env_vfree(alru);
+ return error;
+ }
+
+
+ cache->cleaner.cleaning_policy_context = alru;
+
+ for_each_part(cache, part, part_id) {
+ cleaning_policy_alru_initialize_part(cache,
+ part, init_metadata);
+ }
+
+ if (init_metadata)
+ _alru_rebuild(cache);
+
+ ocf_kick_cleaner(cache);
+
+ return 0;
+}
+
+void cleaning_policy_alru_deinitialize(struct ocf_cache *cache)
+{
+ struct alru_context *alru = cache->cleaner.cleaning_policy_context;
+ unsigned i;
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++)
+ env_spinlock_destroy(&alru->list_lock[i]);
+
+ env_vfree(cache->cleaner.cleaning_policy_context);
+ cache->cleaner.cleaning_policy_context = NULL;
+}
+
+int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t param_value)
+{
+ struct alru_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ switch (param_id) {
+ case ocf_alru_wake_up_time:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ALRU_MIN_WAKE_UP,
+ OCF_ALRU_MAX_WAKE_UP,
+ "thread_wakeup_time");
+ config->thread_wakeup_time = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread "
+ "wake-up time: %d\n", config->thread_wakeup_time);
+ ocf_kick_cleaner(cache);
+ break;
+ case ocf_alru_stale_buffer_time:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ALRU_MIN_STALENESS_TIME,
+ OCF_ALRU_MAX_STALENESS_TIME,
+ "stale_buffer_time");
+ config->stale_buffer_time = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread "
+ "staleness time: %d\n", config->stale_buffer_time);
+ break;
+ case ocf_alru_flush_max_buffers:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ALRU_MIN_FLUSH_MAX_BUFFERS,
+ OCF_ALRU_MAX_FLUSH_MAX_BUFFERS,
+ "flush_max_buffers");
+ config->flush_max_buffers = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread max "
+ "buffers flushed per iteration: %d\n",
+ config->flush_max_buffers);
+ break;
+ case ocf_alru_activity_threshold:
+ OCF_CLEANING_CHECK_PARAM(cache, param_value,
+ OCF_ALRU_MIN_ACTIVITY_THRESHOLD,
+ OCF_ALRU_MAX_ACTIVITY_THRESHOLD,
+ "activity_threshold");
+ config->activity_threshold = param_value;
+ ocf_cache_log(cache, log_info, "Write-back flush thread "
+ "activity time threshold: %d\n",
+ config->activity_threshold);
+ break;
+ default:
+ return -OCF_ERR_INVAL;
+ }
+
+ return 0;
+}
+
+int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t *param_value)
+{
+ struct alru_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ switch (param_id) {
+ case ocf_alru_wake_up_time:
+ *param_value = config->thread_wakeup_time;
+ break;
+ case ocf_alru_stale_buffer_time:
+ *param_value = config->stale_buffer_time;
+ break;
+ case ocf_alru_flush_max_buffers:
+ *param_value = config->flush_max_buffers;
+ break;
+ case ocf_alru_activity_threshold:
+ *param_value = config->activity_threshold;
+ break;
+ default:
+ return -OCF_ERR_INVAL;
+ }
+
+ return 0;
+}
+
+static inline uint32_t compute_timestamp(
+ const struct alru_cleaning_policy_config *config)
+{
+ unsigned long time;
+
+ time = env_get_tick_count();
+ time -= env_secs_to_ticks(config->stale_buffer_time);
+ time = env_ticks_to_secs(time);
+
+ return (uint32_t)time;
+}
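
Note: compute_timestamp() yields the newest timestamp that still counts as stale,
i.e. "now minus stale_buffer_time" converted to seconds; only lines dirtied before
that cutoff are flushed (see more_blocks_to_flush()). A worked standalone example,
assuming a 1000-ticks-per-second clock (the tick rate is an assumption, not part of
the patch):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the env_* tick helpers. */
static unsigned long ticks_now = 5000000UL;                     /* 5000 s of uptime */
static unsigned long secs_to_ticks(uint32_t s) { return (unsigned long)s * 1000; }
static uint32_t ticks_to_secs(unsigned long t) { return (uint32_t)(t / 1000); }

int main(void)
{
	uint32_t stale_buffer_time = 120;   /* seconds, as in alru_cleaning_policy_config */
	uint32_t cutoff = ticks_to_secs(ticks_now - secs_to_ticks(stale_buffer_time));

	printf("cutoff = %u\n", (unsigned)cutoff);   /* 5000 - 120 = 4880 */
	/* A line with timestamp < 4880 was dirtied more than 120 s ago and is
	 * therefore old enough to be picked up by get_data_to_flush(). */
	return 0;
}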
+
+static int check_for_io_activity(struct ocf_cache *cache,
+ struct alru_cleaning_policy_config *config)
+{
+ unsigned int now, last;
+
+ now = env_ticks_to_msecs(env_get_tick_count());
+ last = env_atomic_read(&cache->last_access_ms);
+
+ if ((now - last) < config->activity_threshold)
+ return 1;
+ return 0;
+}
+
+static bool clean_later(ocf_cache_t cache, uint32_t *delta)
+{
+ struct alru_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ *delta = env_ticks_to_secs(env_get_tick_count()) -
+ cache->device->runtime_meta->cleaning_thread_access;
+ if (*delta <= config->thread_wakeup_time)
+ return true;
+
+ return false;
+}
+
+static bool is_cleanup_possible(ocf_cache_t cache)
+{
+ struct alru_cleaning_policy_config *config;
+ uint32_t delta;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ if (check_for_io_activity(cache, config)) {
+ OCF_DEBUG_PARAM(cache, "IO activity detected");
+ return false;
+ }
+
+ if (clean_later(cache, &delta)) {
+ OCF_DEBUG_PARAM(cache,
+ "Cleaning policy configured to clean later "
+ "delta=%u wake_up=%u", delta,
+ config->thread_wakeup_time);
+ return false;
+ }
+
+ /* Cleaning policy configured to not clean anything */
+ if (config->flush_max_buffers == 0)
+ return false;
+
+ return true;
+}
+
+static void get_block_to_flush(struct flush_data* dst,
+ ocf_cache_line_t cache_line, struct ocf_cache* cache)
+{
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+
+ ocf_metadata_get_core_info(cache, cache_line,
+ &core_id, &core_line);
+
+ dst->cache_line = cache_line;
+ dst->core_id = core_id;
+ dst->core_line = core_line;
+}
+
+static bool more_blocks_to_flush(struct ocf_cache *cache,
+ ocf_cache_line_t cache_line, uint32_t last_access)
+{
+ struct cleaning_policy_meta policy;
+
+ if (cache_line >= cache->device->collision_table_entries)
+ return false;
+
+ ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
+
+ if (policy.meta.alru.timestamp >= last_access)
+ return false;
+
+ return true;
+}
+
+static bool block_is_busy(struct ocf_cache *cache,
+ ocf_cache_line_t cache_line)
+{
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+
+ ocf_metadata_get_core_info(cache, cache_line,
+ &core_id, &core_line);
+
+ if (!cache->core[core_id].opened)
+ return true;
+
+ if (ocf_cache_line_is_used(cache, cache_line))
+ return true;
+
+ return false;
+}
+
+static int get_data_to_flush(struct alru_context *alru)
+{
+ struct alru_flush_ctx *fctx = &alru->flush_ctx;
+ ocf_cache_t cache = fctx->cache;
+ struct alru_cleaning_policy_config *config;
+ struct cleaning_policy_meta policy;
+ ocf_cache_line_t cache_line;
+ struct ocf_user_part *part;
+ uint32_t last_access;
+ int to_flush = 0;
+ int part_id = OCF_IO_CLASS_ID_MAX;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ for_each_part(cache, part, part_id) {
+ env_spinlock_lock(&alru->list_lock[part_id]);
+
+ cache_line = part->runtime->cleaning.policy.alru.lru_tail;
+
+ last_access = compute_timestamp(config);
+
+ OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d",
+ last_access, policy.meta.alru.timestamp,
+ policy.meta.alru.timestamp < last_access);
+
+ while (more_blocks_to_flush(cache, cache_line, last_access)) {
+ if (to_flush >= fctx->clines_no) {
+ env_spinlock_unlock(&alru->list_lock[part_id]);
+ goto end;
+ }
+
+ if (!block_is_busy(cache, cache_line)) {
+ get_block_to_flush(&fctx->flush_data[to_flush], cache_line,
+ cache);
+ to_flush++;
+ }
+
+ ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
+ cache_line = policy.meta.alru.lru_prev;
+ }
+
+ env_spinlock_unlock(&alru->list_lock[part_id]);
+ }
+
+end:
+ OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush);
+
+ return to_flush;
+}
+
+static void alru_clean_complete(void *priv, int err)
+{
+ struct alru_cleaning_policy_config *config;
+ struct alru_flush_ctx *fctx = priv;
+ ocf_cache_t cache = fctx->cache;
+ int interval;
+
+ OCF_REALLOC_DEINIT(&fctx->flush_data, &fctx->flush_data_limit);
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ interval = fctx->flush_performed ? 0 : config->thread_wakeup_time * 1000;
+
+ fctx->cmpl(&fctx->cache->cleaner, interval);
+}
+
+static void alru_clean(struct alru_context *alru)
+{
+ struct alru_flush_ctx *fctx = &alru->flush_ctx;
+ ocf_cache_t cache = fctx->cache;
+ int to_clean;
+
+ if (!is_cleanup_possible(cache)) {
+ alru_clean_complete(fctx, 0);
+ return;
+ }
+
+ if (ocf_metadata_try_start_exclusive_access(&cache->metadata.lock)) {
+ alru_clean_complete(fctx, 0);
+ return;
+ }
+
+ OCF_REALLOC(&fctx->flush_data, sizeof(fctx->flush_data[0]),
+ fctx->clines_no, &fctx->flush_data_limit);
+ if (!fctx->flush_data) {
+ ocf_cache_log(cache, log_warn, "No memory to allocate flush "
+ "data for ALRU cleaning policy\n");
+ goto end;
+ }
+
+ to_clean = get_data_to_flush(alru);
+ if (to_clean > 0) {
+ fctx->flush_performed = true;
+ ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean,
+ &fctx->attribs);
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ return;
+ }
+
+ /* Update timestamp only if there are no items to be cleaned */
+ cache->device->runtime_meta->cleaning_thread_access =
+ env_ticks_to_secs(env_get_tick_count());
+
+end:
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ alru_clean_complete(fctx, 0);
+}
+
+void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)
+{
+ struct alru_context *alru = cache->cleaner.cleaning_policy_context;
+ struct alru_flush_ctx *fctx = &alru->flush_ctx;
+ struct alru_cleaning_policy_config *config;
+
+ config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
+
+ OCF_REALLOC_INIT(&fctx->flush_data, &fctx->flush_data_limit);
+
+ fctx->attribs.cmpl_context = fctx;
+ fctx->attribs.cmpl_fn = alru_clean_complete;
+ fctx->attribs.cache_line_lock = true;
+ fctx->attribs.do_sort = true;
+ fctx->attribs.io_queue = cache->cleaner.io_queue;
+
+ fctx->clines_no = config->flush_max_buffers;
+ fctx->cache = cache;
+ fctx->cmpl = cmpl;
+ fctx->flush_performed = false;
+
+ alru_clean(alru);
+}
diff --git a/src/spdk/ocf/src/cleaning/alru.h b/src/spdk/ocf/src/cleaning/alru.h
new file mode 100644
index 000000000..ce0eb9eb4
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/alru.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __LAYER_CLEANING_POLICY_ALRU_H__
+
+#define __LAYER_CLEANING_POLICY_ALRU_H__
+
+#include "cleaning.h"
+#include "alru_structs.h"
+
+void cleaning_policy_alru_setup(ocf_cache_t cache);
+int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata);
+void cleaning_policy_alru_deinitialize(ocf_cache_t cache);
+void cleaning_policy_alru_init_cache_block(ocf_cache_t cache,
+ uint32_t cache_line);
+void cleaning_policy_alru_purge_cache_block(ocf_cache_t cache,
+ uint32_t cache_line);
+int cleaning_policy_alru_purge_range(ocf_cache_t cache, int core_id,
+ uint64_t start_byte, uint64_t end_byte);
+void cleaning_policy_alru_set_hot_cache_line(ocf_cache_t cache,
+ uint32_t cache_line);
+int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t param_value);
+int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache,
+ uint32_t param_id, uint32_t *param_value);
+void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl);
+
+#endif
+
diff --git a/src/spdk/ocf/src/cleaning/alru_structs.h b/src/spdk/ocf/src/cleaning/alru_structs.h
new file mode 100644
index 000000000..c4783fdcc
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/alru_structs.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __CLEANING_ALRU_STRUCTS_H__
+#define __CLEANING_ALRU_STRUCTS_H__
+
+#include "ocf/ocf.h"
+#include "ocf_env.h"
+
+struct alru_cleaning_policy_meta {
+ /* Timestamp (4 B) plus LRU prev/next pointers (2 * 4 = 8 B) */
+ uint32_t timestamp;
+ uint32_t lru_prev;
+ uint32_t lru_next;
+} __attribute__((packed));
+
+struct alru_cleaning_policy_config {
+ uint32_t thread_wakeup_time; /* in seconds */
+ uint32_t stale_buffer_time; /* in seconds */
+ uint32_t flush_max_buffers; /* in lines */
+ uint32_t activity_threshold; /* in milliseconds */
+};
+
+struct alru_cleaning_policy {
+ env_atomic size;
+ uint32_t lru_head;
+ uint32_t lru_tail;
+};
+
+
+#endif
diff --git a/src/spdk/ocf/src/cleaning/cleaning.c b/src/spdk/ocf/src/cleaning/cleaning.c
new file mode 100644
index 000000000..ba79199d6
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/cleaning.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "cleaning.h"
+#include "alru.h"
+#include "nop.h"
+#include "acp.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "../mngt/ocf_mngt_common.h"
+#include "../metadata/metadata.h"
+#include "../ocf_queue_priv.h"
+
+struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
+ [ocf_cleaning_nop] = {
+ .name = "nop",
+ .perform_cleaning = cleaning_nop_perform_cleaning,
+ },
+ [ocf_cleaning_alru] = {
+ .setup = cleaning_policy_alru_setup,
+ .init_cache_block = cleaning_policy_alru_init_cache_block,
+ .purge_cache_block = cleaning_policy_alru_purge_cache_block,
+ .purge_range = cleaning_policy_alru_purge_range,
+ .set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line,
+ .initialize = cleaning_policy_alru_initialize,
+ .deinitialize = cleaning_policy_alru_deinitialize,
+ .set_cleaning_param = cleaning_policy_alru_set_cleaning_param,
+ .get_cleaning_param = cleaning_policy_alru_get_cleaning_param,
+ .perform_cleaning = cleaning_alru_perform_cleaning,
+ .name = "alru",
+ },
+ [ocf_cleaning_acp] = {
+ .setup = cleaning_policy_acp_setup,
+ .init_cache_block = cleaning_policy_acp_init_cache_block,
+ .purge_cache_block = cleaning_policy_acp_purge_block,
+ .purge_range = cleaning_policy_acp_purge_range,
+ .set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line,
+ .initialize = cleaning_policy_acp_initialize,
+ .deinitialize = cleaning_policy_acp_deinitialize,
+ .set_cleaning_param = cleaning_policy_acp_set_cleaning_param,
+ .get_cleaning_param = cleaning_policy_acp_get_cleaning_param,
+ .add_core = cleaning_policy_acp_add_core,
+ .remove_core = cleaning_policy_acp_remove_core,
+ .perform_cleaning = cleaning_policy_acp_perform_cleaning,
+ .name = "acp",
+ },
+};
+
+int ocf_start_cleaner(ocf_cache_t cache)
+{
+ return ctx_cleaner_init(cache->owner, &cache->cleaner);
+}
+
+void ocf_stop_cleaner(ocf_cache_t cache)
+{
+ ctx_cleaner_stop(cache->owner, &cache->cleaner);
+}
+
+void ocf_kick_cleaner(ocf_cache_t cache)
+{
+ ctx_cleaner_kick(cache->owner, &cache->cleaner);
+}
+
+void ocf_cleaner_set_cmpl(ocf_cleaner_t cleaner, ocf_cleaner_end_t fn)
+{
+ cleaner->end = fn;
+}
+
+void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv)
+{
+ OCF_CHECK_NULL(c);
+ c->priv = priv;
+}
+
+void *ocf_cleaner_get_priv(ocf_cleaner_t c)
+{
+ OCF_CHECK_NULL(c);
+ return c->priv;
+}
+
+ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
+{
+ OCF_CHECK_NULL(c);
+ return container_of(c, struct ocf_cache, cleaner);
+}
+
+static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
+ return 0;
+
+ for_each_core(cache, core, core_id) {
+ if (core->opened && ocf_mngt_core_is_dirty(core)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
+
+ ocf_mngt_cache_unlock(cache);
+ ocf_queue_put(cleaner->io_queue);
+ cleaner->end(cleaner, interval);
+}
+
+void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
+{
+ ocf_cache_t cache;
+ ocf_cleaning_t clean_type;
+
+ OCF_CHECK_NULL(cleaner);
+ OCF_CHECK_NULL(queue);
+
+ cache = ocf_cleaner_get_cache(cleaner);
+
+ /* Do not perform cleaning when the cache is not running
+ * (error, etc.).
+ */
+ if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
+ ocf_mngt_cache_is_locked(cache)) {
+ cleaner->end(cleaner, SLEEP_TIME_MS);
+ return;
+ }
+
+ /* Sleep in case there is management operation in progress. */
+ if (ocf_mngt_cache_trylock(cache)) {
+ cleaner->end(cleaner, SLEEP_TIME_MS);
+ return;
+ }
+
+ if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
+ ocf_mngt_cache_unlock(cache);
+ cleaner->end(cleaner, SLEEP_TIME_MS);
+ return;
+ }
+
+ clean_type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(clean_type >= ocf_cleaning_max);
+
+ ocf_queue_get(queue);
+ cleaner->io_queue = queue;
+
+ cleaning_policy_ops[clean_type].perform_cleaning(cache,
+ ocf_cleaner_run_complete);
+}
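
Note: ocf_cleaner_run() is driven by the context's cleaner adapter (the ctx_cleaner_*
ops used above), and the interval handed to cleaner->end() tells that adapter how long
to wait before the next pass. A simplified, self-contained model of that contract; every
name below is illustrative and not part of the OCF API:

#include <stdio.h>
#include <time.h>

struct cleaner;
typedef void (*cleaner_end_t)(struct cleaner *c, unsigned interval_ms);

struct cleaner {
	cleaner_end_t end;          /* completion set by the adapter, like ocf_cleaner_set_cmpl() */
	unsigned next_sleep_ms;
	int passes_left;
};

static void on_pass_done(struct cleaner *c, unsigned interval_ms)
{
	c->next_sleep_ms = interval_ms;     /* remember how long to sleep */
}

static void run_one_pass(struct cleaner *c)
{
	/* Stands in for ocf_cleaner_run(): clean, then report the interval back. */
	c->end(c, 1000 /* SLEEP_TIME_MS */);
}

int main(void)
{
	struct cleaner c = { .end = on_pass_done, .passes_left = 3 };

	while (c.passes_left--) {
		run_one_pass(&c);
		printf("sleeping %u ms before next pass\n", c.next_sleep_ms);
		struct timespec ts = { .tv_sec = c.next_sleep_ms / 1000,
				       .tv_nsec = (long)(c.next_sleep_ms % 1000) * 1000000L };
		nanosleep(&ts, NULL);
	}
	return 0;
}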
diff --git a/src/spdk/ocf/src/cleaning/cleaning.h b/src/spdk/ocf/src/cleaning/cleaning.h
new file mode 100644
index 000000000..39d7b2414
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/cleaning.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __LAYER_CLEANING_POLICY_H__
+#define __LAYER_CLEANING_POLICY_H__
+
+#include "alru_structs.h"
+#include "nop_structs.h"
+#include "acp_structs.h"
+#include "ocf/ocf_cleaner.h"
+
+#define CLEANING_POLICY_CONFIG_BYTES 256
+#define CLEANING_POLICY_TYPE_MAX 4
+
+#define SLEEP_TIME_MS (1000)
+
+struct ocf_request;
+
+struct cleaning_policy_config {
+ uint8_t data[CLEANING_POLICY_CONFIG_BYTES];
+};
+
+struct cleaning_policy {
+ union {
+ struct nop_cleaning_policy nop;
+ struct alru_cleaning_policy alru;
+ } policy;
+};
+
+/* Cleaning policy metadata per cache line */
+struct cleaning_policy_meta {
+ union {
+ struct nop_cleaning_policy_meta nop;
+ struct alru_cleaning_policy_meta alru;
+ struct acp_cleaning_policy_meta acp;
+ } meta;
+};
+
+struct cleaning_policy_ops {
+ void (*setup)(ocf_cache_t cache);
+ int (*initialize)(ocf_cache_t cache, int init_metadata);
+ void (*deinitialize)(ocf_cache_t cache);
+ int (*add_core)(ocf_cache_t cache, ocf_core_id_t core_id);
+ void (*remove_core)(ocf_cache_t cache, ocf_core_id_t core_id);
+ void (*init_cache_block)(ocf_cache_t cache, uint32_t cache_line);
+ void (*purge_cache_block)(ocf_cache_t cache, uint32_t cache_line);
+ int (*purge_range)(ocf_cache_t cache, int core_id,
+ uint64_t start_byte, uint64_t end_byte);
+ void (*set_hot_cache_line)(ocf_cache_t cache, uint32_t cache_line);
+ int (*set_cleaning_param)(ocf_cache_t cache, uint32_t param_id,
+ uint32_t param_value);
+ int (*get_cleaning_param)(ocf_cache_t cache, uint32_t param_id,
+ uint32_t *param_value);
+ void (*perform_cleaning)(ocf_cache_t cache, ocf_cleaner_end_t cmpl);
+ const char *name;
+};
+
+extern struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max];
+
+struct ocf_cleaner {
+ void *cleaning_policy_context;
+ ocf_queue_t io_queue;
+ ocf_cleaner_end_t end;
+ void *priv;
+};
+
+int ocf_start_cleaner(ocf_cache_t cache);
+
+void ocf_kick_cleaner(ocf_cache_t cache);
+
+void ocf_stop_cleaner(ocf_cache_t cache);
+
+#endif
diff --git a/src/spdk/ocf/src/cleaning/cleaning_priv.h b/src/spdk/ocf/src/cleaning/cleaning_priv.h
new file mode 100644
index 000000000..028cbd1f1
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/cleaning_priv.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+static inline void cleaning_policy_param_error(ocf_cache_t cache,
+ const char *param_name, uint32_t min, uint32_t max)
+{
+ ocf_cache_log(cache, log_err, "Refusing to set flush "
+ "parameters because parameter %s is not within the range "
+ "of <%d-%d>\n", param_name, min, max);
+}
+
+#define OCF_CLEANING_CHECK_PARAM(CACHE, VAL, MIN, MAX, NAME) ({ \
+ if (VAL < MIN || VAL > MAX) { \
+ cleaning_policy_param_error(CACHE, NAME, MIN, MAX); \
+ return -OCF_ERR_INVAL; \
+ } \
+})
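
Note: the macro above expands to a statement containing return -OCF_ERR_INVAL, so a
failed range check returns from the *enclosing* function; it can therefore only be used
inside an int-returning setter. A usage sketch mirroring cleaning_policy_alru_set_cleaning_param()
(the setter name, parameter name and bounds below are hypothetical):

static int example_set_param(ocf_cache_t cache, uint32_t value)
{
	OCF_CLEANING_CHECK_PARAM(cache, value, 1, 3600, "example_param");

	/* Reached only when 1 <= value <= 3600. */
	return 0;
}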
diff --git a/src/spdk/ocf/src/cleaning/nop.c b/src/spdk/ocf/src/cleaning/nop.c
new file mode 100644
index 000000000..4d88733f0
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/nop.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright(c) 2012-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "nop.h"
+#include "../ocf_cache_priv.h"
+
+void cleaning_nop_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)
+{
+ cmpl(&cache->cleaner, OCF_CLEANER_DISABLE);
+}
diff --git a/src/spdk/ocf/src/cleaning/nop.h b/src/spdk/ocf/src/cleaning/nop.h
new file mode 100644
index 000000000..d055acb24
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/nop.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright(c) 2012-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __LAYER_CLEANING_POLICY_NOP_H__
+#define __LAYER_CLEANING_POLICY_NOP_H__
+
+#include "cleaning.h"
+#include "nop_structs.h"
+
+void cleaning_nop_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl);
+
+#endif
diff --git a/src/spdk/ocf/src/cleaning/nop_structs.h b/src/spdk/ocf/src/cleaning/nop_structs.h
new file mode 100644
index 000000000..d12342fdf
--- /dev/null
+++ b/src/spdk/ocf/src/cleaning/nop_structs.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
+
+#define __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
+
+struct nop_cleaning_policy_meta {
+} __attribute__((packed));
+
+struct nop_cleaning_policy {
+};
+
+#endif
diff --git a/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.c b/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.c
new file mode 100644
index 000000000..5c3325448
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.c
@@ -0,0 +1,1159 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_concurrency.h"
+#include "../ocf_priv.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_realloc.h"
+
+#define OCF_CACHE_CONCURRENCY_DEBUG 0
+
+#if 1 == OCF_CACHE_CONCURRENCY_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Concurrency][Cache] %s\n", __func__)
+
+#define OCF_DEBUG_RQ(req, format, ...) \
+ ocf_cache_log(req->cache, log_info, "[Concurrency][Cache][%s] %s - " \
+ format"\n", OCF_READ == (req)->rw ? "RD" : "WR", \
+ __func__, ##__VA_ARGS__)
+
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_RQ(req, format, ...)
+#endif
+
+#define OCF_CACHE_LINE_ACCESS_WR INT_MAX
+#define OCF_CACHE_LINE_ACCESS_IDLE 0
+#define OCF_CACHE_LINE_ACCESS_ONE_RD 1
+
+#define _WAITERS_LIST_SIZE (16UL * MiB)
+#define _WAITERS_LIST_ENTRIES \
+ (_WAITERS_LIST_SIZE / sizeof(struct __waiters_list))
+
+#define _WAITERS_LIST_ITEM(cache_line) ((cache_line) % _WAITERS_LIST_ENTRIES)
+
+struct __waiter {
+ ocf_cache_line_t line;
+ void *ctx;
+ uint32_t ctx_id;
+ ocf_req_async_lock_cb cb;
+ struct list_head item;
+ int rw;
+};
+
+struct __waiters_list {
+ struct list_head head;
+ env_spinlock lock;
+};
+
+struct ocf_cache_line_concurrency {
+ env_rwsem lock;
+ env_atomic *access;
+ env_atomic waiting;
+ size_t access_limit;
+ env_allocator *allocator;
+ struct __waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
+
+};
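
Note: the per-line access counter encodes the lock state with the constants defined
above: OCF_CACHE_LINE_ACCESS_IDLE (0) means unlocked, OCF_CACHE_LINE_ACCESS_WR (INT_MAX)
means one exclusive writer, and any value in between is the current reader count. A
standalone model of the two fast-path primitives using C11 atomics (it mirrors
__try_lock_wr()/__try_lock_rd() below, but is not the OCF env_atomic API):

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>
#include <stdio.h>

static atomic_int access_cnt;           /* one counter per cache line in OCF */

static bool try_lock_wr(void)           /* 0 -> INT_MAX, like __try_lock_wr() */
{
	int idle = 0;
	return atomic_compare_exchange_strong(&access_cnt, &idle, INT_MAX);
}

static bool try_lock_rd(void)           /* +1 unless a writer holds it, like __try_lock_rd() */
{
	int v = atomic_load(&access_cnt);

	while (v != INT_MAX) {
		if (atomic_compare_exchange_weak(&access_cnt, &v, v + 1))
			return true;
	}
	return false;
}

int main(void)
{
	printf("rd: %d\n", try_lock_rd());  /* 1: counter goes 0 -> 1 (first reader) */
	printf("wr: %d\n", try_lock_wr());  /* 0: a reader is present, writer must wait */
	return 0;
}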
+
+/*
+ *
+ */
+
+#define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency"
+#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
+
+int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
+{
+ uint32_t i;
+ int error = 0;
+ struct ocf_cache_line_concurrency *c;
+ char name[ALLOCATOR_NAME_MAX];
+ ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
+ cache);
+
+ ENV_BUG_ON(cache->device->concurrency.cache_line);
+
+ OCF_DEBUG_TRACE(cache);
+
+ c = env_vmalloc(sizeof(*c));
+ if (!c) {
+ error = __LINE__;
+ goto ocf_cache_line_concurrency_init;
+ }
+
+ error = env_rwsem_init(&c->lock);
+ if (error) {
+ env_vfree(c);
+ error = __LINE__;
+ goto ocf_cache_line_concurrency_init;
+ }
+
+ cache->device->concurrency.cache_line = c;
+
+ OCF_REALLOC_INIT(&c->access, &c->access_limit);
+ OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), line_entries,
+ &c->access_limit);
+
+ if (!c->access) {
+ error = __LINE__;
+ goto ocf_cache_line_concurrency_init;
+ }
+
+ if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
+ ocf_cache_get_name(cache)) < 0) {
+ error = __LINE__;
+ goto ocf_cache_line_concurrency_init;
+ }
+
+ c->allocator = env_allocator_create(sizeof(struct __waiter), name);
+ if (!c->allocator) {
+ error = __LINE__;
+ goto ocf_cache_line_concurrency_init;
+ }
+
+ /* Init concurrency control table */
+ for (i = 0; i < _WAITERS_LIST_ENTRIES; i++) {
+ INIT_LIST_HEAD(&c->waiters_lsts[i].head);
+ error = env_spinlock_init(&c->waiters_lsts[i].lock);
+ if (error)
+ goto spinlock_err;
+ }
+
+ return 0;
+
+spinlock_err:
+ while (i--)
+ env_spinlock_destroy(&c->waiters_lsts[i].lock);
+ocf_cache_line_concurrency_init:
+ ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
+ "ERROR %d\n", error);
+
+ if (cache->device->concurrency.cache_line)
+ ocf_cache_line_concurrency_deinit(cache);
+
+ return -1;
+}
+
+/*
+ *
+ */
+void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
+{
+ int i;
+ struct ocf_cache_line_concurrency *concurrency;
+
+ if (!cache->device->concurrency.cache_line)
+ return;
+
+ OCF_DEBUG_TRACE(cache);
+
+ concurrency = cache->device->concurrency.cache_line;
+
+ env_rwsem_destroy(&concurrency->lock);
+
+ for (i = 0; i < _WAITERS_LIST_ENTRIES; i++)
+ env_spinlock_destroy(&concurrency->waiters_lsts[i].lock);
+
+ if (concurrency->access)
+ OCF_REALLOC_DEINIT(&concurrency->access,
+ &concurrency->access_limit);
+
+ if (concurrency->allocator)
+ env_allocator_destroy(concurrency->allocator);
+
+ env_vfree(concurrency);
+ cache->device->concurrency.cache_line = NULL;
+}
+
+size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache)
+{
+ size_t size;
+
+ size = sizeof(env_atomic);
+ size *= cache->device->collision_table_entries;
+
+ size += sizeof(struct ocf_cache_line_concurrency);
+
+ return size;
+}
+
+/*
+ *
+ */
+static inline bool __are_waiters(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ bool are = false;
+ struct list_head *iter;
+ uint32_t idx = _WAITERS_LIST_ITEM(line);
+ struct __waiters_list *lst = &c->waiters_lsts[idx];
+ struct __waiter *waiter;
+
+ /* If the list is empty, there are no waiters on this cache line */
+ if (list_empty(&lst->head))
+ return false;
+
+ list_for_each(iter, &lst->head) {
+ waiter = list_entry(iter, struct __waiter, item);
+
+ if (waiter->line == line) {
+ are = true;
+ break;
+ }
+ }
+
+ return are;
+}
+
+/*
+ *
+ */
+static inline void __add_waiter(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line, struct __waiter *waiter)
+{
+ uint32_t idx = _WAITERS_LIST_ITEM(line);
+ struct __waiters_list *lst = &c->waiters_lsts[idx];
+
+ list_add_tail(&waiter->item, &lst->head);
+}
+
+
+#define __lock_waiters_list(cncrrncy, line, flags) \
+ do { \
+ uint32_t idx = _WAITERS_LIST_ITEM(line); \
+ struct __waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
+ env_spinlock_lock_irqsave(&lst->lock, flags); \
+ } while (0)
+
+#define __unlock_waiters_list(cncrrncy, line, flags) \
+ do { \
+ uint32_t idx = _WAITERS_LIST_ITEM(line); \
+ struct __waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
+ env_spinlock_unlock_irqrestore(&lst->lock, flags); \
+ } while (0)
+
+
+/*
+ *
+ */
+static inline bool __try_lock_wr(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+ int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
+ OCF_CACHE_LINE_ACCESS_WR);
+
+ if (prev == OCF_CACHE_LINE_ACCESS_IDLE)
+ return true;
+ else
+ return false;
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_rd_idle(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+ int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
+ OCF_CACHE_LINE_ACCESS_ONE_RD);
+
+ return (prev == OCF_CACHE_LINE_ACCESS_IDLE);
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_rd(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ return !!env_atomic_add_unless(access, 1, OCF_CACHE_LINE_ACCESS_WR);
+}
+
+/*
+ *
+ */
+static inline void __unlock_wr(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
+ env_atomic_set(access, OCF_CACHE_LINE_ACCESS_IDLE);
+}
+
+/*
+ *
+ */
+static inline void __unlock_rd(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ ENV_BUG_ON(env_atomic_read(access) == 0);
+ ENV_BUG_ON(env_atomic_read(access) == OCF_CACHE_LINE_ACCESS_WR);
+ env_atomic_dec(access);
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_wr2wr(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
+ return true;
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_wr2rd(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
+ env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD);
+ return true;
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_rd2wr(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ int v = env_atomic_read(access);
+
+ ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE);
+ ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
+
+ v = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_ONE_RD,
+ OCF_CACHE_LINE_ACCESS_WR);
+
+ return (v == OCF_CACHE_LINE_ACCESS_ONE_RD);
+}
+
+/*
+ *
+ */
+static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c,
+ ocf_cache_line_t line)
+{
+ env_atomic *access = &c->access[line];
+
+ int v = env_atomic_read(access);
+
+ ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE);
+ ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
+
+ return true;
+}
+
+/*
+ *
+ */
+static void _req_on_lock(void *ctx, ocf_req_async_lock_cb cb,
+ uint32_t ctx_id, ocf_cache_line_t line, int rw)
+{
+ struct ocf_request *req = ctx;
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
+ cache_line;
+
+ if (rw == OCF_READ)
+ req->map[ctx_id].rd_locked = true;
+ else if (rw == OCF_WRITE)
+ req->map[ctx_id].wr_locked = true;
+ else
+ ENV_BUG();
+
+ if (env_atomic_dec_return(&req->lock_remaining) == 0) {
+ /* All cache lines locked, resume request */
+ OCF_DEBUG_RQ(req, "Resume");
+ ENV_BUG_ON(!cb);
+ env_atomic_dec(&c->waiting);
+ cb(req);
+ }
+}
+
+/*
+ *
+ */
+static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
+ void *ctx, uint32_t ctx_id)
+{
+ struct __waiter *waiter;
+ bool locked = false;
+ bool waiting = false;
+ unsigned long flags = 0;
+
+ if (__try_lock_wr(c, line)) {
+ /* No activity before the lock was acquired */
+ if (cb)
+ _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
+ return true;
+ }
+
+ waiter = NULL;
+ if (cb) {
+ /* Need to create waiter */
+ waiter = env_allocator_new(c->allocator);
+ if (!waiter)
+ return false;
+ }
+
+ __lock_waiters_list(c, line, flags);
+
+ /* Now that the waiters list is locked, double-check whether the cache
+ * line is unlocked
+ */
+ if (__try_lock_wr(c, line)) {
+ /* Lock acquired */
+ locked = true;
+ } else if (cb) {
+ /* Set up waiter fields */
+ waiter->line = line;
+ waiter->ctx = ctx;
+ waiter->ctx_id = ctx_id;
+ waiter->cb = cb;
+ waiter->rw = OCF_WRITE;
+ INIT_LIST_HEAD(&waiter->item);
+
+ /* Add to waiters list */
+ __add_waiter(c, line, waiter);
+ waiting = true;
+ }
+
+ __unlock_waiters_list(c, line, flags);
+
+ if (locked && cb)
+ _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
+ if (!waiting && waiter)
+ env_allocator_del(c->allocator, waiter);
+
+ return locked || waiting;
+}
+
+/*
+ * Attempt to lock cache line for read.
+ * In case cache line is locked, attempt to add caller on wait list.
+ */
+static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
+ void *ctx, uint32_t ctx_id)
+{
+ struct __waiter *waiter;
+ bool locked = false;
+ bool waiting = false;
+ unsigned long flags = 0;
+
+ if (__try_lock_rd_idle(c, line)) {
+ /* No activity before the lock was acquired, this is the first reader */
+ if (cb)
+ _req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
+ return true;
+ }
+
+ waiter = NULL;
+
+repeat:
+ /* Lock waiters list */
+ __lock_waiters_list(c, line, flags);
+
+ if (!__are_waiters(c, line)) {
+ /* No waiters at the moment */
+
+ /* Check if read lock can be obtained */
+ if (__try_lock_rd(c, line)) {
+ /* Cache line locked */
+ locked = true;
+ goto unlock;
+ }
+ }
+
+ if (!cb)
+ goto unlock;
+
+ if (!waiter) {
+ /* Need to allocate a waiter and add it to the list */
+ __unlock_waiters_list(c, line, flags);
+ waiter = env_allocator_new(c->allocator);
+ if (!waiter)
+ goto end;
+ goto repeat;
+ }
+
+ /* Set up waiter fields */
+ waiter->line = line;
+ waiter->ctx = ctx;
+ waiter->ctx_id = ctx_id;
+ waiter->cb = cb;
+ waiter->rw = OCF_READ;
+ INIT_LIST_HEAD(&waiter->item);
+
+ /* Add to waiters list */
+ __add_waiter(c, line, waiter);
+ waiting = true;
+
+unlock:
+ __unlock_waiters_list(c, line, flags);
+
+end:
+ if (locked && cb)
+ _req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
+ if (!waiting && waiter)
+ env_allocator_del(c->allocator, waiter);
+
+ return locked || waiting;
+}
+
+static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line)
+{
+ bool locked = false;
+ bool exchanged = true;
+ uint32_t i = 0;
+
+ uint32_t idx = _WAITERS_LIST_ITEM(line);
+ struct __waiters_list *lst = &c->waiters_lsts[idx];
+ struct __waiter *waiter;
+
+ struct list_head *iter, *next;
+
+ /*
+ * Lock exchange scenario
+ * 1. RD -> IDLE
+ * 2. RD -> RD
+ * 3. RD -> WR
+ */
+
+ /* Check if the requested cache line is on the list */
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct __waiter, item);
+
+ if (line != waiter->line)
+ continue;
+
+ if (exchanged) {
+ if (waiter->rw == OCF_WRITE)
+ locked = __try_lock_rd2wr(c, line);
+ else if (waiter->rw == OCF_READ)
+ locked = __try_lock_rd2rd(c, line);
+ else
+ ENV_BUG();
+ } else {
+ if (waiter->rw == OCF_WRITE)
+ locked = __try_lock_wr(c, line);
+ else if (waiter->rw == OCF_READ)
+ locked = __try_lock_rd(c, line);
+ else
+ ENV_BUG();
+ }
+
+ i++;
+
+ if (locked) {
+ exchanged = false;
+ list_del(iter);
+
+ _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id,
+ line, waiter->rw);
+
+ env_allocator_del(c->allocator, waiter);
+ } else {
+ break;
+ }
+ }
+
+ if (exchanged) {
+ /* No exchange, no waiters on the list, unlock and return
+ * RD -> IDLE
+ */
+ __unlock_rd(c, line);
+ }
+}
+
+/*
+ *
+ */
+static inline void __unlock_cache_line_rd(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line)
+{
+ unsigned long flags = 0;
+
+ /* Lock waiters list */
+ __lock_waiters_list(c, line, flags);
+ __unlock_cache_line_rd_common(c, line);
+ __unlock_waiters_list(c, line, flags);
+}
+
+
+static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line)
+{
+ uint32_t i = 0;
+ bool locked = false;
+ bool exchanged = true;
+
+ uint32_t idx = _WAITERS_LIST_ITEM(line);
+ struct __waiters_list *lst = &c->waiters_lsts[idx];
+ struct __waiter *waiter;
+
+ struct list_head *iter, *next;
+
+ /*
+ * Lock exchange scenario
+ * 1. WR -> IDLE
+ * 2. WR -> RD
+ * 3. WR -> WR
+ */
+
+ /* Check if the requested cache line is on the list */
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct __waiter, item);
+
+ if (line != waiter->line)
+ continue;
+
+ if (exchanged) {
+ if (waiter->rw == OCF_WRITE)
+ locked = __try_lock_wr2wr(c, line);
+ else if (waiter->rw == OCF_READ)
+ locked = __try_lock_wr2rd(c, line);
+ else
+ ENV_BUG();
+ } else {
+ if (waiter->rw == OCF_WRITE)
+ locked = __try_lock_wr(c, line);
+ else if (waiter->rw == OCF_READ)
+ locked = __try_lock_rd(c, line);
+ else
+ ENV_BUG();
+ }
+
+ i++;
+
+ if (locked) {
+ exchanged = false;
+ list_del(iter);
+
+ _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, line,
+ waiter->rw);
+
+ env_allocator_del(c->allocator, waiter);
+ } else {
+ break;
+ }
+ }
+
+ if (exchanged) {
+ /* No exchange, no waiters on the list, unlock and return
+ * WR -> IDLE
+ */
+ __unlock_wr(c, line);
+ }
+}
+
+/*
+ *
+ */
+static inline void __unlock_cache_line_wr(struct ocf_cache_line_concurrency *c,
+ const ocf_cache_line_t line)
+{
+ unsigned long flags = 0;
+
+ /* Lock waiters list */
+ __lock_waiters_list(c, line, flags);
+ __unlock_cache_line_wr_common(c, line);
+ __unlock_waiters_list(c, line, flags);
+}
+
+/*
+ * Safely remove cache line lock waiter from waiting list.
+ * The request can be granted the lock asynchronously at any point in time,
+ * so the lock state has to be checked under a common lock.
+ */
+static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurrency *c,
+ struct ocf_request *req, int i, void *ctx, int rw)
+{
+ ocf_cache_line_t line = req->map[i].coll_idx;
+ uint32_t idx = _WAITERS_LIST_ITEM(line);
+ struct __waiters_list *lst = &c->waiters_lsts[idx];
+ struct list_head *iter, *next;
+ struct __waiter *waiter;
+ unsigned long flags = 0;
+
+ __lock_waiters_list(c, line, flags);
+
+ if (rw == OCF_READ && req->map[i].rd_locked) {
+ __unlock_cache_line_rd_common(c, line);
+ req->map[i].rd_locked = false;
+ } else if (rw == OCF_WRITE && req->map[i].wr_locked) {
+ __unlock_cache_line_wr_common(c, line);
+ req->map[i].wr_locked = false;
+ } else {
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct __waiter, item);
+ if (waiter->ctx == ctx) {
+ list_del(iter);
+ env_allocator_del(c->allocator, waiter);
+ }
+ }
+ }
+
+ __unlock_waiters_list(c, line, flags);
+}
+
+/* Try to read-lock request without adding waiters. Function should be called
+ * under read lock, multiple threads may attempt to acquire the lock
+ * concurrently. */
+static int _ocf_req_trylock_rd(struct ocf_request *req)
+{
+ int32_t i;
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
+ cache_line;
+ ocf_cache_line_t line;
+ int ret = OCF_LOCK_ACQUIRED;
+
+ OCF_DEBUG_RQ(req, "Lock");
+
+ ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+
+ for (i = 0; i < req->core_line_count; i++) {
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to lock */
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+ ENV_BUG_ON(req->map[i].rd_locked);
+ ENV_BUG_ON(req->map[i].wr_locked);
+
+ if (__lock_cache_line_rd(c, line, NULL, NULL, 0)) {
+ /* cache line locked */
+ req->map[i].rd_locked = true;
+ } else {
+ /* Not possible to lock all cachelines */
+ ret = OCF_LOCK_NOT_ACQUIRED;
+ OCF_DEBUG_RQ(req, "NO Lock, cache line = %u", line);
+ break;
+ }
+ }
+
+ /* Check if request is locked */
+ if (ret == OCF_LOCK_NOT_ACQUIRED) {
+ /* Request is not locked, discard acquired locks */
+ for (; i >= 0; i--) {
+ line = req->map[i].coll_idx;
+
+ if (req->map[i].rd_locked) {
+ __unlock_rd(c, line);
+ req->map[i].rd_locked = false;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Read-lock request cache lines. Must be called under cacheline concurrency
+ * write lock.
+ */
+static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
+{
+ int32_t i;
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
+ cache_line;
+ ocf_cache_line_t line;
+ int ret = OCF_LOCK_NOT_ACQUIRED;
+
+ ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+
+ env_atomic_inc(&c->waiting);
+ env_atomic_set(&req->lock_remaining, req->core_line_count);
+ env_atomic_inc(&req->lock_remaining);
+
+ for (i = 0; i < req->core_line_count; i++) {
+
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to lock */
+ env_atomic_dec(&req->lock_remaining);
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+ ENV_BUG_ON(req->map[i].rd_locked);
+ ENV_BUG_ON(req->map[i].wr_locked);
+
+ if (!__lock_cache_line_rd(c, line, cb, req, i)) {
+ /* lock not acquired and not added to wait list */
+ ret = -OCF_ERR_NO_MEM;
+ goto err;
+ }
+ }
+
+ if (env_atomic_dec_return(&req->lock_remaining) == 0) {
+ ret = OCF_LOCK_ACQUIRED;
+ env_atomic_dec(&c->waiting);
+ }
+
+ return ret;
+
+err:
+ for (; i >= 0; i--) {
+ __remove_line_from_waiters_list(c, req, i, req,
+ OCF_READ);
+ }
+ env_atomic_set(&req->lock_remaining, 0);
+ env_atomic_dec(&c->waiting);
+
+ return ret;
+
+}
+
+int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
+{
+ struct ocf_cache_line_concurrency *c =
+ req->cache->device->concurrency.cache_line;
+ int lock;
+
+ env_rwsem_down_read(&c->lock);
+ lock = _ocf_req_trylock_rd(req);
+ env_rwsem_up_read(&c->lock);
+
+ if (lock != OCF_LOCK_ACQUIRED) {
+ env_rwsem_down_write(&c->lock);
+ lock = _ocf_req_lock_rd(req, cb);
+ env_rwsem_up_write(&c->lock);
+ }
+
+ return lock;
+}
+
+/* Try to write-lock request without adding waiters. Function should be called
+ * under read lock, multiple threads may attempt to acquire the lock
+ * concurrently. */
+static int _ocf_req_trylock_wr(struct ocf_request *req)
+{
+ int32_t i;
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
+ cache_line;
+ ocf_cache_line_t line;
+ int ret = OCF_LOCK_ACQUIRED;
+
+ ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+
+ for (i = 0; i < req->core_line_count; i++) {
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to lock */
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+ ENV_BUG_ON(req->map[i].rd_locked);
+ ENV_BUG_ON(req->map[i].wr_locked);
+
+ if (__lock_cache_line_wr(c, line, NULL, NULL, 0)) {
+ /* cache line locked */
+ req->map[i].wr_locked = true;
+ } else {
+ /* Not possible to lock all cachelines */
+ ret = OCF_LOCK_NOT_ACQUIRED;
+ OCF_DEBUG_RQ(req, "NO Lock, cache line = %u", line);
+ break;
+ }
+ }
+
+ /* Check if request is locked */
+ if (ret == OCF_LOCK_NOT_ACQUIRED) {
+ /* Request is not locked, discard acquired locks */
+ for (; i >= 0; i--) {
+ line = req->map[i].coll_idx;
+
+ if (req->map[i].wr_locked) {
+ __unlock_wr(c, line);
+ req->map[i].wr_locked = false;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Write-lock request cache lines. Must be called under cacheline concurrency
+ * write lock.
+ */
+static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
+{
+ int32_t i;
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
+ cache_line;
+ ocf_cache_line_t line;
+ int ret = OCF_LOCK_NOT_ACQUIRED;
+
+ ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+ ENV_BUG_ON(!cb);
+
+ env_atomic_inc(&c->waiting);
+ env_atomic_set(&req->lock_remaining, req->core_line_count);
+ env_atomic_inc(&req->lock_remaining);
+
+ for (i = 0; i < req->core_line_count; i++) {
+
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to lock */
+ env_atomic_dec(&req->lock_remaining);
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+ ENV_BUG_ON(req->map[i].rd_locked);
+ ENV_BUG_ON(req->map[i].wr_locked);
+
+ if (!__lock_cache_line_wr(c, line, cb, req, i)) {
+ /* lock not acquired and not added to wait list */
+ ret = -OCF_ERR_NO_MEM;
+ goto err;
+ }
+ }
+
+ if (env_atomic_dec_return(&req->lock_remaining) == 0) {
+ ret = OCF_LOCK_ACQUIRED;
+ env_atomic_dec(&c->waiting);
+ }
+
+ return ret;
+
+err:
+ for (; i >= 0; i--) {
+ __remove_line_from_waiters_list(c, req, i, req,
+ OCF_WRITE);
+ }
+ env_atomic_set(&req->lock_remaining, 0);
+ env_atomic_dec(&c->waiting);
+
+ return ret;
+}
+
+int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
+{
+ struct ocf_cache_line_concurrency *c =
+ req->cache->device->concurrency.cache_line;
+ int lock;
+
+ env_rwsem_down_read(&c->lock);
+ lock = _ocf_req_trylock_wr(req);
+ env_rwsem_up_read(&c->lock);
+
+ if (lock != OCF_LOCK_ACQUIRED) {
+ env_rwsem_down_write(&c->lock);
+ lock = _ocf_req_lock_wr(req, cb);
+ env_rwsem_up_write(&c->lock);
+ }
+
+ return lock;
+}
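
Note: a hedged usage sketch of the asynchronous request lock (the engine function names
below are hypothetical and includes are omitted): ocf_req_async_lock_wr() returns
OCF_LOCK_ACQUIRED when every mapped line was locked on the fast path, OCF_LOCK_NOT_ACQUIRED
when the request was queued and the callback will fire once the last line is granted,
and a negative error when a waiter could not be allocated.

static void my_engine_resume(struct ocf_request *req)
{
	/* All requested cache lines are now write-locked; continue the I/O path. */
}

static void my_engine_start(struct ocf_request *req)
{
	int lock = ocf_req_async_lock_wr(req, my_engine_resume);

	if (lock == OCF_LOCK_ACQUIRED) {
		my_engine_resume(req);          /* fast path: nothing to wait for */
	} else if (lock < 0) {
		/* -OCF_ERR_NO_MEM: a waiter could not be queued, fail the request */
	}
	/* OCF_LOCK_NOT_ACQUIRED: my_engine_resume() will be called asynchronously */
}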
+
+
+/*
+ *
+ */
+void ocf_req_unlock_rd(struct ocf_request *req)
+{
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
+ int32_t i;
+ ocf_cache_line_t line;
+
+ OCF_DEBUG_RQ(req, "Unlock");
+
+ for (i = 0; i < req->core_line_count; i++) {
+
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to unlock */
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+
+ ENV_BUG_ON(!req->map[i].rd_locked);
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+
+ __unlock_cache_line_rd(c, line);
+ req->map[i].rd_locked = false;
+ }
+}
+
+/*
+ *
+ */
+void ocf_req_unlock_wr(struct ocf_request *req)
+{
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
+ int32_t i;
+ ocf_cache_line_t line;
+
+ OCF_DEBUG_RQ(req, "Unlock");
+
+ for (i = 0; i < req->core_line_count; i++) {
+
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to unlock */
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+
+ ENV_BUG_ON(!req->map[i].wr_locked);
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+
+ __unlock_cache_line_wr(c, line);
+ req->map[i].wr_locked = false;
+ }
+}
+
+/*
+ *
+ */
+void ocf_req_unlock(struct ocf_request *req)
+{
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
+ int32_t i;
+ ocf_cache_line_t line;
+
+ OCF_DEBUG_RQ(req, "Unlock");
+
+ for (i = 0; i < req->core_line_count; i++) {
+
+ if (req->map[i].status == LOOKUP_MISS) {
+ /* MISS nothing to unlock */
+ continue;
+ }
+
+ line = req->map[i].coll_idx;
+ ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+
+ if (req->map[i].rd_locked && req->map[i].wr_locked) {
+ ENV_BUG();
+ } else if (req->map[i].rd_locked) {
+ __unlock_cache_line_rd(c, line);
+ req->map[i].rd_locked = false;
+ } else if (req->map[i].wr_locked) {
+ __unlock_cache_line_wr(c, line);
+ req->map[i].wr_locked = false;
+ } else {
+ ENV_BUG();
+ }
+ }
+}
+
+/*
+ * Release the read or write lock held on a single request map entry
+ */
+void ocf_req_unlock_entry(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t entry)
+{
+ struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
+
+ ENV_BUG_ON(req->map[entry].status == LOOKUP_MISS);
+
+ if (req->map[entry].rd_locked && req->map[entry].wr_locked) {
+ ENV_BUG();
+ } else if (req->map[entry].rd_locked) {
+ __unlock_cache_line_rd(c, req->map[entry].coll_idx);
+ req->map[entry].rd_locked = false;
+ } else if (req->map[entry].wr_locked) {
+ __unlock_cache_line_wr(c, req->map[entry].coll_idx);
+ req->map[entry].wr_locked = false;
+ } else {
+ ENV_BUG();
+ }
+}
+
+/*
+ * Check if the cache line is locked or has waiters queued for it
+ */
+bool ocf_cache_line_is_used(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+
+ ENV_BUG_ON(line >= cache->device->collision_table_entries);
+
+ if (env_atomic_read(&(c->access[line])))
+ return true;
+
+ if (ocf_cache_line_are_waiters(cache, line))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Check if there are requests waiting for the cache line lock
+ */
+bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+ bool are;
+ unsigned long flags = 0;
+
+ ENV_BUG_ON(line >= cache->device->collision_table_entries);
+
+ /* Lock waiters list */
+ __lock_waiters_list(c, line, flags);
+
+ are = __are_waiters(c, line);
+
+ __unlock_waiters_list(c, line, flags);
+
+ return are;
+}
+
+/*
+ * Return the number of OCF requests suspended waiting for cache line locks
+ */
+uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache)
+{
+ struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+
+ return env_atomic_read(&c->waiting);
+}
+
+bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+ return __lock_cache_line_rd(c, line, NULL, NULL, 0);
+}
+
+/*
+ * Release a cache line read lock taken with ocf_cache_line_try_lock_rd()
+ */
+void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+
+ OCF_DEBUG_RQ(cache, "Cache line = %u", line);
+
+ __unlock_cache_line_rd(c, line);
+}
+
diff --git a/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.h b/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.h
new file mode 100644
index 000000000..fc71d1c4f
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_cache_line_concurrency.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef OCF_CACHE_CONCURRENCY_H_
+#define OCF_CACHE_CONCURRENCY_H_
+
+/**
+ * @file ocf_cache_line_concurrency.h
+ * @brief OCF cache concurrency module
+ */
+
+/**
+ * @brief OCF cache concurrency module handle
+ */
+struct ocf_cache_line_concurrency;
+
+/**
+ * @brief Initialize OCF cache concurrency module
+ *
+ * @param cache - OCF cache instance
+ * @return 0 - Initialization successful, otherwise ERROR
+ */
+int ocf_cache_line_concurrency_init(struct ocf_cache *cache);
+
+/**
+ * @brief De-initialize OCF cache concurrency module
+ *
+ * @param cache - OCF cache instance
+ */
+void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache);
+
+/**
+ * @brief Get the number of OCF requests waiting (suspended) due to cache
+ * line overlap
+ *
+ * @param cache - OCF cache instance
+ *
+ * @return Number of suspended OCF requests
+ */
+uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
+
+/**
+ * @brief Return the memory footprint consumed by the cache concurrency module
+ *
+ * @param cache - OCF cache instance
+ *
+ * @return Memory footprint of cache concurrency module
+ */
+size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);
+
+/* async request cacheline lock acquisition callback */
+typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
+
+/**
+ * @brief Lock OCF request for write access (Lock all cache lines in map info)
+ *
+ * @param req - OCF request
+ * @param cb - async lock acquisition callback
+ *
+ * @returns lock acquisition status or negative error code in case of internal
+ * error
+ * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
+ * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
+ * added to the waiting list; the @cb callback is called once the lock is acquired
+ */
+int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
+
+/**
+ * @brief Lock OCF request for read access (Lock all cache lines in map info)
+ *
+ * @param req - OCF request
+ * @param cb - async lock acquisition callback
+ *
+ * @returns lock acquisition status or negative error code in case of internal
+ * error
+ * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
+ * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
+ * added to the waiting list; the @cb callback is called once the lock is acquired
+ */
+int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
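A minimal usage sketch of the asynchronous request lock API declared above; the caller, the resume_cb() callback and the process_request() helper are hypothetical, while the return-value handling follows the comments above and the OCF_LOCK_* definitions in ocf_concurrency.h:

	static void resume_cb(struct ocf_request *req)
	{
		/* hypothetical callback: runs once every cache line in the
		 * request map has been granted its lock asynchronously */
		process_request(req);
	}

	/* hypothetical caller, e.g. an IO engine entry point */
	static int start_request(struct ocf_request *req)
	{
		int lock = ocf_req_async_lock_rd(req, resume_cb);

		if (lock == OCF_LOCK_ACQUIRED) {
			/* all cache lines were locked synchronously */
			process_request(req);
		} else if (lock < 0) {
			/* internal error, e.g. -OCF_ERR_NO_MEM */
			return lock;
		}
		/* OCF_LOCK_NOT_ACQUIRED: the request was queued on the
		 * waiting list, resume_cb() will be invoked once the lock
		 * is finally held */
		return 0;
	}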
+
+/**
+ * @brief Unlock OCF request from write access
+ *
+ * @param req - OCF request
+ */
+void ocf_req_unlock_wr(struct ocf_request *req);
+
+/**
+ * @brief Unlock OCF request from read access
+ *
+ * @param req - OCF request
+ */
+void ocf_req_unlock_rd(struct ocf_request *req);
+
+/**
+ * @brief Unlock OCF request from read or write access
+ *
+ * @param req - OCF request
+ */
+void ocf_req_unlock(struct ocf_request *req);
+
+/**
+ * @brief Check if cache line is used.
+ *
+ * Cache line is used when:
+ * 1. It is locked for write or read access
+ * or
+ * 2. There are waiters queued for it on the waiting list
+ *
+ * @param cache - OCF cache instance
+ * @param line - Cache line to be checked
+ *
+ * @retval true - cache line is used
+ * @retval false - cache line is not used
+ */
+bool ocf_cache_line_is_used(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+/**
+ * @brief Check if for specified cache line there are waiters
+ * on the waiting list
+ *
+ * @param cache - OCF cache instance
+ * @param line - Cache line to be checked for waiters
+ *
+ * @retval true - there are waiters
+ * @retval false - No waiters
+ */
+bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+/**
+ * @brief Unlock request map info entry from write or read access.
+ *
+ * @param cache - OCF cache instance
+ * @param req - OCF request
+ * @param entry - request map entry number
+ */
+void ocf_req_unlock_entry(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t entry);
+
+/**
+ * @brief Release cache line read lock
+ *
+ * @param cache - OCF cache instance
+ * @param line - Cache line to be unlocked
+ */
+void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
+
+/**
+ * @brief Attempt to lock cache line for read
+ *
+ * @param cache - OCF cache instance
+ * @param line - Cache line to be locked for read
+ *
+ * @retval true - read lock successfully acquired
+ * @retval false - failed to acquire read lock
+ */
+bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
+
+#endif /* OCF_CACHE_CONCURRENCY_H_ */
diff --git a/src/spdk/ocf/src/concurrency/ocf_concurrency.c b/src/spdk/ocf/src/concurrency/ocf_concurrency.c
new file mode 100644
index 000000000..91d16211e
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_concurrency.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_concurrency.h"
+
+int ocf_concurrency_init(struct ocf_cache *cache)
+{
+ int result = 0;
+
+ result = ocf_cache_line_concurrency_init(cache);
+
+ if (result)
+ ocf_concurrency_deinit(cache);
+
+ return result;
+}
+
+void ocf_concurrency_deinit(struct ocf_cache *cache)
+{
+ ocf_cache_line_concurrency_deinit(cache);
+}
+
diff --git a/src/spdk/ocf/src/concurrency/ocf_concurrency.h b/src/spdk/ocf/src/concurrency/ocf_concurrency.h
new file mode 100644
index 000000000..bea09f6c2
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_concurrency.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef OCF_CONCURRENCY_H_
+#define OCF_CONCURRENCY_H_
+
+#include "../ocf_cache_priv.h"
+
+/**
+ * @file ocf_concurrency.h
+ * @brief OCF concurrency
+ */
+
+/**
+ * @brief Lock result - Lock acquired successfully
+ */
+#define OCF_LOCK_ACQUIRED 0
+
+/**
+ * @brief Lock result - Lock not acquired, lock request added into waiting list
+ */
+#define OCF_LOCK_NOT_ACQUIRED 1
+
+/**
+ * @brief Initialize OCF concurrency module
+ *
+ * @param cache - OCF cache instance
+ * @return 0 - Initialization successful, otherwise ERROR
+ */
+int ocf_concurrency_init(struct ocf_cache *cache);
+
+/**
+ * @brief De-initialize OCF concurrency module
+ *
+ * @param cache - OCF cache instance
+ */
+void ocf_concurrency_deinit(struct ocf_cache *cache);
+
+#include "ocf_cache_line_concurrency.h"
+
+#endif /* OCF_CONCURRENCY_H_ */
diff --git a/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.c b/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.c
new file mode 100644
index 000000000..9ceaf8c2a
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_metadata_concurrency.h"
+#include "../metadata/metadata_misc.h"
+
+int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
+{
+ int err = 0;
+ unsigned i;
+
+ err = env_spinlock_init(&metadata_lock->eviction);
+ if (err)
+ return err;
+
+ env_rwlock_init(&metadata_lock->status);
+ err = env_rwsem_init(&metadata_lock->global);
+ if (err)
+ goto rwsem_err;
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ err = env_spinlock_init(&metadata_lock->partition[i]);
+ if (err)
+ goto spinlocks_err;
+ }
+
+ return err;
+
+spinlocks_err:
+ while (i--)
+ env_spinlock_destroy(&metadata_lock->partition[i]);
+rwsem_err:
+ env_rwlock_destroy(&metadata_lock->status);
+ env_spinlock_destroy(&metadata_lock->eviction);
+ return err;
+}
+
+void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
+{
+ unsigned i;
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ env_spinlock_destroy(&metadata_lock->partition[i]);
+ }
+
+ env_spinlock_destroy(&metadata_lock->eviction);
+ env_rwlock_destroy(&metadata_lock->status);
+ env_rwsem_destroy(&metadata_lock->global);
+}
+
+int ocf_metadata_concurrency_attached_init(
+ struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache,
+ uint32_t hash_table_entries, uint32_t colision_table_pages)
+{
+ uint32_t i;
+ int err = 0;
+
+ metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) *
+ hash_table_entries);
+ metadata_lock->collision_pages = env_vzalloc(sizeof(env_rwsem) *
+ colision_table_pages);
+ if (!metadata_lock->hash ||
+ !metadata_lock->collision_pages) {
+ env_vfree(metadata_lock->hash);
+ env_vfree(metadata_lock->collision_pages);
+ metadata_lock->hash = NULL;
+ metadata_lock->collision_pages = NULL;
+ return -OCF_ERR_NO_MEM;
+ }
+
+ for (i = 0; i < hash_table_entries; i++) {
+ err = env_rwsem_init(&metadata_lock->hash[i]);
+ if (err)
+ break;
+ }
+ if (err) {
+ while (i--)
+ env_rwsem_destroy(&metadata_lock->hash[i]);
+ env_vfree(metadata_lock->hash);
+ metadata_lock->hash = NULL;
+ ocf_metadata_concurrency_attached_deinit(metadata_lock);
+ return err;
+ }
+
+
+ for (i = 0; i < colision_table_pages; i++) {
+ err = env_rwsem_init(&metadata_lock->collision_pages[i]);
+ if (err)
+ break;
+ }
+ if (err) {
+ while (i--)
+ env_rwsem_destroy(&metadata_lock->collision_pages[i]);
+ env_vfree(metadata_lock->collision_pages);
+ metadata_lock->collision_pages = NULL;
+ ocf_metadata_concurrency_attached_deinit(metadata_lock);
+ return err;
+ }
+
+ metadata_lock->cache = cache;
+ metadata_lock->num_hash_entries = hash_table_entries;
+ metadata_lock->num_collision_pages = colision_table_pages;
+
+ return 0;
+}
+
+void ocf_metadata_concurrency_attached_deinit(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ uint32_t i;
+
+ if (metadata_lock->hash) {
+ for (i = 0; i < metadata_lock->num_hash_entries; i++)
+ env_rwsem_destroy(&metadata_lock->hash[i]);
+ env_vfree(metadata_lock->hash);
+ metadata_lock->hash = NULL;
+ metadata_lock->num_hash_entries = 0;
+ }
+
+ if (metadata_lock->collision_pages) {
+ for (i = 0; i < metadata_lock->num_collision_pages; i++)
+ env_rwsem_destroy(&metadata_lock->collision_pages[i]);
+ env_vfree(metadata_lock->collision_pages);
+ metadata_lock->collision_pages = NULL;
+ metadata_lock->num_collision_pages = 0;
+ }
+}
+
+void ocf_metadata_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ env_rwsem_down_write(&metadata_lock->global);
+}
+
+int ocf_metadata_try_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ return env_rwsem_down_write_trylock(&metadata_lock->global);
+}
+
+void ocf_metadata_end_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ env_rwsem_up_write(&metadata_lock->global);
+}
+
+void ocf_metadata_start_shared_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ env_rwsem_down_read(&metadata_lock->global);
+}
+
+int ocf_metadata_try_start_shared_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ return env_rwsem_down_read_trylock(&metadata_lock->global);
+}
+
+void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock)
+{
+ env_rwsem_up_read(&metadata_lock->global);
+}
+
+void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
+
+ if (rw == OCF_METADATA_WR)
+ env_rwsem_down_write(&metadata_lock->hash[hash]);
+ else if (rw == OCF_METADATA_RD)
+ env_rwsem_down_read(&metadata_lock->hash[hash]);
+ else
+ ENV_BUG();
+}
+
+void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
+
+ if (rw == OCF_METADATA_WR)
+ env_rwsem_up_write(&metadata_lock->hash[hash]);
+ else if (rw == OCF_METADATA_RD)
+ env_rwsem_up_read(&metadata_lock->hash[hash]);
+ else
+ ENV_BUG();
+}
+
+int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ int result = -1;
+
+ ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
+
+ if (rw == OCF_METADATA_WR) {
+ result = env_rwsem_down_write_trylock(
+ &metadata_lock->hash[hash]);
+ } else if (rw == OCF_METADATA_RD) {
+ result = env_rwsem_down_read_trylock(
+ &metadata_lock->hash[hash]);
+ } else {
+ ENV_BUG();
+ }
+
+ if (!result)
+ return -1;
+
+ return 0;
+}
+
+/* NOTE: an attempt to acquire hash locks for multiple core lines may end up
+ * in a deadlock. In order to hash-lock multiple core lines safely, use the
+ * ocf_req_hash_lock_* functions */
+void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_metadata_start_shared_access(metadata_lock);
+ ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD);
+}
+
+void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD);
+ ocf_metadata_end_shared_access(metadata_lock);
+}
+
+void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_metadata_start_shared_access(metadata_lock);
+ ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR);
+}
+
+void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR);
+ ocf_metadata_end_shared_access(metadata_lock);
+}
+
+/* number of hash entries */
+#define _NUM_HASH_ENTRIES req->cache->metadata.lock.num_hash_entries
+
+/* true if hashes are monotonic */
+#define _IS_MONOTONIC(req) (req->map[0].hash + req->core_line_count <= \
+ _NUM_HASH_ENTRIES)
+
+/* minimal hash value */
+#define _MIN_HASH(req) (_IS_MONOTONIC(req) ? req->map[0].hash : 0)
+
+/* maximal hash value */
+#define _MAX_HASH(req) (_IS_MONOTONIC(req) ? \
+ req->map[req->core_line_count - 1].hash : \
+ _NUM_HASH_ENTRIES - 1)
+
+/* number of unique hash values in request */
+#define _HASH_COUNT(req) OCF_MIN(req->core_line_count, _NUM_HASH_ENTRIES)
+
+/* true if there is a gap in hash values */
+#define _HAS_GAP(req) (_MAX_HASH(req) - _MIN_HASH(req) + 1 > _HASH_COUNT(req))
+
+/* gap size */
+#define _GAP_VAL(req) ((_MAX_HASH(req) - _MIN_HASH(req) + 1) - _HASH_COUNT(req))
+
+/* hash value after which there is a gap */
+#define _GAP_START(req) req->map[req->core_line_count - 1].hash
+
+/* get next hash value */
+#define _HASH_NEXT(req, hash) (hash + 1 + \
+ ((_HAS_GAP(req) && hash == _GAP_START(req)) ? _GAP_VAL(req) : 0))
+
+/*
+ * Iterate over hash buckets for all core lines in the request in ascending hash
+ * bucket value order. Each hash bucket is visited only once.
+ *
+ * @hash stores hash values for each iteration
+ *
+ * Example hash iteration order for _NUM_HASH_ENTRIES == 5:
+ * Request hashes Iteration order
+ * [2, 3, 4] [2, 3, 4]
+ * [2, 3, 4, 0] [0, 2, 3, 4]
+ * [2, 3, 4, 0, 1, 2, 3, 4, 0, 1] [0, 1, 2, 3, 4]
+ * [4, 0] [0, 4]
+ * [0, 1, 2, 3, 4, 0, 1] [0, 1, 2, 3, 4]
+ *
+ */
+#define for_each_req_hash_asc(req, hash) \
+ for (hash = _MIN_HASH(req); hash <= _MAX_HASH(req); \
+ hash = _HASH_NEXT(req, hash))
+
+void ocf_req_hash_lock_rd(struct ocf_request *req)
+{
+ ocf_cache_line_t hash;
+
+ ocf_metadata_start_shared_access(&req->cache->metadata.lock);
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+ }
+}
+
+void ocf_req_hash_unlock_rd(struct ocf_request *req)
+{
+ ocf_cache_line_t hash;
+
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+ }
+ ocf_metadata_end_shared_access(&req->cache->metadata.lock);
+}
+
+void ocf_req_hash_lock_wr(struct ocf_request *req)
+{
+ ocf_cache_line_t hash;
+
+ ocf_metadata_start_shared_access(&req->cache->metadata.lock);
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_WR);
+ }
+}
+
+void ocf_req_hash_lock_upgrade(struct ocf_request *req)
+{
+ ocf_cache_line_t hash;
+
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+ }
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_WR);
+ }
+}
+
+void ocf_req_hash_unlock_wr(struct ocf_request *req)
+{
+ ocf_cache_line_t hash;
+
+ for_each_req_hash_asc(req, hash) {
+ ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ OCF_METADATA_WR);
+ }
+ ocf_metadata_end_shared_access(&req->cache->metadata.lock);
+}
+
+void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page)
+{
+ env_rwsem_down_read(&metadata_lock->collision_pages[page]);
+}
+
+void ocf_collision_end_shared_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page)
+{
+ env_rwsem_up_read(&metadata_lock->collision_pages[page]);
+}
+
+void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page)
+{
+ env_rwsem_down_write(&metadata_lock->collision_pages[page]);
+}
+
+void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page)
+{
+ env_rwsem_up_write(&metadata_lock->collision_pages[page]);
+}
diff --git a/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.h b/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.h
new file mode 100644
index 000000000..224d8a7f5
--- /dev/null
+++ b/src/spdk/ocf/src/concurrency/ocf_metadata_concurrency.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "../ocf_cache_priv.h"
+
+#ifndef __OCF_METADATA_CONCURRENCY_H__
+#define __OCF_METADATA_CONCURRENCY_H__
+
+#define OCF_METADATA_RD 0
+#define OCF_METADATA_WR 1
+
+int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock);
+
+void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock);
+
+int ocf_metadata_concurrency_attached_init(
+ struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache,
+ uint32_t hash_table_entries, uint32_t colision_table_pages);
+
+void ocf_metadata_concurrency_attached_deinit(
+ struct ocf_metadata_lock *metadata_lock);
+
+static inline void ocf_metadata_eviction_lock(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ env_spinlock_lock(&metadata_lock->eviction);
+}
+
+static inline void ocf_metadata_eviction_unlock(
+ struct ocf_metadata_lock *metadata_lock)
+{
+ env_spinlock_unlock(&metadata_lock->eviction);
+}
+
+static inline void ocf_metadata_partition_lock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_part_id_t part_id)
+{
+ env_spinlock_lock(&metadata_lock->partition[part_id]);
+}
+
+static inline void ocf_metadata_partition_unlock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_part_id_t part_id)
+{
+ env_spinlock_unlock(&metadata_lock->partition[part_id]);
+}
+
+#define OCF_METADATA_EVICTION_LOCK() \
+ ocf_metadata_eviction_lock(&cache->metadata.lock)
+
+#define OCF_METADATA_EVICTION_UNLOCK() \
+ ocf_metadata_eviction_unlock(&cache->metadata.lock)
+
+void ocf_metadata_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+int ocf_metadata_try_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+void ocf_metadata_end_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+int ocf_metadata_try_start_shared_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+void ocf_metadata_start_shared_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+void ocf_metadata_end_shared_access(
+ struct ocf_metadata_lock *metadata_lock);
+
+static inline void ocf_metadata_status_bits_lock(
+ struct ocf_metadata_lock *metadata_lock, int rw)
+{
+ if (rw == OCF_METADATA_WR)
+ env_rwlock_write_lock(&metadata_lock->status);
+ else if (rw == OCF_METADATA_RD)
+ env_rwlock_read_lock(&metadata_lock->status);
+ else
+ ENV_BUG();
+}
+
+static inline void ocf_metadata_status_bits_unlock(
+ struct ocf_metadata_lock *metadata_lock, int rw)
+{
+ if (rw == OCF_METADATA_WR)
+ env_rwlock_write_unlock(&metadata_lock->status);
+ else if (rw == OCF_METADATA_RD)
+ env_rwlock_read_unlock(&metadata_lock->status);
+ else
+ ENV_BUG();
+}
+
+#define OCF_METADATA_BITS_LOCK_RD() \
+ ocf_metadata_status_bits_lock(&cache->metadata.lock, \
+ OCF_METADATA_RD)
+
+#define OCF_METADATA_BITS_UNLOCK_RD() \
+ ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
+ OCF_METADATA_RD)
+
+#define OCF_METADATA_BITS_LOCK_WR() \
+ ocf_metadata_status_bits_lock(&cache->metadata.lock, \
+ OCF_METADATA_WR)
+
+#define OCF_METADATA_BITS_UNLOCK_WR() \
+ ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
+ OCF_METADATA_WR)
+
+void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+
+/* lock entire request in deadlock-free manner */
+void ocf_req_hash_lock_rd(struct ocf_request *req);
+void ocf_req_hash_unlock_rd(struct ocf_request *req);
+void ocf_req_hash_lock_wr(struct ocf_request *req);
+void ocf_req_hash_unlock_wr(struct ocf_request *req);
+void ocf_req_hash_lock_upgrade(struct ocf_request *req);
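A sketch of the deadlock-free per-request hash locking pattern these functions provide; the mapping_needs_update condition is hypothetical, while the sequence mirrors how ocf_engine_prepare_clines() in engine_common.c, later in this diff, uses the interface:

	/* hash values for all core lines are computed first (ocf_req_hash()) */
	ocf_req_hash_lock_rd(req);

	/* ... traverse the cache mapping for the request ... */

	if (mapping_needs_update) {
		/* drop the read locks, then re-acquire the same hash buckets
		 * for writing, again in ascending bucket order */
		ocf_req_hash_lock_upgrade(req);
		/* ... map new cache lines ... */
		ocf_req_hash_unlock_wr(req);
	} else {
		ocf_req_hash_unlock_rd(req);
	}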
+
+/* collision table page lock interface */
+void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page);
+void ocf_collision_end_shared_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page);
+void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page);
+void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock,
+ uint32_t page);
+#endif
diff --git a/src/spdk/ocf/src/engine/cache_engine.c b/src/spdk/ocf/src/engine/cache_engine.c
new file mode 100644
index 000000000..6b6d5f9b9
--- /dev/null
+++ b/src/spdk/ocf/src/engine/cache_engine.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "cache_engine.h"
+#include "engine_common.h"
+#include "engine_rd.h"
+#include "engine_wt.h"
+#include "engine_pt.h"
+#include "engine_wi.h"
+#include "engine_wa.h"
+#include "engine_wb.h"
+#include "engine_wo.h"
+#include "engine_fast.h"
+#include "engine_discard.h"
+#include "engine_d2c.h"
+#include "engine_ops.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_refcnt.h"
+#include "../ocf_request.h"
+#include "../metadata/metadata.h"
+#include "../eviction/eviction.h"
+
+enum ocf_io_if_type {
+ /* Public OCF IO interfaces to be set by user */
+ OCF_IO_WT_IF,
+ OCF_IO_WB_IF,
+ OCF_IO_WA_IF,
+ OCF_IO_WI_IF,
+ OCF_IO_PT_IF,
+ OCF_IO_WO_IF,
+ OCF_IO_MAX_IF,
+
+ /* Private OCF interfaces */
+ OCF_IO_FAST_IF,
+ OCF_IO_DISCARD_IF,
+ OCF_IO_D2C_IF,
+ OCF_IO_OPS_IF,
+ OCF_IO_PRIV_MAX_IF,
+};
+
+static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
+ [OCF_IO_WT_IF] = {
+ .read = ocf_read_generic,
+ .write = ocf_write_wt,
+ .name = "Write Through"
+ },
+ [OCF_IO_WB_IF] = {
+ .read = ocf_read_generic,
+ .write = ocf_write_wb,
+ .name = "Write Back"
+ },
+ [OCF_IO_WA_IF] = {
+ .read = ocf_read_generic,
+ .write = ocf_write_wa,
+ .name = "Write Around"
+ },
+ [OCF_IO_WI_IF] = {
+ .read = ocf_read_generic,
+ .write = ocf_write_wi,
+ .name = "Write Invalidate"
+ },
+ [OCF_IO_PT_IF] = {
+ .read = ocf_read_pt,
+ .write = ocf_write_wi,
+ .name = "Pass Through",
+ },
+ [OCF_IO_WO_IF] = {
+ .read = ocf_read_wo,
+ .write = ocf_write_wb,
+ .name = "Write Only",
+ },
+ [OCF_IO_FAST_IF] = {
+ .read = ocf_read_fast,
+ .write = ocf_write_fast,
+ .name = "Fast",
+ },
+ [OCF_IO_DISCARD_IF] = {
+ .read = ocf_discard,
+ .write = ocf_discard,
+ .name = "Discard",
+ },
+ [OCF_IO_D2C_IF] = {
+ .read = ocf_io_d2c,
+ .write = ocf_io_d2c,
+ .name = "Direct to core",
+ },
+ [OCF_IO_OPS_IF] = {
+ .read = ocf_engine_ops,
+ .write = ocf_engine_ops,
+ .name = "Ops engine",
+ },
+};
+
+static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = {
+ [ocf_req_cache_mode_wt] = &IO_IFS[OCF_IO_WT_IF],
+ [ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF],
+ [ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF],
+ [ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF],
+ [ocf_req_cache_mode_wo] = &IO_IFS[OCF_IO_WO_IF],
+ [ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF],
+ [ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF],
+ [ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF],
+};
+
+const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
+{
+ if (req_cache_mode == ocf_req_cache_mode_max)
+ return NULL;
+ return cache_mode_io_if_map[req_cache_mode];
+}
+
+struct ocf_request *ocf_engine_pop_req(ocf_cache_t cache, ocf_queue_t q)
+{
+ unsigned long lock_flags = 0;
+ struct ocf_request *req;
+
+ OCF_CHECK_NULL(q);
+
+ /* LOCK */
+ env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+
+ if (list_empty(&q->io_list)) {
+ /* No items on the list */
+ env_spinlock_unlock_irqrestore(&q->io_list_lock,
+ lock_flags);
+ return NULL;
+ }
+
+ /* Get the first request and remove it from the list */
+ req = list_first_entry(&q->io_list, struct ocf_request, list);
+
+ env_atomic_dec(&q->io_no);
+ list_del(&req->list);
+
+ /* UNLOCK */
+ env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+
+ OCF_CHECK_NULL(req);
+
+ if (ocf_req_alloc_map(req)) {
+ req->complete(req, req->error);
+ return NULL;
+ }
+
+ return req;
+}
+
+bool ocf_fallback_pt_is_on(ocf_cache_t cache)
+{
+ ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
+
+ return (cache->fallback_pt_error_threshold !=
+ OCF_CACHE_FALLBACK_PT_INACTIVE &&
+ env_atomic_read(&cache->fallback_pt_error_counter) >=
+ cache->fallback_pt_error_threshold);
+}
+
+#define SEQ_CUTOFF_FULL_MARGIN \
+ (OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
+
+static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
+{
+ if (!ocf_cache_is_device_attached(cache))
+ return false;
+
+ return (ocf_freelist_num_free(cache->freelist) <=
+ SEQ_CUTOFF_FULL_MARGIN);
+}
+
+bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
+ uint64_t bytes)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+
+ ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
+
+ switch (policy) {
+ case ocf_seq_cutoff_policy_always:
+ break;
+
+ case ocf_seq_cutoff_policy_full:
+ if (ocf_seq_cutoff_is_on(cache))
+ break;
+ return false;
+
+ case ocf_seq_cutoff_policy_never:
+ return false;
+ default:
+ ENV_WARN(true, "Invalid sequential cutoff policy!");
+ return false;
+ }
+
+ if (dir == core->seq_cutoff.rw &&
+ core->seq_cutoff.last == addr &&
+ core->seq_cutoff.bytes + bytes >=
+ ocf_core_get_seq_cutoff_threshold(core)) {
+ return true;
+ }
+
+ return false;
+}
+
+void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
+{
+ /*
+ * If the IO is not consecutive with the previous one or goes in a
+ * different direction, reset the sequential cutoff state.
+ */
+ if (req->byte_position != core->seq_cutoff.last ||
+ req->rw != core->seq_cutoff.rw) {
+ core->seq_cutoff.rw = req->rw;
+ core->seq_cutoff.bytes = 0;
+ }
+
+ /* Update last accessed position and bytes counter */
+ core->seq_cutoff.last = req->byte_position + req->byte_length;
+ core->seq_cutoff.bytes += req->byte_length;
+}
+
+void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
+ ocf_core_t core, struct ocf_request *req)
+{
+ if (req->d2c) {
+ req->cache_mode = ocf_req_cache_mode_d2c;
+ return;
+ }
+
+ if (ocf_fallback_pt_is_on(cache)){
+ req->cache_mode = ocf_req_cache_mode_pt;
+ return;
+ }
+
+ if (cache->pt_unaligned_io && !ocf_req_is_4k(req->byte_position,
+ req->byte_length)) {
+ req->cache_mode = ocf_req_cache_mode_pt;
+ return;
+ }
+
+ if (ocf_seq_cutoff_check(core, req->rw, req->byte_position,
+ req->byte_length)) {
+ req->cache_mode = ocf_req_cache_mode_pt;
+ req->seq_cutoff = 1;
+ return;
+ }
+
+ req->cache_mode = ocf_part_get_cache_mode(cache,
+ ocf_part_class2id(cache, req->part_id));
+ if (!ocf_cache_mode_is_valid(req->cache_mode))
+ req->cache_mode = cache->conf_meta->cache_mode;
+
+ if (req->rw == OCF_WRITE &&
+ ocf_req_cache_mode_has_lazy_write(req->cache_mode) &&
+ ocf_req_set_dirty(req)) {
+ req->cache_mode = ocf_req_cache_mode_wt;
+ }
+}
+
+int ocf_engine_hndl_req(struct ocf_request *req)
+{
+ ocf_cache_t cache = req->cache;
+
+ OCF_CHECK_NULL(cache);
+
+ req->io_if = ocf_get_io_if(req->cache_mode);
+ if (!req->io_if)
+ return -OCF_ERR_INVAL;
+
+ ocf_req_get(req);
+
+ /* Until the OCF engine is fully synchronous, the OCF request needs to
+ * be pushed to the OCF workers
+ */
+
+ ocf_engine_push_req_back(req, true);
+
+ return 0;
+}
+
+int ocf_engine_hndl_fast_req(struct ocf_request *req)
+{
+ const struct ocf_io_if *io_if;
+ int ret;
+
+ io_if = ocf_get_io_if(req->cache_mode);
+ if (!io_if)
+ return -OCF_ERR_INVAL;
+
+ ocf_req_get(req);
+
+ switch (req->rw) {
+ case OCF_READ:
+ ret = io_if->read(req);
+ break;
+ case OCF_WRITE:
+ ret = io_if->write(req);
+ break;
+ default:
+ ret = OCF_FAST_PATH_NO;
+ }
+
+ if (ret == OCF_FAST_PATH_NO)
+ ocf_req_put(req);
+
+ return ret;
+}
+
+static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
+{
+ if (OCF_READ == req->rw)
+ IO_IFS[OCF_IO_D2C_IF].read(req);
+ else if (OCF_WRITE == req->rw)
+ IO_IFS[OCF_IO_D2C_IF].write(req);
+ else
+ ENV_BUG();
+}
+
+void ocf_engine_hndl_discard_req(struct ocf_request *req)
+{
+ ocf_req_get(req);
+
+ if (req->d2c) {
+ ocf_engine_hndl_2dc_req(req);
+ return;
+ }
+
+ if (OCF_READ == req->rw)
+ IO_IFS[OCF_IO_DISCARD_IF].read(req);
+ else if (OCF_WRITE == req->rw)
+ IO_IFS[OCF_IO_DISCARD_IF].write(req);
+ else
+ ENV_BUG();
+}
+
+void ocf_engine_hndl_ops_req(struct ocf_request *req)
+{
+ ocf_req_get(req);
+
+ if (req->d2c)
+ req->io_if = &IO_IFS[OCF_IO_D2C_IF];
+ else
+ req->io_if = &IO_IFS[OCF_IO_OPS_IF];
+
+ ocf_engine_push_req_back(req, true);
+}
diff --git a/src/spdk/ocf/src/engine/cache_engine.h b/src/spdk/ocf/src/engine/cache_engine.h
new file mode 100644
index 000000000..af7ec2620
--- /dev/null
+++ b/src/spdk/ocf/src/engine/cache_engine.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __CACHE_ENGINE_H_
+#define __CACHE_ENGINE_H_
+
+struct ocf_thread_priv;
+struct ocf_request;
+
+#define LOOKUP_HIT 5
+#define LOOKUP_MISS 6
+#define LOOKUP_MAPPED 8
+
+typedef enum {
+ /* modes inherited from user API */
+ ocf_req_cache_mode_wt = ocf_cache_mode_wt,
+ ocf_req_cache_mode_wb = ocf_cache_mode_wb,
+ ocf_req_cache_mode_wa = ocf_cache_mode_wa,
+ ocf_req_cache_mode_pt = ocf_cache_mode_pt,
+ ocf_req_cache_mode_wi = ocf_cache_mode_wi,
+ ocf_req_cache_mode_wo = ocf_cache_mode_wo,
+
+ /* internal modes */
+ ocf_req_cache_mode_fast,
+ /*!< Fast path */
+ ocf_req_cache_mode_d2c,
+ /*!< Direct to Core - pass through to core without
+ touching cacheline metadata */
+
+ ocf_req_cache_mode_max,
+} ocf_req_cache_mode_t;
+
+struct ocf_io_if {
+ int (*read)(struct ocf_request *req);
+
+ int (*write)(struct ocf_request *req);
+
+ const char *name;
+};
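Each cache mode provides one of these small dispatch tables; a minimal sketch of how a request is routed through it (modeled on ocf_engine_hndl_fast_req() in cache_engine.c earlier in this diff):

	const struct ocf_io_if *io_if = ocf_get_io_if(req->cache_mode);

	if (!io_if)
		return -OCF_ERR_INVAL;

	/* dispatch to the read or write handler of the effective cache mode */
	if (req->rw == OCF_READ)
		io_if->read(req);
	else
		io_if->write(req);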
+
+void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
+ ocf_core_t core, struct ocf_request *req);
+
+const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t cache_mode);
+
+static inline const char *ocf_get_io_iface_name(ocf_cache_mode_t cache_mode)
+{
+ const struct ocf_io_if *iface = ocf_get_io_if(cache_mode);
+
+ return iface ? iface->name : "Unknown";
+}
+
+static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
+{
+ return mode >= ocf_cache_mode_wt && mode < ocf_cache_mode_max;
+}
+
+static inline bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)
+{
+ return ocf_cache_mode_is_valid((ocf_cache_mode_t)mode) &&
+ ocf_mngt_cache_mode_has_lazy_write(
+ (ocf_cache_mode_t)mode);
+}
+
+void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
+
+bool ocf_fallback_pt_is_on(ocf_cache_t cache);
+
+bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
+ uint64_t bytes);
+
+struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
+ struct ocf_queue *q);
+
+int ocf_engine_hndl_req(struct ocf_request *req);
+
+#define OCF_FAST_PATH_YES 7
+#define OCF_FAST_PATH_NO 13
+
+int ocf_engine_hndl_fast_req(struct ocf_request *req);
+
+void ocf_engine_hndl_discard_req(struct ocf_request *req);
+
+void ocf_engine_hndl_ops_req(struct ocf_request *req);
+
+#endif
diff --git a/src/spdk/ocf/src/engine/engine_bf.c b/src/spdk/ocf/src/engine/engine_bf.c
new file mode 100644
index 000000000..5899feb1a
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_bf.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "engine_bf.h"
+#include "engine_inv.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "bf"
+#include "engine_debug.h"
+
+/* Decrements and checks if queue may be unblocked again */
+static inline void backfill_queue_dec_unblock(struct ocf_cache *cache)
+{
+ env_atomic_dec(&cache->pending_read_misses_list_count);
+
+ if (!env_atomic_read(&cache->pending_read_misses_list_blocked))
+ return;
+
+ if (env_atomic_read(&cache->pending_read_misses_list_count)
+ < cache->backfill.queue_unblock_size)
+ env_atomic_set(&cache->pending_read_misses_list_blocked, 0);
+}
+
+static inline void backfill_queue_inc_block(struct ocf_cache *cache)
+{
+ if (env_atomic_inc_return(&cache->pending_read_misses_list_count)
+ >= cache->backfill.max_queue_size)
+ env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
+}
+
+static void _ocf_backfill_complete(struct ocf_request *req, int error)
+{
+ struct ocf_cache *cache = req->cache;
+
+ if (error)
+ req->error = error;
+
+ if (req->error)
+ inc_fallback_pt_error_counter(req->cache);
+
+ /* Handle callback-caller race to let only one of the two complete the
+ * request. Also, complete original request only if this is the last
+ * sub-request to complete
+ */
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ /* We must free the pages we have allocated */
+ ctx_data_secure_erase(cache->owner, req->data);
+ ctx_data_munlock(cache->owner, req->data);
+ ctx_data_free(cache->owner, req->data);
+ req->data = NULL;
+
+ if (req->error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+ ocf_engine_invalidate(req);
+ } else {
+ ocf_req_unlock(req);
+
+ /* put the request at the last point of the completion path */
+ ocf_req_put(req);
+ }
+}
+
+static int _ocf_backfill_do(struct ocf_request *req)
+{
+ unsigned int reqs_to_issue;
+
+ backfill_queue_dec_unblock(req->cache);
+
+ reqs_to_issue = ocf_engine_io_count(req);
+
+ /* There will be #reqs_to_issue completions */
+ env_atomic_set(&req->req_remaining, reqs_to_issue);
+
+ req->data = req->cp_data;
+
+ ocf_submit_cache_reqs(req->cache, req, OCF_WRITE, 0, req->byte_length,
+ reqs_to_issue, _ocf_backfill_complete);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_backfill = {
+ .read = _ocf_backfill_do,
+ .write = _ocf_backfill_do,
+};
+
+void ocf_engine_backfill(struct ocf_request *req)
+{
+ backfill_queue_inc_block(req->cache);
+ ocf_engine_push_req_front_if(req, &_io_if_backfill, true);
+}
diff --git a/src/spdk/ocf/src/engine/engine_bf.h b/src/spdk/ocf/src/engine/engine_bf.h
new file mode 100644
index 000000000..97239d795
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_bf.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_BF_H_
+#define ENGINE_BF_H_
+
+void ocf_engine_backfill(struct ocf_request *req);
+
+#endif /* ENGINE_BF_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_common.c b/src/spdk/ocf/src/engine/engine_common.c
new file mode 100644
index 000000000..b34620702
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_common.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../ocf_freelist.h"
+#include "engine_common.h"
+#define OCF_ENGINE_DEBUG_IO_NAME "common"
+#include "engine_debug.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cleaner.h"
+#include "../metadata/metadata.h"
+#include "../eviction/eviction.h"
+#include "../promotion/promotion.h"
+#include "../concurrency/ocf_concurrency.h"
+
+void ocf_engine_error(struct ocf_request *req,
+ bool stop_cache, const char *msg)
+{
+ struct ocf_cache *cache = req->cache;
+
+ if (stop_cache)
+ env_bit_clear(ocf_cache_state_running, &cache->cache_state);
+
+ if (ocf_cache_log_rl(cache)) {
+ ocf_core_log(req->core, log_err,
+ "%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
+ BYTES_TO_SECTORS(req->byte_position),
+ req->byte_length);
+ }
+}
+
+void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
+ struct ocf_map_info *entry, ocf_core_id_t core_id,
+ uint64_t core_line)
+{
+ ocf_cache_line_t line;
+ ocf_cache_line_t hash;
+
+ hash = ocf_metadata_hash_func(cache, core_line, core_id);
+
+ /* Initially assume that we have cache miss.
+ * Hash points to proper bucket.
+ */
+ entry->hash = hash;
+ entry->status = LOOKUP_MISS;
+ entry->coll_idx = cache->device->collision_table_entries;
+ entry->core_line = core_line;
+
+ line = ocf_metadata_get_hash(cache, hash);
+
+ while (line != cache->device->collision_table_entries) {
+ ocf_core_id_t curr_core_id;
+ uint64_t curr_core_line;
+
+ ocf_metadata_get_core_info(cache, line, &curr_core_id,
+ &curr_core_line);
+
+ if (core_id == curr_core_id && curr_core_line == core_line) {
+ entry->coll_idx = line;
+ entry->status = LOOKUP_HIT;
+ break;
+ }
+
+ line = ocf_metadata_get_collision_next(cache, line);
+ }
+}
+
+static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
+ struct ocf_map_info *entry, ocf_core_id_t core_id)
+{
+ ocf_core_id_t _core_id;
+ uint64_t _core_line;
+
+ if (entry->status == LOOKUP_MISS)
+ return 0;
+
+ ENV_BUG_ON(entry->coll_idx >= cache->device->collision_table_entries);
+
+ ocf_metadata_get_core_info(cache, entry->coll_idx, &_core_id,
+ &_core_line);
+
+ if (core_id == _core_id && _core_line == entry->core_line)
+ return 0;
+ else
+ return -1;
+}
+
+void ocf_engine_update_req_info(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t entry)
+{
+ uint8_t start_sector = 0;
+ uint8_t end_sector = ocf_line_end_sector(cache);
+ struct ocf_map_info *_entry = &(req->map[entry]);
+
+ start_sector = ocf_map_line_start_sector(req, entry);
+ end_sector = ocf_map_line_end_sector(req, entry);
+
+ /* Handle lookup status */
+ switch (_entry->status) {
+ case LOOKUP_HIT:
+ if (metadata_test_valid_sec(cache, _entry->coll_idx,
+ start_sector, end_sector)) {
+ req->info.hit_no++;
+ } else {
+ req->info.invalid_no++;
+ }
+
+ /* Check request is dirty */
+ if (metadata_test_dirty(cache, _entry->coll_idx)) {
+ req->info.dirty_any++;
+
+ /* Check if cache line is fully dirty */
+ if (metadata_test_dirty_all_sec(cache, _entry->coll_idx,
+ start_sector, end_sector))
+ req->info.dirty_all++;
+ }
+
+ if (req->part_id != ocf_metadata_get_partition_id(cache,
+ _entry->coll_idx)) {
+ /*
+ * Need to move this cache line into another partition
+ */
+ _entry->re_part = req->info.re_part = true;
+ }
+
+ break;
+ case LOOKUP_MISS:
+ req->info.seq_req = false;
+ break;
+ case LOOKUP_MAPPED:
+ break;
+ default:
+ ENV_BUG();
+ break;
+ }
+
+ /* Check if cache hit is sequential */
+ if (req->info.seq_req && entry) {
+ if (ocf_metadata_map_lg2phy(cache,
+ (req->map[entry - 1].coll_idx)) + 1 !=
+ ocf_metadata_map_lg2phy(cache,
+ _entry->coll_idx)) {
+ req->info.seq_req = false;
+ }
+ }
+}
+
+void ocf_engine_traverse(struct ocf_request *req)
+{
+ uint32_t i;
+ uint64_t core_line;
+
+ struct ocf_cache *cache = req->cache;
+ ocf_core_id_t core_id = ocf_core_get_id(req->core);
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_req_clear_info(req);
+ req->info.seq_req = true;
+
+ for (i = 0, core_line = req->core_line_first;
+ core_line <= req->core_line_last; core_line++, i++) {
+
+ struct ocf_map_info *entry = &(req->map[i]);
+
+ ocf_engine_lookup_map_entry(cache, entry, core_id,
+ core_line);
+
+ if (entry->status != LOOKUP_HIT) {
+ req->info.seq_req = false;
+
+ /* Miss - continue lookup with the next map entry */
+ OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
+ entry->core_line);
+ continue;
+ }
+
+ OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu",
+ entry->coll_idx, entry->core_line);
+
+ /* Update eviction (LRU) */
+ ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
+
+ ocf_engine_update_req_info(cache, req, i);
+ }
+
+ OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
+ "Yes" : "No");
+}
+
+int ocf_engine_check(struct ocf_request *req)
+{
+ int result = 0;
+ uint32_t i;
+ uint64_t core_line;
+
+ struct ocf_cache *cache = req->cache;
+ ocf_core_id_t core_id = ocf_core_get_id(req->core);
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_req_clear_info(req);
+ req->info.seq_req = true;
+
+ for (i = 0, core_line = req->core_line_first;
+ core_line <= req->core_line_last; core_line++, i++) {
+
+ struct ocf_map_info *entry = &(req->map[i]);
+
+ if (entry->status == LOOKUP_MISS) {
+ req->info.seq_req = false;
+ continue;
+ }
+
+ if (_ocf_engine_check_map_entry(cache, entry, core_id)) {
+ /* Mapping is invalid */
+ entry->invalid = true;
+ req->info.seq_req = false;
+
+ OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
+ entry->coll_idx);
+
+ result = -1;
+ } else {
+ entry->invalid = false;
+
+ OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
+ entry->coll_idx);
+
+ ocf_engine_update_req_info(cache, req, i);
+ }
+ }
+
+ OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
+ "Yes" : "No");
+
+ return result;
+}
+
+static void ocf_engine_map_cache_line(struct ocf_request *req,
+ uint64_t core_line, unsigned int hash_index,
+ ocf_cache_line_t *cache_line)
+{
+ struct ocf_cache *cache = req->cache;
+ ocf_core_id_t core_id = ocf_core_get_id(req->core);
+ ocf_part_id_t part_id = req->part_id;
+ ocf_cleaning_t clean_policy_type;
+
+ if (!ocf_freelist_get_cache_line(cache->freelist, cache_line)) {
+ req->info.mapping_error = 1;
+ return;
+ }
+
+ ocf_metadata_add_to_partition(cache, part_id, *cache_line);
+
+ /* Add the block to the corresponding collision list */
+ ocf_metadata_start_collision_shared_access(cache, *cache_line);
+ ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
+ *cache_line);
+ ocf_metadata_end_collision_shared_access(cache, *cache_line);
+
+ ocf_eviction_init_cache_line(cache, *cache_line, part_id);
+
+ /* Update LRU: move this node to the head of the LRU list. */
+ ocf_eviction_set_hot_cache_line(cache, *cache_line);
+
+ /* Update dirty cache-block list */
+ clean_policy_type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max);
+
+ if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
+ cleaning_policy_ops[clean_policy_type].
+ init_cache_block(cache, *cache_line);
+}
+
+static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
+ struct ocf_request *req)
+{
+ uint32_t i;
+ struct ocf_map_info *entry;
+
+ for (i = 0; i < req->core_line_count; i++) {
+ entry = &(req->map[i]);
+
+ switch (entry->status) {
+ case LOOKUP_HIT:
+ case LOOKUP_MISS:
+ break;
+
+ case LOOKUP_MAPPED:
+ OCF_DEBUG_RQ(req, "Canceling cache line %u",
+ entry->coll_idx);
+
+ ocf_metadata_start_collision_shared_access(cache,
+ entry->coll_idx);
+
+ set_cache_line_invalid_no_flush(cache, 0,
+ ocf_line_end_sector(cache),
+ entry->coll_idx);
+
+ ocf_metadata_end_collision_shared_access(cache,
+ entry->coll_idx);
+
+ break;
+
+ default:
+ ENV_BUG();
+ break;
+ }
+ }
+}
+
+static void ocf_engine_map(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+ uint32_t i;
+ struct ocf_map_info *entry;
+ uint64_t core_line;
+ int status = LOOKUP_MAPPED;
+ ocf_core_id_t core_id = ocf_core_get_id(req->core);
+
+ if (!ocf_engine_unmapped_count(req))
+ return;
+
+ if (ocf_engine_unmapped_count(req) >
+ ocf_freelist_num_free(cache->freelist)) {
+ req->info.mapping_error = 1;
+ return;
+ }
+
+ ocf_req_clear_info(req);
+ req->info.seq_req = true;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ for (i = 0, core_line = req->core_line_first;
+ core_line <= req->core_line_last; core_line++, i++) {
+ entry = &(req->map[i]);
+
+ ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
+
+ if (entry->status != LOOKUP_HIT) {
+ ocf_engine_map_cache_line(req, entry->core_line,
+ entry->hash, &entry->coll_idx);
+
+ if (req->info.mapping_error) {
+ /*
+ * Eviction error (mapping error) - need to clean up,
+ * return and fall back to pass-through
+ */
+ OCF_DEBUG_RQ(req, "Eviction ERROR when mapping");
+ ocf_engine_map_hndl_error(cache, req);
+ break;
+ }
+
+ entry->status = status;
+ }
+
+ OCF_DEBUG_PARAM(req->cache,
+ "%s, cache line %u, core line = %llu",
+ entry->status == LOOKUP_HIT ? "Hit" : "Map",
+ entry->coll_idx, entry->core_line);
+
+ ocf_engine_update_req_info(cache, req, i);
+
+ }
+
+ if (!req->info.mapping_error) {
+ /* request has been inserted into cache - purge it from promotion
+ * policy */
+ ocf_promotion_req_purge(cache->promotion_policy, req);
+ }
+
+ OCF_DEBUG_PARAM(req->cache, "Sequential - %s", req->info.seq_req ?
+ "Yes" : "No");
+}
+
+static void _ocf_engine_clean_end(void *private_data, int error)
+{
+ struct ocf_request *req = private_data;
+
+ if (error) {
+ OCF_DEBUG_RQ(req, "Cleaning ERROR");
+ req->error |= error;
+
+ /* End the request and do not continue processing */
+ ocf_req_unlock(req);
+
+ /* Complete request */
+ req->complete(req, error);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+ } else {
+ req->info.dirty_any = 0;
+ req->info.dirty_all = 0;
+ ocf_engine_push_req_front(req, true);
+ }
+}
+
+static int ocf_engine_evict(struct ocf_request *req)
+{
+ if (!ocf_engine_unmapped_count(req))
+ return 0;
+
+ return space_managment_evict_do(req->cache, req,
+ ocf_engine_unmapped_count(req));
+}
+
+static int lock_clines(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ enum ocf_engine_lock_type lock_type = engine_cbs->get_lock_type(req);
+
+ switch (lock_type) {
+ case ocf_engine_lock_write:
+ return ocf_req_async_lock_wr(req, engine_cbs->resume);
+ case ocf_engine_lock_read:
+ return ocf_req_async_lock_rd(req, engine_cbs->resume);
+ default:
+ return OCF_LOCK_ACQUIRED;
+ }
+}
+
+int ocf_engine_prepare_clines(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ bool mapped;
+ bool promote = true;
+ int lock = -ENOENT;
+ struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
+
+ /* Calculate hashes for hash-bucket locking */
+ ocf_req_hash(req);
+
+ /* Read-lock hash buckets associated with request target core & LBAs
+ * (core lines) to ensure that the cache mapping for these core lines does
+ * not change during traversal */
+ ocf_req_hash_lock_rd(req);
+
+ /* Traverse to check if request is mapped fully */
+ ocf_engine_traverse(req);
+
+ mapped = ocf_engine_is_mapped(req);
+ if (mapped) {
+ /* Request cachelines are already mapped, acquire cacheline
+ * lock */
+ lock = lock_clines(req, engine_cbs);
+ } else {
+ /* check if request should promote cachelines */
+ promote = ocf_promotion_req_should_promote(
+ req->cache->promotion_policy, req);
+ if (!promote)
+ req->info.mapping_error = 1;
+ }
+
+ if (mapped || !promote) {
+ /* Will not attempt mapping - release hash bucket lock */
+ ocf_req_hash_unlock_rd(req);
+ } else {
+ /* Need to map (potentially evict) cachelines. Mapping must be
+ * performed holding (at least) hash-bucket write lock */
+ ocf_req_hash_lock_upgrade(req);
+ ocf_engine_map(req);
+ if (!req->info.mapping_error)
+ lock = lock_clines(req, engine_cbs);
+ ocf_req_hash_unlock_wr(req);
+
+ if (req->info.mapping_error) {
+ /* Not mapped - evict cachelines under global exclusive
+ * lock */
+ ocf_metadata_start_exclusive_access(metadata_lock);
+
+ /* Now there is exclusive access for metadata. May
+ * traverse once again and evict cachelines if needed.
+ */
+ if (ocf_engine_evict(req) == LOOKUP_MAPPED)
+ ocf_engine_map(req);
+
+ if (!req->info.mapping_error)
+ lock = lock_clines(req, engine_cbs);
+
+ ocf_metadata_end_exclusive_access(metadata_lock);
+ }
+ }
+
+
+ return lock;
+}
+
+static int _ocf_engine_clean_getter(struct ocf_cache *cache,
+ void *getter_context, uint32_t item, ocf_cache_line_t *line)
+{
+ struct ocf_cleaner_attribs *attribs = getter_context;
+ struct ocf_request *req = attribs->cmpl_context;
+
+ for (; attribs->getter_item < req->core_line_count;
+ attribs->getter_item++) {
+
+ struct ocf_map_info *entry = &req->map[attribs->getter_item];
+
+ if (entry->status != LOOKUP_HIT)
+ continue;
+
+ if (!metadata_test_dirty(cache, entry->coll_idx))
+ continue;
+
+ /* Line to be cleaned found, go to next item and return */
+ *line = entry->coll_idx;
+ attribs->getter_item++;
+ return 0;
+ }
+
+ return -1;
+}
+
+void ocf_engine_clean(struct ocf_request *req)
+{
+ /* Initialize attributes for cleaner */
+ struct ocf_cleaner_attribs attribs = {
+ .cache_line_lock = false,
+
+ .cmpl_context = req,
+ .cmpl_fn = _ocf_engine_clean_end,
+
+ .getter = _ocf_engine_clean_getter,
+ .getter_context = &attribs,
+ .getter_item = 0,
+
+ .count = req->info.dirty_any,
+ .io_queue = req->io_queue
+ };
+
+ /* Start cleaning */
+ ocf_cleaner_fire(req->cache, &attribs);
+}
+
+void ocf_engine_update_block_stats(struct ocf_request *req)
+{
+ ocf_core_stats_vol_block_update(req->core, req->part_id, req->rw,
+ req->byte_length);
+}
+
+void ocf_engine_update_request_stats(struct ocf_request *req)
+{
+ ocf_core_stats_request_update(req->core, req->part_id, req->rw,
+ req->info.hit_no, req->core_line_count);
+}
+
+void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
+{
+ ocf_cache_t cache = req->cache;
+ ocf_queue_t q = NULL;
+ unsigned long lock_flags = 0;
+
+ INIT_LIST_HEAD(&req->list);
+
+ ENV_BUG_ON(!req->io_queue);
+ q = req->io_queue;
+
+ if (!req->info.internal) {
+ env_atomic_set(&cache->last_access_ms,
+ env_ticks_to_msecs(env_get_tick_count()));
+ }
+
+ env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+
+ list_add_tail(&req->list, &q->io_list);
+ env_atomic_inc(&q->io_no);
+
+ env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+
+ /* NOTE: do not dereference @req past this line, it might
+ * be picked up by concurrent io thread and deallocated
+ * at this point */
+
+ ocf_queue_kick(q, allow_sync);
+}
+
+void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
+{
+ ocf_cache_t cache = req->cache;
+ ocf_queue_t q = NULL;
+ unsigned long lock_flags = 0;
+
+ ENV_BUG_ON(!req->io_queue);
+ INIT_LIST_HEAD(&req->list);
+
+ q = req->io_queue;
+
+ if (!req->info.internal) {
+ env_atomic_set(&cache->last_access_ms,
+ env_ticks_to_msecs(env_get_tick_count()));
+ }
+
+ env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+
+ list_add(&req->list, &q->io_list);
+ env_atomic_inc(&q->io_no);
+
+ env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+
+ /* NOTE: do not dereference @req past this line, it might
+ * be picked up by concurrent io thread and deallocated
+ * at this point */
+
+ ocf_queue_kick(q, allow_sync);
+}
+
+void ocf_engine_push_req_front_if(struct ocf_request *req,
+ const struct ocf_io_if *io_if,
+ bool allow_sync)
+{
+ req->error = 0; /* Please explain why!!! */
+ req->io_if = io_if;
+ ocf_engine_push_req_front(req, allow_sync);
+}
+
+void inc_fallback_pt_error_counter(ocf_cache_t cache)
+{
+ ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
+
+ if (cache->fallback_pt_error_threshold == OCF_CACHE_FALLBACK_PT_INACTIVE)
+ return;
+
+ if (env_atomic_inc_return(&cache->fallback_pt_error_counter) ==
+ cache->fallback_pt_error_threshold) {
+ ocf_cache_log(cache, log_info, "Error threshold reached. "
+ "Fallback Pass Through activated\n");
+ }
+}
+
+static int _ocf_engine_refresh(struct ocf_request *req)
+{
+ int result;
+
+ /* Check under metadata RD lock */
+ ocf_req_hash_lock_rd(req);
+
+ result = ocf_engine_check(req);
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (result == 0) {
+
+ /* Refresh successful, can process with original IO interface */
+ req->io_if = req->priv;
+
+ req->priv = NULL;
+
+ if (req->rw == OCF_READ)
+ req->io_if->read(req);
+ else if (req->rw == OCF_WRITE)
+ req->io_if->write(req);
+ else
+ ENV_BUG();
+ } else {
+ ENV_WARN(true, "Inconsistent request");
+ req->error = -OCF_ERR_INVAL;
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ /* Release WRITE lock of request */
+ ocf_req_unlock(req);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+ }
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_refresh = {
+ .read = _ocf_engine_refresh,
+ .write = _ocf_engine_refresh,
+};
+
+void ocf_engine_on_resume(struct ocf_request *req)
+{
+ ENV_BUG_ON(req->priv);
+ OCF_CHECK_NULL(req->io_if);
+
+ /* Exchange IO interface */
+ req->priv = (void *)req->io_if;
+
+ OCF_DEBUG_RQ(req, "On resume");
+
+ ocf_engine_push_req_front_if(req, &_io_if_refresh, false);
+}
diff --git a/src/spdk/ocf/src/engine/engine_common.h b/src/spdk/ocf/src/engine/engine_common.h
new file mode 100644
index 000000000..ad99c67d0
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_common.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_COMMON_H_
+#define ENGINE_COMMON_H_
+
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+
+/**
+ * @file engine_common.h
+ * @brief OCF cache engine common module
+ */
+
+/**
+ * @brief Signal and handle OCF request error
+ *
+ * @param req OCF request
+ * @param stop_cache Indicates if OCF cache engine needs to be stopped
+ * @param msg Error message to be printed into log
+ */
+void ocf_engine_error(struct ocf_request *req, bool stop_cache,
+ const char *msg);
+
+/**
+ * @brief Check if OCF request is hit
+ *
+ * @param req OCF request
+ *
+ * @retval true HIT
+ * @retval false MISS
+ */
+static inline bool ocf_engine_is_hit(struct ocf_request *req)
+{
+ return req->info.hit_no == req->core_line_count;
+}
+
+/**
+ * @brief Check if OCF request is miss
+ *
+ * @param req OCF request
+ *
+ * @retval true MISS
+ * @retval false HIT
+ */
+#define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req))
+
+/**
+ * @brief Check if all cache lines are mapped fully
+ *
+ * @param req OCF request
+ *
+ * @retval true request is mapped fully
+ * @retval false request is not mapped fully and eviction might be run in
+ * order to complete mapping
+ */
+static inline bool ocf_engine_is_mapped(struct ocf_request *req)
+{
+ return req->info.hit_no + req->info.invalid_no == req->core_line_count;
+}
+
+/**
+ * @brief Check if all cache lines are dirty
+ *
+ * @param req OCF request
+ *
+ * @retval true request is dirty fully
+ * @retval false request is not dirty fully
+ */
+static inline bool ocf_engine_is_dirty_all(struct ocf_request *req)
+{
+ return req->info.dirty_all == req->core_line_count;
+}
+
+/**
+ * @brief Get number of mapped cache lines
+ *
+ * @param req OCF request
+ *
+ * @return Number of mapped cache lines
+ */
+static inline uint32_t ocf_engine_mapped_count(struct ocf_request *req)
+{
+ return req->info.hit_no + req->info.invalid_no;
+}
+
+/**
+ * @brief Get number of unmapped cache lines
+ *
+ * @param req OCF request
+ *
+ * @return Number of unmapped cache lines
+ */
+static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
+{
+ return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
+}
+
+/**
+ * @brief Get number of IOs to perform cache read or write
+ *
+ * @param req OCF request
+ *
+ * @return Count of cache IOs
+ */
+static inline uint32_t ocf_engine_io_count(struct ocf_request *req)
+{
+ return req->info.seq_req ? 1 : req->core_line_count;
+}
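+
+/*
+ * Illustrative example (editorial, not from the original sources): for a
+ * request with core_line_count == 4, hit_no == 3 and invalid_no == 1,
+ * ocf_engine_is_hit() is false, ocf_engine_is_mapped() is true,
+ * ocf_engine_mapped_count() == 4, ocf_engine_unmapped_count() == 0 and
+ * ocf_engine_io_count() is 1 when info.seq_req is set, 4 otherwise.
+ */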
+
+static inline
+bool ocf_engine_map_all_sec_dirty(struct ocf_request *req, uint32_t line)
+{
+ uint8_t start = ocf_map_line_start_sector(req, line);
+ uint8_t end = ocf_map_line_end_sector(req, line);
+
+ if (req->map[line].status != LOOKUP_HIT)
+ return false;
+
+ return metadata_test_dirty_all_sec(req->cache, req->map[line].coll_idx,
+ start, end);
+}
+
+static inline
+bool ocf_engine_map_all_sec_clean(struct ocf_request *req, uint32_t line)
+{
+ uint8_t start = ocf_map_line_start_sector(req, line);
+ uint8_t end = ocf_map_line_end_sector(req, line);
+
+ if (req->map[line].status != LOOKUP_HIT)
+ return false;
+
+ if (!metadata_test_valid_sec(req->cache, req->map[line].coll_idx,
+ start, end)) {
+ return false;
+ }
+
+ return !metadata_test_dirty_sec(req->cache, req->map[line].coll_idx,
+ start, end);
+}
+
+/**
+ * @brief Clean request (flush dirty data to the core device)
+ *
+ * @param req OCF request
+ *
+ * @note After successful cleaning:
+ * - Dirty status bits in request info will be cleared
+ * - Request will be pushed front, <B>IO interface needs to be set</B>
+ *
+ * @note In case of failure:
+ * - unlock request
+ * - complete request to the application
+ * - free request
+ */
+void ocf_engine_clean(struct ocf_request *req);
+
+void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
+ struct ocf_map_info *entry, ocf_core_id_t core_id,
+ uint64_t core_line);
+
+/**
+ * @brief Request cacheline lock type
+ */
+enum ocf_engine_lock_type
+{
+ /** No lock */
+ ocf_engine_lock_none = 0,
+ /** Write lock */
+ ocf_engine_lock_write,
+ /** Read lock */
+ ocf_engine_lock_read,
+};
+
+/**
+ * @brief Engine-specific callbacks for common request handling routine
+ *
+ * TODO(arutk): expand this structure to fit all engines and all steps
+ */
+struct ocf_engine_callbacks
+{
+ /** Specify locking requirements after request is mapped */
+ enum ocf_engine_lock_type (*get_lock_type)(struct ocf_request *req);
+
+ /** Resume handling after acquiring asynchronous lock */
+ ocf_req_async_lock_cb resume;
+};
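+
+/*
+ * A minimal sketch (editorial; "my_get_lock_type" and "my_engine_callbacks"
+ * are hypothetical names) of how an engine might fill this structure,
+ * mirroring the read and write-back engines defined elsewhere in this tree:
+ *
+ *	static enum ocf_engine_lock_type my_get_lock_type(struct ocf_request *req)
+ *	{
+ *		return ocf_engine_lock_write;
+ *	}
+ *
+ *	static const struct ocf_engine_callbacks my_engine_callbacks = {
+ *		.get_lock_type = my_get_lock_type,
+ *		.resume = ocf_engine_on_resume,
+ *	};
+ */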
+
+/**
+ * @brief Map and lock cachelines
+ *
+ * @param req OCF request
+ *
+ * @returns lock acquisition status
+ * @retval OCF_LOCK_ACQUIRED lock has been acquired, IO can be processed
+ * @retval OCF_LOCK_NOT_ACQUIRED lock not acquired yet, the resume callback
+ *	will be invoked once it is
+ * @retval <0 error code
+ */
+int ocf_engine_prepare_clines(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs);
+
+/**
+ * @brief Traverse OCF request (lookup cache)
+ *
+ * @note This function does not evict cachelines. Only lookup in metadata is
+ * performed. Main purpose of this function is to check if there is a HIT.
+ *
+ * @param req OCF request
+ */
+void ocf_engine_traverse(struct ocf_request *req);
+
+/**
+ * @brief Check if OCF request mapping is still valid
+ *
+ * @note If the mapping entry is invalid, it will be marked
+ *
+ * @param req OCF request
+ *
+ * @retval 0 - OCF request mapping is valid
+ * @return Non-zero - OCF request mapping is invalid and re-mapping is needed
+ */
+int ocf_engine_check(struct ocf_request *req);
+
+/**
+ * @brief Update OCF request info
+ *
+ * @param cache OCF cache instance
+ * @param req OCF request
+ * @param entry request map entry index
+ */
+void ocf_engine_update_req_info(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t entry);
+
+/**
+ * @brief Update OCF request block statistics for an exported object
+ *
+ * @param req OCF request
+ */
+void ocf_engine_update_block_stats(struct ocf_request *req);
+
+/**
+ * @brief Update OCF request statistics for an exported object
+ * (not applicable to write WI and read WT)
+ *
+ * @param req OCF request
+ */
+void ocf_engine_update_request_stats(struct ocf_request *req);
+
+/**
+ * @brief Push OCF request to the back of the OCF thread worker queue
+ *
+ * @param req OCF request
+ * @param allow_sync caller allows the queued request to be run immediately
+ *	from the push function in the caller context
+ */
+void ocf_engine_push_req_back(struct ocf_request *req,
+ bool allow_sync);
+
+/**
+ * @brief Push OCF request to the front of the OCF thread worker queue
+ *
+ * @param req OCF request
+ * @param allow_sync caller allows the queued request to be run immediately
+ *	from the push function in the caller context
+ */
+void ocf_engine_push_req_front(struct ocf_request *req,
+ bool allow_sync);
+
+/**
+ * @brief Set the IO interface and push the OCF request to the front of the
+ * OCF thread worker queue
+ *
+ * @param req OCF request
+ * @param io_if IO interface
+ * @param allow_sync caller allows the queued request to be run immediately
+ *	from the push function in the caller context
+ */
+void ocf_engine_push_req_front_if(struct ocf_request *req,
+ const struct ocf_io_if *io_if,
+ bool allow_sync);
+
+void inc_fallback_pt_error_counter(ocf_cache_t cache);
+
+void ocf_engine_on_resume(struct ocf_request *req);
+
+#endif /* ENGINE_COMMON_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_d2c.c b/src/spdk/ocf/src/engine/engine_d2c.c
new file mode 100644
index 000000000..ef0c3c8a4
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_d2c.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_d2c.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../metadata/metadata.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "d2c"
+#include "engine_debug.h"
+
+static void _ocf_d2c_completion(struct ocf_request *req, int error)
+{
+ req->error = error;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, req->rw);
+ }
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+}
+
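+/*
+ * Editorial note: d2c ("direct to core") bypasses cache lookup and mapping
+ * entirely - the request is submitted straight to the core volume and only
+ * pass-through statistics are updated.
+ */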
+int ocf_io_d2c(struct ocf_request *req)
+{
+ ocf_core_t core = req->core;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ ocf_submit_volume_req(&core->volume, req, _ocf_d2c_completion);
+
+ ocf_engine_update_block_stats(req);
+
+ ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
+ req->info.hit_no, req->core_line_count);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+	return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_d2c.h b/src/spdk/ocf/src/engine/engine_d2c.h
new file mode 100644
index 000000000..3e93b7718
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_d2c.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_D2C_H_
+#define ENGINE_D2C_H_
+
+int ocf_io_d2c(struct ocf_request *req);
+
+#endif /* ENGINE_D2C_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_debug.h b/src/spdk/ocf/src/engine/engine_debug.h
new file mode 100644
index 000000000..5c25d9369
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_debug.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_DEBUG_H_
+#define ENGINE_DEBUG_H_
+
+#ifndef OCF_ENGINE_DEBUG
+#define OCF_ENGINE_DEBUG 0
+#endif
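+
+/*
+ * Usage sketch (editorial): an engine source file may define
+ * OCF_ENGINE_DEBUG as 1 and OCF_ENGINE_DEBUG_IO_NAME as its engine name
+ * before including this header to enable the OCF_DEBUG_* trace macros for
+ * that engine; when OCF_ENGINE_DEBUG stays 0 the macros compile to nothing.
+ */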
+
+#if 1 == OCF_ENGINE_DEBUG
+
+#ifndef OCF_ENGINE_DEBUG_IO_NAME
+#define OCF_ENGINE_DEBUG_IO_NAME "null"
+#endif
+
+#define OCF_DEBUG_PREFIX "[Engine][%s] %s "
+
+#define OCF_DEBUG_LOG(cache, format, ...) \
+ ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
+ format"\n", OCF_ENGINE_DEBUG_IO_NAME, __func__, \
+ ##__VA_ARGS__)
+
+#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
+
+#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
+ ##__VA_ARGS__)
+
+#define OCF_DEBUG_RQ(req, format, ...) \
+ ocf_cache_log(req->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
+ format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
+ OCF_READ == (req)->rw ? "RD" : "WR", req->byte_position, \
+ req->byte_length, __func__, ##__VA_ARGS__)
+
+#else
+#define OCF_DEBUG_PREFIX
+#define OCF_DEBUG_LOG(cache, format, ...)
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#define OCF_DEBUG_RQ(req, format, ...)
+#endif
+
+#endif /* ENGINE_DEBUG_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_discard.c b/src/spdk/ocf/src/engine/engine_discard.c
new file mode 100644
index 000000000..81c511220
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_discard.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cache_engine.h"
+#include "engine_common.h"
+#include "engine_discard.h"
+#include "../metadata/metadata.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG 0
+
+#define OCF_ENGINE_DEBUG_IO_NAME "discard"
+#include "engine_debug.h"
+
+static int _ocf_discard_step_do(struct ocf_request *req);
+static int _ocf_discard_step(struct ocf_request *req);
+static int _ocf_discard_flush_cache(struct ocf_request *req);
+static int _ocf_discard_core(struct ocf_request *req);
+
+static const struct ocf_io_if _io_if_discard_step = {
+ .read = _ocf_discard_step,
+ .write = _ocf_discard_step,
+};
+
+static const struct ocf_io_if _io_if_discard_step_resume = {
+ .read = _ocf_discard_step_do,
+ .write = _ocf_discard_step_do,
+};
+
+static const struct ocf_io_if _io_if_discard_flush_cache = {
+ .read = _ocf_discard_flush_cache,
+ .write = _ocf_discard_flush_cache,
+};
+
+static const struct ocf_io_if _io_if_discard_core = {
+ .read = _ocf_discard_core,
+ .write = _ocf_discard_core,
+};
+
+static void _ocf_discard_complete_req(struct ocf_request *req, int error)
+{
+ req->complete(req, error);
+
+ ocf_req_put(req);
+}
+
+static void _ocf_discard_core_complete(struct ocf_io *io, int error)
+{
+ struct ocf_request *req = io->priv1;
+
+ OCF_DEBUG_RQ(req, "Core DISCARD Completion");
+
+ _ocf_discard_complete_req(req, error);
+
+ ocf_io_put(io);
+}
+
+static int _ocf_discard_core(struct ocf_request *req)
+{
+ struct ocf_io *io;
+ int err;
+
+ io = ocf_volume_new_io(&req->core->volume, req->io_queue,
+ SECTORS_TO_BYTES(req->discard.sector),
+ SECTORS_TO_BYTES(req->discard.nr_sects),
+ OCF_WRITE, 0, 0);
+ if (!io) {
+ _ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ ocf_io_set_cmpl(io, req, NULL, _ocf_discard_core_complete);
+ err = ocf_io_set_data(io, req->data, 0);
+ if (err) {
+ _ocf_discard_core_complete(io, err);
+ return err;
+ }
+
+ ocf_volume_submit_discard(io);
+
+ return 0;
+}
+
+static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
+{
+ struct ocf_request *req = io->priv1;
+
+ if (error) {
+ ocf_metadata_error(req->cache);
+ _ocf_discard_complete_req(req, error);
+ ocf_io_put(io);
+ return;
+ }
+
+ req->io_if = &_io_if_discard_core;
+ ocf_engine_push_req_front(req, true);
+
+ ocf_io_put(io);
+}
+
+static int _ocf_discard_flush_cache(struct ocf_request *req)
+{
+ struct ocf_io *io;
+
+ io = ocf_volume_new_io(&req->cache->device->volume, req->io_queue,
+ 0, 0, OCF_WRITE, 0, 0);
+ if (!io) {
+ ocf_metadata_error(req->cache);
+ _ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ ocf_io_set_cmpl(io, req, NULL, _ocf_discard_cache_flush_complete);
+
+ ocf_volume_submit_flush(io);
+
+ return 0;
+}
+
+static void _ocf_discard_finish_step(struct ocf_request *req)
+{
+ req->discard.handled += BYTES_TO_SECTORS(req->byte_length);
+
+ if (req->discard.handled < req->discard.nr_sects)
+ req->io_if = &_io_if_discard_step;
+ else if (req->cache->device->init_mode != ocf_init_mode_metadata_volatile)
+ req->io_if = &_io_if_discard_flush_cache;
+ else
+ req->io_if = &_io_if_discard_core;
+
+ ocf_engine_push_req_front(req, true);
+}
+
+static void _ocf_discard_step_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ /* Release WRITE lock of request */
+ ocf_req_unlock_wr(req);
+
+ if (req->error) {
+ ocf_metadata_error(req->cache);
+ _ocf_discard_complete_req(req, req->error);
+ return;
+ }
+
+ _ocf_discard_finish_step(req);
+}
+
+int _ocf_discard_step_do(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ env_atomic_set(&req->req_remaining, 1); /* One core IO */
+
+ if (ocf_engine_mapped_count(req)) {
+		/* There are mapped cache lines, need to remove them */
+
+ ocf_req_hash_lock_wr(req);
+
+ /* Remove mapped cache lines from metadata */
+ ocf_purge_map_info(req);
+
+ if (req->info.flush_metadata) {
+			/* Request was dirty and metadata needs to be flushed */
+ ocf_metadata_flush_do_asynch(cache, req,
+ _ocf_discard_step_complete);
+ }
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ ocf_req_hash_lock_rd(req);
+
+ /* Even if no cachelines are mapped they could be tracked in promotion
+ * policy. RD lock suffices. */
+ ocf_promotion_req_purge(req->cache->promotion_policy, req);
+
+ ocf_req_hash_unlock_rd(req);
+
+ OCF_DEBUG_RQ(req, "Discard");
+ _ocf_discard_step_complete(req, 0);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static void _ocf_discard_on_resume(struct ocf_request *req)
+{
+ OCF_DEBUG_RQ(req, "On resume");
+ ocf_engine_push_req_front(req, true);
+}
+
+static int _ocf_discard_step(struct ocf_request *req)
+{
+ int lock;
+ struct ocf_cache *cache = req->cache;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ req->byte_position = SECTORS_TO_BYTES(req->discard.sector +
+ req->discard.handled);
+ req->byte_length = OCF_MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
+ req->discard.handled), MAX_TRIM_RQ_SIZE);
+ req->core_line_first = ocf_bytes_2_lines(cache, req->byte_position);
+ req->core_line_last =
+ ocf_bytes_2_lines(cache, req->byte_position + req->byte_length - 1);
+ req->core_line_count = req->core_line_last - req->core_line_first + 1;
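+	/* Worked example (editorial; assumes a 4 KiB cache line, 512 B sectors
+	 * and MAX_TRIM_RQ_SIZE larger than the request): discard.sector == 8,
+	 * discard.nr_sects == 24 and discard.handled == 0 give
+	 * byte_position == 4096, byte_length == 12288, core lines 1..3 and
+	 * core_line_count == 3. Actual values depend on the configured
+	 * cache line size.
+	 */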
+ req->io_if = &_io_if_discard_step_resume;
+
+ ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count,
+ 0));
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req);
+
+	/* Traverse to check if the request is fully mapped */
+ ocf_engine_traverse(req);
+
+ if (ocf_engine_mapped_count(req)) {
+		/* Some cache lines are mapped, lock request for WRITE access */
+ lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
+ } else {
+ lock = OCF_LOCK_ACQUIRED;
+ }
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (lock >= 0) {
+ if (OCF_LOCK_ACQUIRED == lock) {
+ _ocf_discard_step_do(req);
+ } else {
+ /* WR lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK")
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->error |= lock;
+ _ocf_discard_finish_step(req);
+ }
+
+ env_cond_resched();
+
+ return 0;
+}
+
+int ocf_discard(struct ocf_request *req)
+{
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_io_start(&req->ioi.io);
+
+ if (req->rw == OCF_READ) {
+ req->complete(req, -OCF_ERR_INVAL);
+ ocf_req_put(req);
+ return 0;
+ }
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ _ocf_discard_step(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_discard.h b/src/spdk/ocf/src/engine/engine_discard.h
new file mode 100644
index 000000000..bca81b8d4
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_discard.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __ENGINE_DISCARD_H__
+#define __ENGINE_DISCARD_H__
+
+int ocf_discard(struct ocf_request *req);
+
+#endif
diff --git a/src/spdk/ocf/src/engine/engine_fast.c b/src/spdk/ocf/src/engine/engine_fast.c
new file mode 100644
index 000000000..8cfe96c43
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_fast.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_fast.h"
+#include "engine_common.h"
+#include "engine_pt.h"
+#include "engine_wb.h"
+#include "../ocf_request.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_io.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../metadata/metadata.h"
+
+#define OCF_ENGINE_DEBUG 0
+
+#define OCF_ENGINE_DEBUG_IO_NAME "fast"
+#include "engine_debug.h"
+
+/* _____ _ ______ _ _____ _ _
+ * | __ \ | | | ____| | | | __ \ | | | |
+ * | |__) |___ __ _ __| | | |__ __ _ ___| |_ | |__) |_ _| |_| |__
+ * | _ // _ \/ _` |/ _` | | __/ _` / __| __| | ___/ _` | __| '_ \
+ * | | \ \ __/ (_| | (_| | | | | (_| \__ \ |_ | | | (_| | |_| | | |
+ * |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
+ */
+
+static void _ocf_read_fast_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (env_atomic_dec_return(&req->req_remaining)) {
+ /* Not all requests finished */
+ return;
+ }
+
+ OCF_DEBUG_RQ(req, "HIT completion");
+
+ if (req->error) {
+ OCF_DEBUG_RQ(req, "ERROR");
+
+ ocf_core_stats_cache_error_update(req->core, OCF_READ);
+ ocf_engine_push_req_front_pt(req);
+ } else {
+ ocf_req_unlock(req);
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ /* Free the request at the last point of the completion path */
+ ocf_req_put(req);
+ }
+}
+
+static int _ocf_read_fast_do(struct ocf_request *req)
+{
+ if (ocf_engine_is_miss(req)) {
+		/* It seems that after resume the request is now a MISS, do PT */
+ OCF_DEBUG_RQ(req, "Switching to read PT");
+ ocf_read_pt_do(req);
+ return 0;
+
+ }
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ if (req->info.re_part) {
+ OCF_DEBUG_RQ(req, "Re-Part");
+
+ ocf_req_hash_lock_wr(req);
+
+		/* Probably some cache lines are assigned to the wrong
+		 * partition. Need to move them to the new one
+ */
+ ocf_part_move(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ /* Submit IO */
+ OCF_DEBUG_RQ(req, "Submit");
+ env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
+ ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
+ ocf_engine_io_count(req), _ocf_read_fast_complete);
+
+
+ /* Update statistics */
+ ocf_engine_update_request_stats(req);
+ ocf_engine_update_block_stats(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_read_fast_resume = {
+ .read = _ocf_read_fast_do,
+ .write = _ocf_read_fast_do,
+};
+
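+/*
+ * Editorial note: the fast path handles the request only on a full hit and
+ * then returns OCF_FAST_PATH_YES; on OCF_FAST_PATH_NO the request is left
+ * unprocessed and the caller is expected to fall back to the full engine
+ * path for the configured cache mode.
+ */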
+int ocf_read_fast(struct ocf_request *req)
+{
+ bool hit;
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_read_fast_resume;
+
+ /*- Metadata RD access -----------------------------------------------*/
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req);
+
+	/* Traverse request to check if there is a hit */
+ ocf_engine_traverse(req);
+
+ hit = ocf_engine_is_hit(req);
+ if (hit) {
+ ocf_io_start(&req->ioi.io);
+ lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+ }
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (hit) {
+ OCF_DEBUG_RQ(req, "Fast path success");
+
+ if (lock >= 0) {
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* Lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+				/* Lock was acquired, can perform IO */
+ _ocf_read_fast_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR");
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "Fast path failure");
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ if (hit)
+ return OCF_FAST_PATH_YES;
+ else
+ return OCF_FAST_PATH_NO;
+}
+
+/* __ __ _ _ ______ _ _____ _ _
+ * \ \ / / (_) | | ____| | | | __ \ | | | |
+ * \ \ /\ / / __ _| |_ ___ | |__ __ _ ___| |_ | |__) |_ _| |_| |__
+ * \ \/ \/ / '__| | __/ _ \ | __/ _` / __| __| | ___/ _` | __| '_ \
+ * \ /\ /| | | | || __/ | | | (_| \__ \ |_ | | | (_| | |_| | | |
+ * \/ \/ |_| |_|\__\___| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
+ */
+
+static const struct ocf_io_if _io_if_write_fast_resume = {
+ .read = ocf_write_wb_do,
+ .write = ocf_write_wb_do,
+};
+
+int ocf_write_fast(struct ocf_request *req)
+{
+ bool mapped;
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_write_fast_resume;
+
+ /*- Metadata RD access -----------------------------------------------*/
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req);
+
+	/* Traverse request to check if there is a hit */
+ ocf_engine_traverse(req);
+
+ mapped = ocf_engine_is_mapped(req);
+ if (mapped) {
+ ocf_io_start(&req->ioi.io);
+ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+ }
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (mapped) {
+ if (lock >= 0) {
+ OCF_DEBUG_RQ(req, "Fast path success");
+
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* Lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+				/* Lock was acquired, can perform IO */
+ ocf_write_wb_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "Fast path lock failure");
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "Fast path failure");
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+	return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
+}
diff --git a/src/spdk/ocf/src/engine/engine_fast.h b/src/spdk/ocf/src/engine/engine_fast.h
new file mode 100644
index 000000000..3e8023b2e
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_fast.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_FAST_H_
+#define ENGINE_FAST_H_
+
+int ocf_read_fast(struct ocf_request *req);
+int ocf_write_fast(struct ocf_request *req);
+
+#endif /* ENGINE_FAST_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_inv.c b/src/spdk/ocf/src/engine/engine_inv.c
new file mode 100644
index 000000000..4824fb0dc
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_inv.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_inv.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+#include "../metadata/metadata.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "inv"
+#include "engine_debug.h"
+
+static void _ocf_invalidate_req(struct ocf_request *req, int error)
+{
+ if (error) {
+ req->error = error;
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error)
+ ocf_engine_error(req, true, "Failed to flush metadata to cache");
+
+ ocf_req_unlock(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+}
+
+static int _ocf_invalidate_do(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ ENV_BUG_ON(env_atomic_read(&req->req_remaining));
+
+ ocf_req_hash_lock_wr(req);
+ ocf_purge_map_info(req);
+ ocf_req_hash_unlock_wr(req);
+
+ env_atomic_inc(&req->req_remaining);
+
+ if (ocf_volume_is_atomic(&cache->device->volume) &&
+ req->info.flush_metadata) {
+ /* Metadata flush IO */
+ ocf_metadata_flush_do_asynch(cache, req, _ocf_invalidate_req);
+ }
+
+ _ocf_invalidate_req(req, 0);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_invalidate = {
+ .read = _ocf_invalidate_do,
+ .write = _ocf_invalidate_do,
+};
+
+void ocf_engine_invalidate(struct ocf_request *req)
+{
+ ocf_engine_push_req_front_if(req, &_io_if_invalidate, true);
+}
diff --git a/src/spdk/ocf/src/engine/engine_inv.h b/src/spdk/ocf/src/engine/engine_inv.h
new file mode 100644
index 000000000..43b98c35d
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_inv.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_INV_H_
+#define ENGINE_INV_H_
+
+void ocf_engine_invalidate(struct ocf_request *req);
+
+#endif /* ENGINE_INV_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_ops.c b/src/spdk/ocf/src/engine/engine_ops.c
new file mode 100644
index 000000000..a56b24252
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_ops.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "engine_ops.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "ops"
+#include "engine_debug.h"
+
+static void _ocf_engine_ops_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+		/* An error occurred */
+ ocf_engine_error(req, false, "Core operation failure");
+ }
+
+	/* Complete request - both the cache and core IO have finished */
+ req->complete(req, req->error);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+}
+
+int ocf_engine_ops(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* IO to the core device and to the cache device */
+ env_atomic_set(&req->req_remaining, 2);
+
+ /* Submit operation into core device */
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_engine_ops_complete);
+
+ ocf_submit_cache_reqs(cache, req, req->rw, 0, req->byte_length,
+ 1, _ocf_engine_ops_complete);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+
diff --git a/src/spdk/ocf/src/engine/engine_ops.h b/src/spdk/ocf/src/engine/engine_ops.h
new file mode 100644
index 000000000..4d633dcdc
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_ops.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __CACHE_ENGINE_OPS_H_
+#define __CACHE_ENGINE_OPS_H_
+
+int ocf_engine_ops(struct ocf_request *req);
+
+#endif /* __CACHE_ENGINE_OPS_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_pt.c b/src/spdk/ocf/src/engine/engine_pt.c
new file mode 100644
index 000000000..112319a66
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_pt.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_pt.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_part.h"
+#include "../metadata/metadata.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "pt"
+#include "engine_debug.h"
+
+static void _ocf_read_pt_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_READ);
+ }
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ ocf_req_unlock_rd(req);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+}
+
+static inline void _ocf_read_pt_submit(struct ocf_request *req)
+{
+ env_atomic_set(&req->req_remaining, 1); /* Core device IO */
+
+ OCF_DEBUG_RQ(req, "Submit");
+
+ /* Core read */
+ ocf_submit_volume_req(&req->core->volume, req, _ocf_read_pt_complete);
+}
+
+int ocf_read_pt_do(struct ocf_request *req)
+{
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ if (req->info.dirty_any) {
+ ocf_req_hash_lock_rd(req);
+ /* Need to clean, start it */
+ ocf_engine_clean(req);
+ ocf_req_hash_unlock_rd(req);
+
+		/* Do not process further, the request needs cleaning first */
+ ocf_req_put(req);
+
+ return 0;
+ }
+
+ if (req->info.re_part) {
+ OCF_DEBUG_RQ(req, "Re-Part");
+
+ ocf_req_hash_lock_wr(req);
+
+		/* Probably some cache lines are assigned to the wrong
+		 * partition. Need to move them to the new one
+ */
+ ocf_part_move(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ /* Submit read IO to the core */
+ _ocf_read_pt_submit(req);
+
+ /* Update statistics */
+ ocf_engine_update_block_stats(req);
+ ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
+ req->info.hit_no, req->core_line_count);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_pt_resume = {
+ .read = ocf_read_pt_do,
+ .write = ocf_read_pt_do,
+};
+
+int ocf_read_pt(struct ocf_request *req)
+{
+ bool use_cache = false;
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_pt_resume;
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req);
+
+ /* Traverse request to check if there are mapped cache lines */
+ ocf_engine_traverse(req);
+
+ if (req->seq_cutoff && ocf_engine_is_dirty_all(req)) {
+ use_cache = true;
+ } else {
+ if (ocf_engine_mapped_count(req)) {
+			/* There are mapped cache lines,
+ * lock request for READ access
+ */
+ lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+ } else {
+ /* No mapped cache lines, no need to get lock */
+ lock = OCF_LOCK_ACQUIRED;
+ }
+ }
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (use_cache) {
+ /*
+		 * There is a dirty HIT and sequential cut-off is active,
+		 * so force reading the data from cache
+ */
+ ocf_req_clear(req);
+ ocf_get_io_if(ocf_cache_mode_wt)->read(req);
+ } else {
+ if (lock >= 0) {
+ if (lock == OCF_LOCK_ACQUIRED) {
+				/* Lock acquired, perform read operations */
+ ocf_read_pt_do(req);
+ } else {
+				/* Lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+void ocf_engine_push_req_front_pt(struct ocf_request *req)
+{
+ ocf_engine_push_req_front_if(req, &_io_if_pt_resume, true);
+}
+
diff --git a/src/spdk/ocf/src/engine/engine_pt.h b/src/spdk/ocf/src/engine/engine_pt.h
new file mode 100644
index 000000000..963957c45
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_pt.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_OFF_H_
+#define ENGINE_OFF_H_
+
+int ocf_read_pt(struct ocf_request *req);
+
+int ocf_read_pt_do(struct ocf_request *req);
+
+void ocf_engine_push_req_front_pt(struct ocf_request *req);
+
+#endif /* ENGINE_OFF_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_rd.c b/src/spdk/ocf/src/engine/engine_rd.c
new file mode 100644
index 000000000..74c000f58
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_rd.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_rd.h"
+#include "engine_pt.h"
+#include "engine_inv.h"
+#include "engine_bf.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../utils/utils_io.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../metadata/metadata.h"
+#include "../ocf_def_priv.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "rd"
+#include "engine_debug.h"
+
+static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (req->error)
+ inc_fallback_pt_error_counter(req->cache);
+
+ /* Handle callback-caller race to let only one of the two complete the
+ * request. Also, complete original request only if this is the last
+ * sub-request to complete
+ */
+ if (env_atomic_dec_return(&req->req_remaining) == 0) {
+ OCF_DEBUG_RQ(req, "HIT completion");
+
+ if (req->error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_READ);
+ ocf_engine_push_req_front_pt(req);
+ } else {
+
+ ocf_req_unlock(req);
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ /* Free the request at the last point
+ * of the completion path
+ */
+ ocf_req_put(req);
+ }
+ }
+}
+
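+/*
+ * Editorial note: on a miss the data read from the core is copied into
+ * req->cp_data, the original request is completed towards the application
+ * and the copy is then written to the cache asynchronously by
+ * ocf_engine_backfill(). On a core error the metadata prepared for the
+ * miss is invalidated instead.
+ */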
+static void _ocf_read_generic_miss_complete(struct ocf_request *req, int error)
+{
+ struct ocf_cache *cache = req->cache;
+
+ if (error)
+ req->error = error;
+
+ /* Handle callback-caller race to let only one of the two complete the
+ * request. Also, complete original request only if this is the last
+ * sub-request to complete
+ */
+ if (env_atomic_dec_return(&req->req_remaining) == 0) {
+ OCF_DEBUG_RQ(req, "MISS completion");
+
+ if (req->error) {
+ /*
+ * --- Do not submit this request to write-back-thread.
+ * Stop it here ---
+ */
+ req->complete(req, req->error);
+
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_READ);
+
+ ctx_data_free(cache->owner, req->cp_data);
+ req->cp_data = NULL;
+
+ /* Invalidate metadata */
+ ocf_engine_invalidate(req);
+
+ return;
+ }
+
+		/* Copy the pages read from core into the copy vector (cp_data),
+		 * which the backfill path uses to populate the cache
+ */
+ ctx_data_cpy(cache->owner, req->cp_data, req->data, 0, 0,
+ req->byte_length);
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ ocf_engine_backfill(req);
+ }
+}
+
+void ocf_read_generic_submit_hit(struct ocf_request *req)
+{
+ env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
+
+ ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
+ ocf_engine_io_count(req), _ocf_read_generic_hit_complete);
+}
+
+static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+ int ret;
+
+ env_atomic_set(&req->req_remaining, 1);
+
+ req->cp_data = ctx_data_alloc(cache->owner,
+ BYTES_TO_PAGES(req->byte_length));
+ if (!req->cp_data)
+ goto err_alloc;
+
+ ret = ctx_data_mlock(cache->owner, req->cp_data);
+ if (ret)
+ goto err_alloc;
+
+ /* Submit read request to core device. */
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_read_generic_miss_complete);
+
+ return;
+
+err_alloc:
+ _ocf_read_generic_miss_complete(req, -OCF_ERR_NO_MEM);
+}
+
+static int _ocf_read_generic_do(struct ocf_request *req)
+{
+ if (ocf_engine_is_miss(req) && req->map->rd_locked) {
+ /* Miss can be handled only on write locks.
+ * Need to switch to PT
+ */
+ OCF_DEBUG_RQ(req, "Switching to PT");
+ ocf_read_pt_do(req);
+ return 0;
+ }
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ if (ocf_engine_is_miss(req)) {
+ if (req->info.dirty_any) {
+ ocf_req_hash_lock_rd(req);
+
+			/* Request is dirty, need to clean it first */
+ ocf_engine_clean(req);
+
+ ocf_req_hash_unlock_rd(req);
+
+ /* We need to clean request before processing, return */
+ ocf_req_put(req);
+
+ return 0;
+ }
+
+ ocf_req_hash_lock_rd(req);
+
+ /* Set valid status bits map */
+ ocf_set_valid_map_info(req);
+
+ ocf_req_hash_unlock_rd(req);
+ }
+
+ if (req->info.re_part) {
+ OCF_DEBUG_RQ(req, "Re-Part");
+
+ ocf_req_hash_lock_wr(req);
+
+		/* Probably some cache lines are assigned to the wrong
+		 * partition. Need to move them to the new one
+ */
+ ocf_part_move(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ OCF_DEBUG_RQ(req, "Submit");
+
+ /* Submit IO */
+ if (ocf_engine_is_hit(req))
+ ocf_read_generic_submit_hit(req);
+ else
+ _ocf_read_generic_submit_miss(req);
+
+ /* Update statistics */
+ ocf_engine_update_request_stats(req);
+ ocf_engine_update_block_stats(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_read_generic_resume = {
+ .read = _ocf_read_generic_do,
+ .write = _ocf_read_generic_do,
+};
+
+static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req)
+{
+ if (ocf_engine_is_hit(req))
+ return ocf_engine_lock_read;
+ else
+ return ocf_engine_lock_write;
+}
+
+static const struct ocf_engine_callbacks _rd_engine_callbacks =
+{
+ .get_lock_type = ocf_rd_get_lock_type,
+ .resume = ocf_engine_on_resume,
+};
+
+int ocf_read_generic(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+ struct ocf_cache *cache = req->cache;
+
+ ocf_io_start(&req->ioi.io);
+
+ if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
+		/* Read-miss processing is blocked - handle the IO in pass-through mode */
+ ocf_get_io_if(ocf_cache_mode_pt)->read(req);
+ return 0;
+ }
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+	/* Set resume callbacks */
+ req->io_if = &_io_if_read_generic_resume;
+
+ lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
+
+ if (!req->info.mapping_error) {
+ if (lock >= 0) {
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* Lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+				/* Lock was acquired, can perform IO */
+ _ocf_read_generic_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ } else {
+ ocf_req_clear(req);
+ ocf_get_io_if(ocf_cache_mode_pt)->read(req);
+ }
+
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_rd.h b/src/spdk/ocf/src/engine/engine_rd.h
new file mode 100644
index 000000000..56373b59d
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_rd.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_RD_H_
+#define ENGINE_RD_H_
+
+int ocf_read_generic(struct ocf_request *req);
+
+void ocf_read_generic_submit_hit(struct ocf_request *req);
+
+#endif /* ENGINE_RD_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_wa.c b/src/spdk/ocf/src/engine/engine_wa.c
new file mode 100644
index 000000000..f5face017
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wa.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_wa.h"
+#include "engine_common.h"
+#include "cache_engine.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../metadata/metadata.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wa"
+#include "engine_debug.h"
+
+static void _ocf_read_wa_complete(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error |= error;
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ if (req->error) {
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_WRITE);
+ }
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ /* Release OCF request */
+ ocf_req_put(req);
+}
+
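+/*
+ * Editorial summary: write-around redirects a full hit to write-through,
+ * a partial hit to write-invalidate, and writes a full miss directly to
+ * the core without allocating cache lines.
+ */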
+int ocf_write_wa(struct ocf_request *req)
+{
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ ocf_req_hash(req);
+
+ ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
+
+ /* Traverse request to check if there are mapped cache lines */
+ ocf_engine_traverse(req);
+
+ ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
+
+ if (ocf_engine_is_hit(req)) {
+ ocf_req_clear(req);
+
+ /* There is HIT, do WT */
+ ocf_get_io_if(ocf_cache_mode_wt)->write(req);
+
+ } else if (ocf_engine_mapped_count(req)) {
+ ocf_req_clear(req);
+
+ /* Partial MISS, do WI */
+ ocf_get_io_if(ocf_cache_mode_wi)->write(req);
+ } else {
+
+ /* There is no mapped cache line, write directly into core */
+
+ OCF_DEBUG_RQ(req, "Submit");
+
+ /* Submit write IO to the core */
+ env_atomic_set(&req->req_remaining, 1);
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_read_wa_complete);
+
+ /* Update statistics */
+ ocf_engine_update_block_stats(req);
+ ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
+ req->info.hit_no, req->core_line_count);
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+
diff --git a/src/spdk/ocf/src/engine/engine_wa.h b/src/spdk/ocf/src/engine/engine_wa.h
new file mode 100644
index 000000000..fde5ef9e6
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wa.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_WA_H_
+#define ENGINE_WA_H_
+
+int ocf_write_wa(struct ocf_request *req);
+
+#endif /* ENGINE_WA_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_wb.c b/src/spdk/ocf/src/engine/engine_wb.c
new file mode 100644
index 000000000..dc20fbc26
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wb.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cache_engine.h"
+#include "engine_common.h"
+#include "engine_wb.h"
+#include "engine_inv.h"
+#include "../metadata/metadata.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wb"
+#include "engine_debug.h"
+
+static const struct ocf_io_if _io_if_wb_resume = {
+ .read = ocf_write_wb_do,
+ .write = ocf_write_wb_do,
+};
+
+static void _ocf_write_wb_update_bits(struct ocf_request *req)
+{
+ if (ocf_engine_is_miss(req)) {
+ ocf_req_hash_lock_rd(req);
+ /* Update valid status bits */
+ ocf_set_valid_map_info(req);
+
+ ocf_req_hash_unlock_rd(req);
+ }
+
+ if (!ocf_engine_is_dirty_all(req)) {
+ ocf_req_hash_lock_wr(req);
+
+ /* set dirty bits, and mark if metadata flushing is required */
+ ocf_set_dirty_map_info(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+}
+
+static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
+{
+ if (error)
+ req->error = error;
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ if (req->error)
+ ocf_engine_error(req, true, "Failed to write data to cache");
+
+ ocf_req_unlock_wr(req);
+
+ req->complete(req, req->error);
+
+ ocf_req_put(req);
+}
+
+static int ocf_write_wb_do_flush_metadata(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ env_atomic_set(&req->req_remaining, 1); /* One core IO */
+
+ if (req->info.flush_metadata) {
+ OCF_DEBUG_RQ(req, "Flush metadata");
+ ocf_metadata_flush_do_asynch(cache, req,
+ _ocf_write_wb_io_flush_metadata);
+ }
+
+ _ocf_write_wb_io_flush_metadata(req, 0);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_wb_flush_metadata = {
+ .read = ocf_write_wb_do_flush_metadata,
+ .write = ocf_write_wb_do_flush_metadata,
+};
+
+static void _ocf_write_wb_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+ req->error |= error;
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+ ocf_engine_error(req, true, "Failed to write data to cache");
+
+ req->complete(req, req->error);
+
+ ocf_engine_invalidate(req);
+ } else {
+ ocf_engine_push_req_front_if(req, &_io_if_wb_flush_metadata,
+ true);
+ }
+}
+
+
+static inline void _ocf_write_wb_submit(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
+
+ /*
+ * 1. Submit data
+ * 2. Wait for completion of data
+ * 3. Then continue processing request (flush metadata)
+ */
+
+ if (req->info.re_part) {
+ OCF_DEBUG_RQ(req, "Re-Part");
+
+ ocf_req_hash_lock_wr(req);
+
+		/* Probably some cache lines are assigned to the wrong
+		 * partition. Need to move them to the new one
+ */
+ ocf_part_move(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ OCF_DEBUG_RQ(req, "Submit Data");
+
+ /* Data IO */
+ ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
+ ocf_engine_io_count(req), _ocf_write_wb_complete);
+}
+
+int ocf_write_wb_do(struct ocf_request *req)
+{
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Update status bits */
+ _ocf_write_wb_update_bits(req);
+
+ /* Submit IO */
+ _ocf_write_wb_submit(req);
+
+ /* Update statistics */
+ ocf_engine_update_request_stats(req);
+ ocf_engine_update_block_stats(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static enum ocf_engine_lock_type ocf_wb_get_lock_type(struct ocf_request *req)
+{
+ return ocf_engine_lock_write;
+}
+
+static const struct ocf_engine_callbacks _wb_engine_callbacks =
+{
+ .get_lock_type = ocf_wb_get_lock_type,
+ .resume = ocf_engine_on_resume,
+};
+
+int ocf_write_wb(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ ocf_io_start(&req->ioi.io);
+
+	/* TODO: verify whether this extra request reference is needed */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_wb_resume;
+
+ /* TODO: Handle fits into dirty */
+
+ lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks);
+
+ if (!req->info.mapping_error) {
+ if (lock >= 0) {
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* WR lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+ ocf_write_wb_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ } else {
+ ocf_req_clear(req);
+ ocf_get_io_if(ocf_cache_mode_pt)->write(req);
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_wb.h b/src/spdk/ocf/src/engine/engine_wb.h
new file mode 100644
index 000000000..b3fdc356b
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wb.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef ENGINE_WB_H_
+#define ENGINE_WB_H_
+
+int ocf_write_wb(struct ocf_request *req);
+
+int ocf_write_wb_do(struct ocf_request *req);
+
+#endif /* ENGINE_WB_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_wi.c b/src/spdk/ocf/src/engine/engine_wi.c
new file mode 100644
index 000000000..ff947e43b
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wi.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_wi.h"
+#include "engine_common.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../metadata/metadata.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wi"
+#include "engine_debug.h"
+
+static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req);
+
+static const struct ocf_io_if _io_if_wi_flush_metadata = {
+ .read = ocf_write_wi_update_and_flush_metadata,
+ .write = ocf_write_wi_update_and_flush_metadata,
+};
+
+static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
+{
+ if (error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+ req->error |= error;
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ if (req->error)
+ ocf_engine_error(req, true, "Failed to write data to cache");
+
+ ocf_req_unlock_wr(req);
+
+ req->complete(req, req->error);
+
+ ocf_req_put(req);
+}
+
+static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ env_atomic_set(&req->req_remaining, 1); /* One core IO */
+
+ if (ocf_engine_mapped_count(req)) {
+		/* There are mapped cache lines, need to remove them */
+
+ ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
+
+ /* Remove mapped cache lines from metadata */
+ ocf_purge_map_info(req);
+
+ ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
+
+ if (req->info.flush_metadata) {
+			/* Request was dirty and metadata needs to be flushed */
+ ocf_metadata_flush_do_asynch(cache, req,
+ _ocf_write_wi_io_flush_metadata);
+ }
+
+ }
+
+ _ocf_write_wi_io_flush_metadata(req, 0);
+
+ return 0;
+}
+
+static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ req->error = error;
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_WRITE);
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+ ocf_req_unlock_wr(req);
+
+ req->complete(req, req->error);
+
+ ocf_req_put(req);
+ } else {
+ ocf_engine_push_req_front_if(req, &_io_if_wi_flush_metadata,
+ true);
+ }
+}
+
+static int _ocf_write_wi_do(struct ocf_request *req)
+{
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ env_atomic_set(&req->req_remaining, 1); /* One core IO */
+
+ OCF_DEBUG_RQ(req, "Submit");
+
+ /* Submit write IO to the core */
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_write_wi_core_complete);
+
+ /* Update statistics */
+ ocf_engine_update_block_stats(req);
+ ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
+ req->info.hit_no, req->core_line_count);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static void _ocf_write_wi_on_resume(struct ocf_request *req)
+{
+ OCF_DEBUG_RQ(req, "On resume");
+ ocf_engine_push_req_front(req, true);
+}
+
+static const struct ocf_io_if _io_if_wi_resume = {
+ .read = _ocf_write_wi_do,
+ .write = _ocf_write_wi_do,
+};
+
+int ocf_write_wi(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_wi_resume;
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
+
+	/* Traverse to check if the request is fully mapped */
+ ocf_engine_traverse(req);
+
+ if (ocf_engine_mapped_count(req)) {
+		/* Some cache lines are mapped, lock request for WRITE access */
+ lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
+ } else {
+ lock = OCF_LOCK_ACQUIRED;
+ }
+
+ ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
+
+ if (lock >= 0) {
+ if (lock == OCF_LOCK_ACQUIRED) {
+ _ocf_write_wi_do(req);
+ } else {
+ /* WR lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_wi.h b/src/spdk/ocf/src/engine/engine_wi.h
new file mode 100644
index 000000000..5965ee039
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wi.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_WI_H_
+#define ENGINE_WI_H_
+
+int ocf_write_wi(struct ocf_request *req);
+
+#endif /* ENGINE_WI_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_wo.c b/src/spdk/ocf/src/engine/engine_wo.c
new file mode 100644
index 000000000..193198dc4
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wo.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cache_engine.h"
+#include "engine_common.h"
+#include "engine_rd.h"
+#include "engine_pt.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wo"
+#include "engine_debug.h"
+
+static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_READ);
+ req->error |= error;
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error)
+ ocf_engine_error(req, true, "Failed to read data from cache");
+
+ ocf_req_unlock_rd(req);
+
+ /* Complete request */
+ req->complete(req, req->error);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+}
+
+static void ocf_read_wo_cache_io(struct ocf_request *req, uint64_t offset,
+ uint64_t size)
+{
+ OCF_DEBUG_RQ(req, "Submit cache");
+ env_atomic_inc(&req->req_remaining);
+ ocf_submit_cache_reqs(req->cache, req, OCF_READ, offset, size, 1,
+ ocf_read_wo_cache_complete);
+}
+
+static int ocf_read_wo_cache_do(struct ocf_request *req)
+{
+ ocf_cache_t cache = req->cache;
+ uint32_t s, e, i;
+ uint64_t line;
+ struct ocf_map_info *entry;
+ bool dirty = false;
+ bool io = false;
+ uint64_t phys_prev, phys_curr = 0;
+ uint64_t io_start = 0;
+ uint64_t offset = 0;
+ uint64_t increment = 0;
+
+ env_atomic_set(&req->req_remaining, 1);
+
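+	/* Walk the request cacheline by cacheline and coalesce physically
+	 * contiguous dirty sector runs into as few cache IOs as possible.
+	 * Editorial example: for a single-cacheline request whose sectors
+	 * are dirty, dirty, clean, clean, dirty, two cache reads are
+	 * issued - one for the first two sectors and one for the last.
+	 */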
+ for (line = 0; line < req->core_line_count; ++line) {
+ entry = &req->map[line];
+ s = ocf_map_line_start_sector(req, line);
+ e = ocf_map_line_end_sector(req, line);
+
+ /* if cacheline mapping is not sequential, send cache IO to
+ * previous cacheline(s) */
+ phys_prev = phys_curr;
+ if (entry->status != LOOKUP_MISS)
+ phys_curr = ocf_metadata_map_lg2phy(cache,
+ entry->coll_idx);
+ if (io && phys_prev + 1 != phys_curr) {
+ ocf_read_wo_cache_io(req, io_start, offset - io_start);
+ io = false;
+ }
+
+ /* try to seek directly to the last sector */
+ if (entry->status == LOOKUP_MISS ||
+ ocf_engine_map_all_sec_clean(req, line)) {
+ /* all sectors invalid or clean */
+ i = e + 1;
+ increment = SECTORS_TO_BYTES(e - s + 1);
+ dirty = false;
+ }
+ else if (ocf_engine_map_all_sec_dirty(req, line)) {
+ /* all sectors dirty */
+ i = e + 1;
+ increment = SECTORS_TO_BYTES(e - s + 1);
+ dirty = true;
+ } else {
+ /* need to iterate through CL sector by sector */
+ i = s;
+ }
+
+ do {
+ if (i <= e) {
+ dirty = metadata_test_dirty_one(cache,
+ entry->coll_idx, i);
+ increment = 0;
+ do {
+ ++i;
+ increment += SECTORS_TO_BYTES(1);
+ } while (i <= e && metadata_test_dirty_one(
+ cache, entry->coll_idx, i)
+ == dirty);
+ }
+
+ if (io && !dirty) {
+ /* end of sequential dirty region */
+ ocf_read_wo_cache_io(req, io_start,
+ offset - io_start);
+ io = false;
+ }
+
+ if (!io && dirty) {
+ /* beginning of sequential dirty region */
+ io = true;
+ io_start = offset;
+ }
+
+ offset += increment;
+ } while (i <= e);
+ }
+
+ if (io)
+ ocf_read_wo_cache_io(req, io_start, offset - io_start);
+
+ ocf_read_wo_cache_complete(req, 0);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_wo_cache_read = {
+ .read = ocf_read_wo_cache_do,
+ .write = ocf_read_wo_cache_do,
+};
+
+static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ req->error |= error;
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_READ);
+ }
+
+ /* if all mapped cachelines are clean, the data we've read from core
+ * is valid and we can complete the request */
+ if (!req->info.dirty_any || req->error) {
+ OCF_DEBUG_RQ(req, "Completion");
+ req->complete(req, req->error);
+ ocf_req_unlock_rd(req);
+ ocf_req_put(req);
+ return;
+ }
+
+ req->io_if = &_io_if_wo_cache_read;
+ ocf_engine_push_req_front(req, true);
+}
+
+int ocf_read_wo_do(struct ocf_request *req)
+{
+ ocf_req_get(req);
+
+ /* Lack of cacheline repartitioning here is deliberate. WO cache mode
+ * reads should not affect cacheline status as reading data from the
+ * cache is just an internal optimization. Also WO cache mode is
+ * designed to be used with partitioning based on write life-time hints
+ * and read requests do not carry write lifetime hint by definition.
+ */
+
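+ /* Cache hit: serve the read entirely from cache. Otherwise read the
+ * whole range from the core volume first; dirty sectors residing in
+ * cache are merged in afterwards (see _ocf_read_wo_core_complete()). */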
+ if (ocf_engine_is_hit(req)) {
+ /* read hit - just fetch the data from cache */
+ OCF_DEBUG_RQ(req, "Submit cache hit");
+ ocf_read_generic_submit_hit(req);
+ } else {
+
+ OCF_DEBUG_RQ(req, "Submit core");
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_read_wo_core_complete);
+ }
+
+ ocf_engine_update_request_stats(req);
+ ocf_engine_update_block_stats(req);
+
+ ocf_req_put(req);
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_wo_resume = {
+ .read = ocf_read_wo_do,
+ .write = ocf_read_wo_do,
+};
+
+int ocf_read_wo(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_ACQUIRED;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume callbacks */
+ req->io_if = &_io_if_wo_resume;
+
+ ocf_req_hash(req);
+ ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
+
+ /* Traverse request to check if there are mapped cache lines */
+ ocf_engine_traverse(req);
+
+ if (ocf_engine_mapped_count(req)) {
+ /* There are mapped cache lines,
+ * lock request for READ access
+ */
+ lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+ }
+
+ ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
+
+ if (lock >= 0) {
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* Lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+ /* Lock was acquired, can perform IO */
+ ocf_read_wo_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_wo.h b/src/spdk/ocf/src/engine/engine_wo.h
new file mode 100644
index 000000000..2ae7f2921
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wo.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_WO_H_
+#define ENGINE_WO_H_
+
+int ocf_read_wo(struct ocf_request *req);
+
+#endif /* ENGINE_WO_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_wt.c b/src/spdk/ocf/src/engine/engine_wt.c
new file mode 100644
index 000000000..5adb64b31
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wt.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_wt.h"
+#include "engine_inv.h"
+#include "engine_common.h"
+#include "../ocf_request.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../metadata/metadata.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wt"
+#include "engine_debug.h"
+
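+/* Write-through engine: each write is submitted to both the cache and the
+ * core volume and completes only after both IOs finish. Cache lines written
+ * this way end up clean (dirty bits are cleared in
+ * _ocf_write_wt_update_bits()). */
+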
+static void _ocf_write_wt_req_complete(struct ocf_request *req)
+{
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_RQ(req, "Completion");
+
+ if (req->error) {
+ /* An error occurred */
+
+ /* Complete request */
+ req->complete(req, req->info.core_error ? req->error : 0);
+
+ ocf_engine_invalidate(req);
+ } else {
+ /* Unlock request from WRITE access */
+ ocf_req_unlock_wr(req);
+
+ /* Complete request */
+ req->complete(req, req->info.core_error ? req->error : 0);
+
+ /* Release OCF request */
+ ocf_req_put(req);
+ }
+}
+
+static void _ocf_write_wt_cache_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ req->error = req->error ?: error;
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+
+ if (req->error)
+ inc_fallback_pt_error_counter(req->cache);
+ }
+
+ _ocf_write_wt_req_complete(req);
+}
+
+static void _ocf_write_wt_core_complete(struct ocf_request *req, int error)
+{
+ if (error) {
+ req->error = error;
+ req->info.core_error = 1;
+ ocf_core_stats_core_error_update(req->core, OCF_WRITE);
+ }
+
+ _ocf_write_wt_req_complete(req);
+}
+
+static inline void _ocf_write_wt_submit(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ /* Submit IOs */
+ OCF_DEBUG_RQ(req, "Submit");
+
+ /* Calculate how many IOs need to be submitted */
+ env_atomic_set(&req->req_remaining, ocf_engine_io_count(req)); /* Cache IO */
+ env_atomic_inc(&req->req_remaining); /* Core device IO */
+
+ if (req->info.flush_metadata) {
+ /* Metadata flush IO */
+
+ ocf_metadata_flush_do_asynch(cache, req,
+ _ocf_write_wt_cache_complete);
+ }
+
+ /* To cache */
+ ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
+ ocf_engine_io_count(req), _ocf_write_wt_cache_complete);
+
+ /* To core */
+ ocf_submit_volume_req(&req->core->volume, req,
+ _ocf_write_wt_core_complete);
+}
+
+static void _ocf_write_wt_update_bits(struct ocf_request *req)
+{
+ if (ocf_engine_is_miss(req)) {
+ ocf_req_hash_lock_rd(req);
+
+ /* Update valid status bits */
+ ocf_set_valid_map_info(req);
+
+ ocf_req_hash_unlock_rd(req);
+ }
+
+ if (req->info.dirty_any) {
+ ocf_req_hash_lock_wr(req);
+
+ /* Write goes to both SSD and HDD, need to update status bits from
+ * dirty to clean
+ */
+
+ ocf_set_clean_map_info(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+
+ if (req->info.re_part) {
+ OCF_DEBUG_RQ(req, "Re-Part");
+
+ ocf_req_hash_lock_wr(req);
+
+ /* Probably some cache lines are assigned to the wrong
+ * partition. Need to move them to the new one
+ */
+ ocf_part_move(req);
+
+ ocf_req_hash_unlock_wr(req);
+ }
+}
+
+static int _ocf_write_wt_do(struct ocf_request *req)
+{
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Update status bits */
+ _ocf_write_wt_update_bits(req);
+
+ /* Submit IO */
+ _ocf_write_wt_submit(req);
+
+ /* Update statistics */
+ ocf_engine_update_request_stats(req);
+ ocf_engine_update_block_stats(req);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_wt_resume = {
+ .read = _ocf_write_wt_do,
+ .write = _ocf_write_wt_do,
+};
+
+static enum ocf_engine_lock_type ocf_wt_get_lock_type(struct ocf_request *req)
+{
+ return ocf_engine_lock_write;
+}
+
+static const struct ocf_engine_callbacks _wt_engine_callbacks =
+{
+ .get_lock_type = ocf_wt_get_lock_type,
+ .resume = ocf_engine_on_resume,
+};
+
+int ocf_write_wt(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ ocf_io_start(&req->ioi.io);
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Set resume io_if */
+ req->io_if = &_io_if_wt_resume;
+
+ lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks);
+
+ if (!req->info.mapping_error) {
+ if (lock >= 0) {
+ if (lock != OCF_LOCK_ACQUIRED) {
+ /* WR lock was not acquired, need to wait for resume */
+ OCF_DEBUG_RQ(req, "NO LOCK");
+ } else {
+ _ocf_write_wt_do(req);
+ }
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d\n", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+ } else {
+ ocf_req_clear(req);
+ ocf_get_io_if(ocf_cache_mode_pt)->write(req);
+ }
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/engine/engine_wt.h b/src/spdk/ocf/src/engine/engine_wt.h
new file mode 100644
index 000000000..ee8593a4b
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_wt.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_WT_H_
+#define ENGINE_WT_H_
+
+int ocf_write_wt(struct ocf_request *req);
+
+#endif /* ENGINE_WT_H_ */
diff --git a/src/spdk/ocf/src/engine/engine_zero.c b/src/spdk/ocf/src/engine/engine_zero.c
new file mode 100644
index 000000000..23f665cb2
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_zero.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "engine_zero.h"
+#include "engine_common.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../metadata/metadata.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "zero"
+#include "engine_debug.h"
+
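+/* Zeroing engine: invalidates the mapped cache lines of a request, marking
+ * the affected metadata, flushing it if needed and then purging the mapping.
+ * Used e.g. to trim cache lines on atomic volumes before eviction (see
+ * evp_lru_zero_line()). */
+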
+static int ocf_zero_purge(struct ocf_request *req)
+{
+ if (req->error) {
+ ocf_engine_error(req, true, "Failed to discard data on cache");
+ } else {
+ /* There are mapped cache lines, need to remove them */
+
+ ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
+
+ /* Remove mapped cache lines from metadata */
+ ocf_purge_map_info(req);
+
+ ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
+ }
+
+ ocf_req_unlock_wr(req);
+
+ req->complete(req, req->error);
+
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_zero_purge = {
+ .read = ocf_zero_purge,
+ .write = ocf_zero_purge,
+};
+
+static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
+{
+ if (error) {
+ ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
+ req->error = error;
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ ocf_engine_push_req_front_if(req, &_io_if_zero_purge, true);
+}
+
+static inline void ocf_zero_map_info(struct ocf_request *req)
+{
+ uint32_t map_idx = 0;
+ uint8_t start_bit;
+ uint8_t end_bit;
+ struct ocf_map_info *map = req->map;
+ struct ocf_cache *cache = req->cache;
+ uint32_t count = req->core_line_count;
+
+ /* Purge range on the basis of map info
+ *
+ * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
+ * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
+ * | first | Middle | last |
+ */
+
+ for (map_idx = 0; map_idx < count; map_idx++) {
+ if (map[map_idx].status == LOOKUP_MISS)
+ continue;
+
+ start_bit = 0;
+ end_bit = ocf_line_end_sector(cache);
+
+ if (map_idx == 0) {
+ /* First */
+ start_bit = BYTES_TO_SECTORS(req->byte_position)
+ % ocf_line_sectors(cache);
+ }
+
+ if (map_idx == (count - 1)) {
+ /* Last */
+ end_bit = BYTES_TO_SECTORS(req->byte_position +
+ req->byte_length - 1) %
+ ocf_line_sectors(cache);
+ }
+
+ ocf_metadata_flush_mark(cache, req, map_idx, INVALID,
+ start_bit, end_bit);
+ }
+}
+
+static int _ocf_zero_do(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+
+ /* Get OCF request - increase reference counter */
+ ocf_req_get(req);
+
+ /* Mark cache lines for zeroing/discarding */
+ ocf_zero_map_info(req);
+
+ /* Discard marked cache lines */
+ env_atomic_set(&req->req_remaining, 1);
+ if (req->info.flush_metadata) {
+ /* Request was dirty and metadata needs to be flushed */
+ ocf_metadata_flush_do_asynch(cache, req,
+ _ocf_zero_io_flush_metadata);
+ }
+ _ocf_zero_io_flush_metadata(req, 0);
+
+ /* Put OCF request - decrease reference counter */
+ ocf_req_put(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_ocf_zero_do = {
+ .read = _ocf_zero_do,
+ .write = _ocf_zero_do,
+};
+
+/**
+ * @note
+ * - Caller has to have metadata write lock
+ * - Core line has to be mapped
+ */
+void ocf_engine_zero_line(struct ocf_request *req)
+{
+ int lock = OCF_LOCK_NOT_ACQUIRED;
+
+ ENV_BUG_ON(req->core_line_count != 1);
+
+ /* No hash bucket locking here - ocf_engine_zero_line caller must hold
+ * metadata global write lock, so we have exclusive access to all hash
+ * buckets here. */
+
+ /* Traverse to check if request is mapped */
+ ocf_engine_traverse(req);
+
+ ENV_BUG_ON(!ocf_engine_is_mapped(req));
+
+ req->io_if = &_io_if_ocf_zero_do;
+
+ /* Some cache lines are mapped, lock request for WRITE access */
+ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+
+ if (lock >= 0) {
+ ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
+ ocf_engine_push_req_front_if(req, &_io_if_ocf_zero_do, true);
+ } else {
+ OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+ req->complete(req, lock);
+ ocf_req_put(req);
+ }
+}
+
diff --git a/src/spdk/ocf/src/engine/engine_zero.h b/src/spdk/ocf/src/engine/engine_zero.h
new file mode 100644
index 000000000..202f82359
--- /dev/null
+++ b/src/spdk/ocf/src/engine/engine_zero.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_ZERO_H_
+#define ENGINE_ZERO_H_
+
+void ocf_engine_zero_line(struct ocf_request *req);
+
+#endif /* ENGINE_ZERO_H_ */
diff --git a/src/spdk/ocf/src/eviction/eviction.c b/src/spdk/ocf/src/eviction/eviction.c
new file mode 100644
index 000000000..2d76db4af
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/eviction.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "eviction.h"
+#include "ops.h"
+#include "../utils/utils_part.h"
+
+struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
+ [ocf_eviction_lru] = {
+ .init_cline = evp_lru_init_cline,
+ .rm_cline = evp_lru_rm_cline,
+ .req_clines = evp_lru_req_clines,
+ .hot_cline = evp_lru_hot_cline,
+ .init_evp = evp_lru_init_evp,
+ .dirty_cline = evp_lru_dirty_cline,
+ .clean_cline = evp_lru_clean_cline,
+ .name = "lru",
+ },
+};
+
+static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
+ uint32_t to_evict)
+{
+ if (part->runtime->curr_size <= part->config->min_size) {
+ /*
+ * Cannot evict from this partition because current size
+ * is less than minimum size
+ */
+ return 0;
+ }
+
+ if (to_evict < OCF_TO_EVICTION_MIN)
+ to_evict = OCF_TO_EVICTION_MIN;
+
+ if (to_evict > (part->runtime->curr_size - part->config->min_size))
+ to_evict = part->runtime->curr_size - part->config->min_size;
+
+ return to_evict;
+}
+
+static inline uint32_t ocf_evict_do(ocf_cache_t cache,
+ ocf_queue_t io_queue, const uint32_t evict_cline_no,
+ ocf_part_id_t target_part_id)
+{
+ uint32_t to_evict = 0, evicted = 0;
+ struct ocf_user_part *part;
+ struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
+ ocf_part_id_t part_id;
+
+ /* For each partition from the lowest priority to highest one */
+ for_each_part(cache, part, part_id) {
+
+ if (!ocf_eviction_can_evict(cache))
+ goto out;
+
+ /*
+ * Check stop and continue conditions
+ */
+ if (target_part->config->priority > part->config->priority) {
+ /*
+ * iterated partition has higher priority, do not evict from it
+ */
+ break;
+ }
+ if (!part->config->flags.eviction) {
+ /* It seems that there are no more partitions to evict from */
+ break;
+ }
+ if (part_id == target_part_id) {
+ /* Omit the target partition, evict from the others first */
+ continue;
+ }
+ if (evicted >= evict_cline_no) {
+ /* Evicted the requested number of cache lines, stop */
+ goto out;
+ }
+
+ to_evict = ocf_evict_calculate(part, evict_cline_no);
+ if (to_evict == 0) {
+ /* No cache lines to evict for this partition */
+ continue;
+ }
+
+ evicted += ocf_eviction_need_space(cache, io_queue,
+ part_id, to_evict);
+ }
+
+ if (!ocf_eviction_can_evict(cache))
+ goto out;
+
+ if (evicted < evict_cline_no) {
+ /* Now we can evict from the target partition */
+ to_evict = ocf_evict_calculate(target_part, evict_cline_no);
+ if (to_evict) {
+ evicted += ocf_eviction_need_space(cache, io_queue,
+ target_part_id, to_evict);
+ }
+ }
+
+out:
+ return evicted;
+}
+
+int space_managment_evict_do(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t evict_cline_no)
+{
+ uint32_t evicted;
+ uint32_t free;
+
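+ /* If the freelist already covers the request there is nothing to
+ * evict; otherwise evict the shortfall (other partitions first, then
+ * the target one) and flag a mapping error if that was still not
+ * enough. */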
+ free = ocf_freelist_num_free(cache->freelist);
+ if (evict_cline_no <= free)
+ return LOOKUP_MAPPED;
+
+ evict_cline_no -= free;
+ evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no,
+ req->part_id);
+
+ if (evict_cline_no <= evicted)
+ return LOOKUP_MAPPED;
+
+ req->info.mapping_error |= true;
+ return LOOKUP_MISS;
+}
diff --git a/src/spdk/ocf/src/eviction/eviction.h b/src/spdk/ocf/src/eviction/eviction.h
new file mode 100644
index 000000000..644452109
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/eviction.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __LAYER_EVICTION_POLICY_H__
+#define __LAYER_EVICTION_POLICY_H__
+
+#include "ocf/ocf.h"
+#include "lru.h"
+#include "lru_structs.h"
+#include "../ocf_request.h"
+
+#define OCF_TO_EVICTION_MIN 128UL
+#define OCF_PENDING_EVICTION_LIMIT 512UL
+
+struct eviction_policy {
+ union {
+ struct lru_eviction_policy lru;
+ } policy;
+};
+
+/* Eviction policy metadata per cache line */
+union eviction_policy_meta {
+ struct lru_eviction_policy_meta lru;
+} __attribute__((packed));
+
+/* the caller must hold the metadata lock for all operations
+ *
+ * For range operations the caller can:
+ * set core_id to -1 to purge the whole cache device
+ * set core_id to -2 to purge the whole cache partition
+ */
+struct eviction_policy_ops {
+ void (*init_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
+ void (*rm_cline)(ocf_cache_t cache,
+ ocf_cache_line_t cline);
+ bool (*can_evict)(ocf_cache_t cache);
+ uint32_t (*req_clines)(ocf_cache_t cache,
+ ocf_queue_t io_queue, ocf_part_id_t part_id,
+ uint32_t cline_no);
+ void (*hot_cline)(ocf_cache_t cache,
+ ocf_cache_line_t cline);
+ void (*init_evp)(ocf_cache_t cache,
+ ocf_part_id_t part_id);
+ void (*dirty_cline)(ocf_cache_t cache,
+ ocf_part_id_t part_id,
+ uint32_t cline_no);
+ void (*clean_cline)(ocf_cache_t cache,
+ ocf_part_id_t part_id,
+ uint32_t cline_no);
+ const char *name;
+};
+
+extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
+
+/*
+ * Deallocates space from low priority partitions.
+ *
+ * Returns LOOKUP_MAPPED when enough cache lines are (or were made) available
+ * for the request, LOOKUP_MISS otherwise (in which case the request's
+ * mapping_error flag is set)
+ */
+int space_managment_evict_do(ocf_cache_t cache,
+ struct ocf_request *req, uint32_t evict_cline_no);
+
+int space_management_free(ocf_cache_t cache, uint32_t count);
+
+#endif
diff --git a/src/spdk/ocf/src/eviction/lru.c b/src/spdk/ocf/src/eviction/lru.c
new file mode 100644
index 000000000..ebbeaba0b
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/lru.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "eviction.h"
+#include "lru.h"
+#include "ops.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_cache_line.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../mngt/ocf_mngt_common.h"
+#include "../engine/engine_zero.h"
+#include "../ocf_request.h"
+
+#define OCF_EVICTION_MAX_SCAN 1024
+
+/* -- Start of LRU functions --*/
+
+/* Returns 1 if the given collision_index is the _head_ of
+ * the LRU list, 0 otherwise.
+ */
+/* static inline int is_lru_head(unsigned collision_index) {
+ * return collision_index == lru_list.lru_head;
+ * }
+ */
+
+#define is_lru_head(x) (x == collision_table_entries)
+#define is_lru_tail(x) (x == collision_table_entries)
+
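+/* Each partition keeps two doubly-linked LRU lists, one for clean and one
+ * for dirty cache lines, threaded through the per-line eviction metadata.
+ * An index equal to collision_table_entries acts as the NULL sentinel. */
+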
+/* Sets the given collision_index as the new _head_ of the LRU list. */
+static inline void update_lru_head(ocf_cache_t cache,
+ int partition_id, unsigned int collision_index,
+ int cline_dirty)
+{
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+
+
+ if (cline_dirty)
+ part->runtime->eviction.policy.lru.dirty_head = collision_index;
+ else
+ part->runtime->eviction.policy.lru.clean_head = collision_index;
+}
+
+/* Sets the given collision_index as the new _tail_ of the LRU list. */
+static inline void update_lru_tail(ocf_cache_t cache,
+ int partition_id, unsigned int collision_index,
+ int cline_dirty)
+{
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+
+ if (cline_dirty)
+ part->runtime->eviction.policy.lru.dirty_tail = collision_index;
+ else
+ part->runtime->eviction.policy.lru.clean_tail = collision_index;
+}
+
+/* Sets the given collision_index as the new _head_ and _tail_ of
+ * the LRU list.
+ */
+static inline void update_lru_head_tail(ocf_cache_t cache,
+ int partition_id, unsigned int collision_index, int cline_dirty)
+{
+ update_lru_head(cache, partition_id, collision_index, cline_dirty);
+ update_lru_tail(cache, partition_id, collision_index, cline_dirty);
+}
+
+/* Adds the given collision_index to the _head_ of the LRU list */
+static void add_lru_head(ocf_cache_t cache, int partition_id,
+ unsigned int collision_index, int cline_dirty)
+{
+ unsigned int curr_head_index;
+ unsigned int collision_table_entries =
+ cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+ union eviction_policy_meta eviction;
+
+ ENV_BUG_ON(!(collision_index < collision_table_entries));
+
+ ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
+
+ /* First node to be added. */
+ if ((cline_dirty && !part->runtime->eviction.policy.lru.has_dirty_nodes) ||
+ (!cline_dirty && !part->runtime->eviction.policy.lru.has_clean_nodes)) {
+ update_lru_head_tail(cache, partition_id, collision_index, cline_dirty);
+
+ eviction.lru.next = collision_table_entries;
+ eviction.lru.prev = collision_table_entries;
+
+ if (cline_dirty)
+ part->runtime->eviction.policy.lru.has_dirty_nodes = 1;
+ else
+ part->runtime->eviction.policy.lru.has_clean_nodes = 1;
+
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+ } else {
+ union eviction_policy_meta eviction_curr;
+
+ /* Not the first node to be added. */
+ curr_head_index = cline_dirty ?
+ part->runtime->eviction.policy.lru.dirty_head :
+ part->runtime->eviction.policy.lru.clean_head;
+
+ ENV_BUG_ON(!(curr_head_index < collision_table_entries));
+
+ ocf_metadata_get_evicition_policy(cache, curr_head_index,
+ &eviction_curr);
+
+ eviction.lru.next = curr_head_index;
+ eviction.lru.prev = collision_table_entries;
+ eviction_curr.lru.prev = collision_index;
+
+ update_lru_head(cache, partition_id, collision_index, cline_dirty);
+
+ ocf_metadata_set_evicition_policy(cache, curr_head_index,
+ &eviction_curr);
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+ }
+}
+
+/* Deletes the node with the given collision_index from the lru list */
+static void remove_lru_list(ocf_cache_t cache, int partition_id,
+ unsigned int collision_index, int cline_dirty)
+{
+ int is_clean_head = 0, is_clean_tail = 0, is_dirty_head = 0, is_dirty_tail = 0;
+ uint32_t prev_lru_node, next_lru_node;
+ uint32_t collision_table_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[partition_id];
+ union eviction_policy_meta eviction;
+
+ ENV_BUG_ON(!(collision_index < collision_table_entries));
+
+ ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
+
+ /* Find out if this node is LRU _head_ or LRU _tail_ */
+ if (part->runtime->eviction.policy.lru.clean_head == collision_index)
+ is_clean_head = 1;
+ if (part->runtime->eviction.policy.lru.dirty_head == collision_index)
+ is_dirty_head = 1;
+ if (part->runtime->eviction.policy.lru.clean_tail == collision_index)
+ is_clean_tail = 1;
+ if (part->runtime->eviction.policy.lru.dirty_tail == collision_index)
+ is_dirty_tail = 1;
+ ENV_BUG_ON((is_clean_tail || is_clean_head) && (is_dirty_tail || is_dirty_head));
+
+ /* Set prev and next (even if not existent) */
+ next_lru_node = eviction.lru.next;
+ prev_lru_node = eviction.lru.prev;
+
+ /* Case 1: If we are head AND tail, there is only one node.
+ * So unlink node and set that there is no node left in the list.
+ */
+ if ((is_clean_head && is_clean_tail) || (is_dirty_head && is_dirty_tail)) {
+ eviction.lru.next = collision_table_entries;
+ eviction.lru.prev = collision_table_entries;
+
+ update_lru_head_tail(cache, partition_id, collision_table_entries, cline_dirty);
+
+ if (cline_dirty)
+ part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
+ else
+ part->runtime->eviction.policy.lru.has_clean_nodes = 0;
+
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+
+ update_lru_head_tail(cache, partition_id,
+ collision_table_entries, cline_dirty);
+ }
+
+ /* Case 2: else if this collision_index is LRU head, but not tail,
+ * update head and return
+ */
+ else if ((!is_clean_tail && is_clean_head) || (!is_dirty_tail && is_dirty_head)) {
+ union eviction_policy_meta eviction_next;
+
+ ENV_BUG_ON(!(next_lru_node < collision_table_entries));
+
+ ocf_metadata_get_evicition_policy(cache, next_lru_node,
+ &eviction_next);
+
+ update_lru_head(cache, partition_id, next_lru_node, cline_dirty);
+
+ eviction.lru.next = collision_table_entries;
+ eviction_next.lru.prev = collision_table_entries;
+
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+
+ ocf_metadata_set_evicition_policy(cache, next_lru_node,
+ &eviction_next);
+ }
+
+ /* Case 3: else if this collision_index is LRU tail, but not head,
+ * update tail and return
+ */
+ else if ((is_clean_tail && !is_clean_head) || (is_dirty_tail && !is_dirty_head)) {
+ union eviction_policy_meta eviction_prev;
+
+ ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
+
+ update_lru_tail(cache, partition_id, prev_lru_node, cline_dirty);
+
+ ocf_metadata_get_evicition_policy(cache, prev_lru_node,
+ &eviction_prev);
+
+ eviction.lru.prev = collision_table_entries;
+ eviction_prev.lru.next = collision_table_entries;
+
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+
+ ocf_metadata_set_evicition_policy(cache, prev_lru_node,
+ &eviction_prev);
+ }
+
+ /* Case 4: else this collision_index is a middle node. There is no
+ * change to the head and the tail pointers.
+ */
+ else {
+ union eviction_policy_meta eviction_prev;
+ union eviction_policy_meta eviction_next;
+
+ ENV_BUG_ON(!(next_lru_node < collision_table_entries));
+ ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
+
+ ocf_metadata_get_evicition_policy(cache, next_lru_node,
+ &eviction_next);
+ ocf_metadata_get_evicition_policy(cache, prev_lru_node,
+ &eviction_prev);
+
+ /* Update prev and next nodes */
+ eviction_prev.lru.next = eviction.lru.next;
+ eviction_next.lru.prev = eviction.lru.prev;
+
+ /* Update the given node */
+ eviction.lru.next = collision_table_entries;
+ eviction.lru.prev = collision_table_entries;
+
+ ocf_metadata_set_evicition_policy(cache, collision_index,
+ &eviction);
+ ocf_metadata_set_evicition_policy(cache, next_lru_node,
+ &eviction_next);
+ ocf_metadata_set_evicition_policy(cache, prev_lru_node,
+ &eviction_prev);
+ }
+}
+
+/*-- End of LRU functions*/
+
+void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ union eviction_policy_meta eviction;
+
+ ocf_metadata_get_evicition_policy(cache, cline, &eviction);
+
+ eviction.lru.prev = cache->device->collision_table_entries;
+ eviction.lru.next = cache->device->collision_table_entries;
+
+ ocf_metadata_set_evicition_policy(cache, cline, &eviction);
+}
+
+
+/* the caller must hold the metadata lock */
+void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
+
+ remove_lru_list(cache, part_id, cline, metadata_test_dirty(cache, cline));
+}
+
+static void evp_lru_clean_end(void *private_data, int error)
+{
+ struct ocf_refcnt *counter = private_data;
+
+ ocf_refcnt_dec(counter);
+}
+
+static int evp_lru_clean_getter(ocf_cache_t cache,
+ void *getter_context, uint32_t item, ocf_cache_line_t *line)
+{
+ union eviction_policy_meta eviction;
+ struct ocf_cleaner_attribs *attribs = getter_context;
+ ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item;
+
+ while (curr_cline < cache->device->collision_table_entries) {
+ ocf_metadata_get_evicition_policy(cache, curr_cline,
+ &eviction);
+ prev_cline = eviction.lru.prev;
+
+ /* Prevent evicting already locked items */
+ if (ocf_cache_line_is_used(cache, curr_cline)) {
+ curr_cline = prev_cline;
+ continue;
+ }
+
+ ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline));
+
+ *line = curr_cline;
+ attribs->getter_item = prev_cline;
+ return 0;
+ }
+
+ return -1;
+}
+
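+/* Fire the cleaner for at most 32 dirty lines, walking the dirty LRU list
+ * from its tail via evp_lru_clean_getter(). The per-partition cleaning
+ * refcount ensures only one such cleaning run is in flight at a time, and
+ * cleaning is skipped while the cache is locked by a management operation. */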
+static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
+ ocf_part_id_t part_id, uint32_t count)
+{
+ struct ocf_refcnt *counter = &cache->refcnt.cleaning[part_id];
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+ struct ocf_cleaner_attribs attribs = {
+ .cache_line_lock = true,
+ .do_sort = true,
+
+ .cmpl_context = counter,
+ .cmpl_fn = evp_lru_clean_end,
+
+ .getter = evp_lru_clean_getter,
+ .getter_context = &attribs,
+ .getter_item = part->runtime->eviction.policy.lru.dirty_tail,
+
+ .count = count > 32 ? 32 : count,
+
+ .io_queue = io_queue
+ };
+ int cnt;
+
+ if (ocf_mngt_cache_is_locked(cache))
+ return;
+
+ cnt = ocf_refcnt_inc(counter);
+ if (!cnt) {
+ /* cleaner disabled by management operation */
+ return;
+ }
+ if (cnt > 1) {
+ /* cleaning already running for this partition */
+ ocf_refcnt_dec(counter);
+ return;
+ }
+
+ ocf_cleaner_fire(cache, &attribs);
+}
+
+static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
+{
+ env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
+}
+
+static void evp_lru_zero_line(ocf_cache_t cache, ocf_queue_t io_queue,
+ ocf_cache_line_t line)
+{
+ struct ocf_request *req;
+ ocf_core_id_t id;
+ uint64_t addr, core_line;
+
+ ocf_metadata_get_core_info(cache, line, &id, &core_line);
+ addr = core_line * ocf_line_size(cache);
+
+ req = ocf_req_new(io_queue, &cache->core[id], addr,
+ ocf_line_size(cache), OCF_WRITE);
+ if (!req)
+ return;
+
+ if (req->d2c) {
+ /* cache device is being detached */
+ ocf_req_put(req);
+ return;
+ }
+
+ req->info.internal = true;
+ req->complete = evp_lru_zero_line_complete;
+
+ env_atomic_inc(&cache->pending_eviction_clines);
+
+ ocf_engine_zero_line(req);
+}
+
+bool evp_lru_can_evict(ocf_cache_t cache)
+{
+ if (env_atomic_read(&cache->pending_eviction_clines) >=
+ OCF_PENDING_EVICTION_LIMIT) {
+ return false;
+ }
+
+ return true;
+}
+
+/* the caller must hold the metadata lock */
+uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
+ ocf_part_id_t part_id, uint32_t cline_no)
+{
+ uint32_t i;
+ ocf_cache_line_t curr_cline, prev_cline;
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+ union eviction_policy_meta eviction;
+
+ if (cline_no == 0)
+ return 0;
+
+ i = 0;
+ curr_cline = part->runtime->eviction.policy.lru.clean_tail;
+ /* Find cachelines to be evicted. */
+ while (i < cline_no) {
+ ENV_BUG_ON(curr_cline > cache->device->collision_table_entries);
+
+ if (!evp_lru_can_evict(cache))
+ break;
+
+ if (curr_cline == cache->device->collision_table_entries)
+ break;
+
+ ocf_metadata_get_evicition_policy(cache, curr_cline,
+ &eviction);
+ prev_cline = eviction.lru.prev;
+
+ /* Prevent evicting already locked items */
+ if (ocf_cache_line_is_used(cache, curr_cline)) {
+ curr_cline = prev_cline;
+ continue;
+ }
+
+ ENV_BUG_ON(metadata_test_dirty(cache, curr_cline));
+
+ if (ocf_volume_is_atomic(&cache->device->volume)) {
+ /* atomic cache, we have to trim cache lines before
+ * eviction
+ */
+ evp_lru_zero_line(cache, io_queue, curr_cline);
+
+ } else {
+ ocf_metadata_start_collision_shared_access(cache,
+ curr_cline);
+ set_cache_line_invalid_no_flush(cache, 0,
+ ocf_line_end_sector(cache),
+ curr_cline);
+ ocf_metadata_end_collision_shared_access(cache,
+ curr_cline);
+
+ /* Goto next item. */
+ i++;
+ }
+
+ curr_cline = prev_cline;
+ }
+
+ if (i < cline_no && part->runtime->eviction.policy.lru.dirty_tail !=
+ cache->device->collision_table_entries) {
+ evp_lru_clean(cache, io_queue, part_id, cline_no - i);
+ }
+
+ /* Return number of clines that were really evicted */
+ return i;
+}
+
+/* the caller must hold the metadata lock */
+void evp_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ uint32_t prev_lru_node, next_lru_node;
+ uint32_t collision_table_entries = cache->device->collision_table_entries;
+ union eviction_policy_meta eviction;
+
+ int cline_dirty;
+
+ ocf_metadata_get_evicition_policy(cache, cline, &eviction);
+
+ next_lru_node = eviction.lru.next;
+ prev_lru_node = eviction.lru.prev;
+
+ cline_dirty = metadata_test_dirty(cache, cline);
+
+ if ((next_lru_node != collision_table_entries) ||
+ (prev_lru_node != collision_table_entries) ||
+ ((part->runtime->eviction.policy.lru.clean_head == cline) &&
+ (part->runtime->eviction.policy.lru.clean_tail == cline)) ||
+ ((part->runtime->eviction.policy.lru.dirty_head == cline) &&
+ (part->runtime->eviction.policy.lru.dirty_tail == cline))) {
+ remove_lru_list(cache, part_id, cline, cline_dirty);
+ }
+
+ /* Update LRU */
+ add_lru_head(cache, part_id, cline, cline_dirty);
+}
+
+void evp_lru_init_evp(ocf_cache_t cache, ocf_part_id_t part_id)
+{
+ unsigned int collision_table_entries =
+ cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ part->runtime->eviction.policy.lru.has_clean_nodes = 0;
+ part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
+ part->runtime->eviction.policy.lru.clean_head = collision_table_entries;
+ part->runtime->eviction.policy.lru.clean_tail = collision_table_entries;
+ part->runtime->eviction.policy.lru.dirty_head = collision_table_entries;
+ part->runtime->eviction.policy.lru.dirty_tail = collision_table_entries;
+}
+
+void evp_lru_clean_cline(ocf_cache_t cache, ocf_part_id_t part_id,
+ uint32_t cline)
+{
+ OCF_METADATA_EVICTION_LOCK();
+ remove_lru_list(cache, part_id, cline, 1);
+ add_lru_head(cache, part_id, cline, 0);
+ OCF_METADATA_EVICTION_UNLOCK();
+}
+
+void evp_lru_dirty_cline(ocf_cache_t cache, ocf_part_id_t part_id,
+ uint32_t cline)
+{
+ OCF_METADATA_EVICTION_LOCK();
+ remove_lru_list(cache, part_id, cline, 0);
+ add_lru_head(cache, part_id, cline, 1);
+ OCF_METADATA_EVICTION_UNLOCK();
+}
+
diff --git a/src/spdk/ocf/src/eviction/lru.h b/src/spdk/ocf/src/eviction/lru.h
new file mode 100644
index 000000000..fa996f752
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/lru.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __EVICTION_LRU_H__
+#define __EVICTION_LRU_H__
+
+#include "eviction.h"
+#include "lru_structs.h"
+
+void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
+void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
+bool evp_lru_can_evict(struct ocf_cache *cache);
+uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,
+ ocf_part_id_t part_id, uint32_t cline_no);
+void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
+void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id);
+void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
+void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
+
+#endif
diff --git a/src/spdk/ocf/src/eviction/lru_structs.h b/src/spdk/ocf/src/eviction/lru_structs.h
new file mode 100644
index 000000000..813dd8c16
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/lru_structs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __EVICTION_LRU_STRUCTS_H__
+
+#define __EVICTION_LRU_STRUCTS_H__
+
+struct lru_eviction_policy_meta {
+ /* LRU pointers 2*4=8 bytes */
+ uint32_t prev;
+ uint32_t next;
+} __attribute__((packed));
+
+struct lru_eviction_policy {
+ int has_clean_nodes;
+ int has_dirty_nodes;
+ uint32_t dirty_head;
+ uint32_t dirty_tail;
+ uint32_t clean_head;
+ uint32_t clean_tail;
+};
+
+#endif
diff --git a/src/spdk/ocf/src/eviction/ops.h b/src/spdk/ocf/src/eviction/ops.h
new file mode 100644
index 000000000..acf235d41
--- /dev/null
+++ b/src/spdk/ocf/src/eviction/ops.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef LAYER_EVICTION_POLICY_OPS_H_
+#define LAYER_EVICTION_POLICY_OPS_H_
+
+#include "eviction.h"
+#include "../metadata/metadata.h"
+#include "../concurrency/ocf_metadata_concurrency.h"
+
+/**
+ * @brief Initialize cache line before adding it into eviction
+ *
+ * @note This operation is called under WR metadata lock
+ */
+static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id)
+{
+ uint8_t type;
+
+ type = cache->conf_meta->eviction_policy_type;
+
+ ENV_BUG_ON(type >= ocf_eviction_max);
+
+ if (likely(evict_policy_ops[type].init_cline))
+ evict_policy_ops[type].init_cline(cache, line);
+}
+
+static inline void ocf_eviction_purge_cache_line(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ uint8_t type = cache->conf_meta->eviction_policy_type;
+
+ ENV_BUG_ON(type >= ocf_eviction_max);
+
+ if (likely(evict_policy_ops[type].rm_cline)) {
+ OCF_METADATA_EVICTION_LOCK();
+ evict_policy_ops[type].rm_cline(cache, line);
+ OCF_METADATA_EVICTION_UNLOCK();
+ }
+}
+
+
+static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
+{
+ uint8_t type = cache->conf_meta->eviction_policy_type;
+
+ if (likely(evict_policy_ops[type].can_evict))
+ return evict_policy_ops[type].can_evict(cache);
+
+ return true;
+}
+
+static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
+ ocf_queue_t io_queue, ocf_part_id_t part_id, uint32_t clines)
+{
+ uint8_t type;
+ uint32_t result = 0;
+
+ type = cache->conf_meta->eviction_policy_type;
+
+ ENV_BUG_ON(type >= ocf_eviction_max);
+
+ if (likely(evict_policy_ops[type].req_clines)) {
+ /*
+ * This is called under METADATA WR lock. No need to get
+ * eviction lock.
+ */
+ result = evict_policy_ops[type].req_clines(cache, io_queue,
+ part_id, clines);
+ }
+
+ return result;
+}
+
+static inline void ocf_eviction_set_hot_cache_line(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ uint8_t type = cache->conf_meta->eviction_policy_type;
+
+ ENV_BUG_ON(type >= ocf_eviction_max);
+
+ if (likely(evict_policy_ops[type].hot_cline)) {
+ OCF_METADATA_EVICTION_LOCK();
+ evict_policy_ops[type].hot_cline(cache, line);
+ OCF_METADATA_EVICTION_UNLOCK();
+ }
+}
+
+static inline void ocf_eviction_initialize(struct ocf_cache *cache,
+ ocf_part_id_t part_id)
+{
+ uint8_t type = cache->conf_meta->eviction_policy_type;
+
+ ENV_BUG_ON(type >= ocf_eviction_max);
+
+ if (likely(evict_policy_ops[type].init_evp)) {
+ OCF_METADATA_EVICTION_LOCK();
+ evict_policy_ops[type].init_evp(cache, part_id);
+ OCF_METADATA_EVICTION_UNLOCK();
+ }
+}
+
+#endif /* LAYER_EVICTION_POLICY_OPS_H_ */
diff --git a/src/spdk/ocf/src/metadata/metadata.c b/src/spdk/ocf/src/metadata/metadata.c
new file mode 100644
index 000000000..b8f7c36d7
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+
+#include "metadata.h"
+#include "metadata_hash.h"
+#include "metadata_io.h"
+#include "../ocf_priv.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+
+#define OCF_METADATA_DEBUG 0
+
+#if 1 == OCF_METADATA_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#endif
+
+int ocf_metadata_init(struct ocf_cache *cache,
+ ocf_cache_line_size_t cache_line_size)
+{
+ struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *)
+ &cache->metadata.iface;
+ int ret;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ENV_BUG_ON(cache->metadata.iface_priv);
+
+ *iface = *metadata_hash_get_iface();
+ ret = cache->metadata.iface.init(cache, cache_line_size);
+ if (ret) {
+ ocf_metadata_io_deinit(cache);
+ return ret;
+ }
+
+ ret = ocf_metadata_concurrency_init(&cache->metadata.lock);
+ if (ret) {
+ if (cache->metadata.iface.deinit)
+ cache->metadata.iface.deinit(cache);
+
+ ocf_metadata_io_deinit(cache);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size,
+ ocf_cache_line_size_t cache_line_size,
+ ocf_metadata_layout_t layout)
+{
+ OCF_DEBUG_TRACE(cache);
+ return cache->metadata.iface.init_variable_size(cache, device_size,
+ cache_line_size, layout);
+}
+
+void ocf_metadata_init_hash_table(struct ocf_cache *cache)
+{
+ OCF_DEBUG_TRACE(cache);
+ cache->metadata.iface.init_hash_table(cache);
+}
+
+void ocf_metadata_init_collision(struct ocf_cache *cache)
+{
+ OCF_DEBUG_TRACE(cache);
+ cache->metadata.iface.init_collision(cache);
+}
+
+void ocf_metadata_deinit(struct ocf_cache *cache)
+{
+ OCF_DEBUG_TRACE(cache);
+
+ if (cache->metadata.iface.deinit) {
+ cache->metadata.iface.deinit(cache);
+ }
+
+ ocf_metadata_concurrency_deinit(&cache->metadata.lock);
+
+ ocf_metadata_io_deinit(cache);
+}
+
+void ocf_metadata_deinit_variable_size(struct ocf_cache *cache)
+{
+ OCF_DEBUG_TRACE(cache);
+
+ if (cache->metadata.iface.deinit_variable_size)
+ cache->metadata.iface.deinit_variable_size(cache);
+}
+
+size_t ocf_metadata_size_of(struct ocf_cache *cache)
+{
+ return cache->metadata.iface.size_of(cache);
+}
+
+void ocf_metadata_error(struct ocf_cache *cache)
+{
+ if (cache->device->metadata_error == 0)
+ ocf_cache_log(cache, log_err, "Metadata Error\n");
+
+ env_bit_clear(ocf_cache_state_running, &cache->cache_state);
+ cache->device->metadata_error = -1;
+}
+
+ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache)
+{
+ return cache->metadata.iface.pages(cache);
+}
+
+ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache)
+{
+ return cache->metadata.iface.cachelines(cache);
+}
+
+void ocf_metadata_flush_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ ocf_metadata_start_shared_access(&cache->metadata.lock);
+ cache->metadata.iface.flush_all(cache, cmpl, priv);
+ ocf_metadata_end_shared_access(&cache->metadata.lock);
+}
+
+void ocf_metadata_load_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+ cache->metadata.iface.load_all(cache, cmpl, priv);
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+}
+
+void ocf_metadata_load_recovery(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cache->metadata.iface.load_recovery(cache, cmpl, priv);
+}
+
+void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+ cache->metadata.iface.flush_mark(cache, req, map_idx, to_state,
+ start, stop);
+}
+
+void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
+ struct ocf_request *req, ocf_req_end_t complete)
+{
+ cache->metadata.iface.flush_do_asynch(cache, req, complete);
+}
+
+struct ocf_metadata_read_sb_ctx;
+
+typedef void (*ocf_metadata_read_sb_end_t)(
+ struct ocf_metadata_read_sb_ctx *context);
+
+struct ocf_metadata_read_sb_ctx {
+ struct ocf_superblock_config superblock;
+ ocf_metadata_read_sb_end_t cmpl;
+ ocf_ctx_t ctx;
+ void *priv1;
+ void *priv2;
+ int error;
+};
+
+static void ocf_metadata_read_sb_complete(struct ocf_io *io, int error)
+{
+ struct ocf_metadata_read_sb_ctx *context = io->priv1;
+ ctx_data_t *data = ocf_io_get_data(io);
+
+ if (!error) {
+ /* Copy data from the IO buffer into the super block structure */
+ ctx_data_rd_check(context->ctx, &context->superblock, data,
+ sizeof(context->superblock));
+ }
+
+ ctx_data_free(context->ctx, data);
+ ocf_io_put(io);
+
+ context->error = error;
+ context->cmpl(context);
+
+ env_free(context);
+}
+
+static int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
+ ocf_metadata_read_sb_end_t cmpl, void *priv1, void *priv2)
+{
+ struct ocf_metadata_read_sb_ctx *context;
+ size_t sb_pages = BYTES_TO_PAGES(sizeof(context->superblock));
+ ctx_data_t *data;
+ struct ocf_io *io;
+ int result = 0;
+
+ /* Allocate memory for first page of super block */
+ context = env_zalloc(sizeof(*context), ENV_MEM_NORMAL);
+ if (!context) {
+ ocf_log(ctx, log_err, "Memory allocation error");
+ return -OCF_ERR_NO_MEM;
+ }
+
+ context->cmpl = cmpl;
+ context->ctx = ctx;
+ context->priv1 = priv1;
+ context->priv2 = priv2;
+
+ /* Allocate resources for IO */
+ io = ocf_volume_new_io(volume, NULL, 0, sb_pages * PAGE_SIZE,
+ OCF_READ, 0, 0);
+ if (!io) {
+ ocf_log(ctx, log_err, "Memory allocation error");
+ result = -OCF_ERR_NO_MEM;
+ goto err_io;
+ }
+
+ data = ctx_data_alloc(ctx, sb_pages);
+ if (!data) {
+ ocf_log(ctx, log_err, "Memory allocation error");
+ result = -OCF_ERR_NO_MEM;
+ goto err_data;
+ }
+
+ /*
+ * Read first page of cache device in order to recover metadata
+ * properties
+ */
+ result = ocf_io_set_data(io, data, 0);
+ if (result) {
+ ocf_log(ctx, log_err, "Metadata IO configuration error\n");
+ result = -OCF_ERR_IO;
+ goto err_set_data;
+ }
+
+ ocf_io_set_cmpl(io, context, NULL, ocf_metadata_read_sb_complete);
+ ocf_volume_submit_io(io);
+
+ return 0;
+
+err_set_data:
+ ctx_data_free(ctx, data);
+err_data:
+ ocf_io_put(io);
+err_io:
+ env_free(context);
+ return result;
+}
+
+static void ocf_metadata_load_properties_cmpl(
+ struct ocf_metadata_read_sb_ctx *context)
+{
+ struct ocf_metadata_load_properties properties;
+ struct ocf_superblock_config *superblock = &context->superblock;
+ ocf_metadata_load_properties_end_t cmpl = context->priv1;
+ void *priv = context->priv2;
+ ocf_ctx_t ctx = context->ctx;
+
+ if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
+ ocf_log(ctx, log_info, "Cannot detect pre-existing metadata\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_METADATA, NULL);
+ }
+
+ if (METADATA_VERSION() != superblock->metadata_version) {
+ ocf_log(ctx, log_err, "Metadata version mismatch!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_METADATA_VER, NULL);
+ }
+
+ if (!ocf_cache_line_size_is_valid(superblock->line_size)) {
+ ocf_log(ctx, log_err, "ERROR: Invalid cache line size!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+ }
+
+ if ((unsigned)superblock->metadata_layout >= ocf_metadata_layout_max) {
+ ocf_log(ctx, log_err, "ERROR: Invalid metadata layout!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+ }
+
+ if (superblock->cache_mode >= ocf_cache_mode_max) {
+ ocf_log(ctx, log_err, "ERROR: Invalid cache mode!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+ }
+
+ if (superblock->clean_shutdown > ocf_metadata_clean_shutdown) {
+ ocf_log(ctx, log_err, "ERROR: Invalid shutdown status!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+ }
+
+ if (superblock->dirty_flushed > DIRTY_FLUSHED) {
+ ocf_log(ctx, log_err, "ERROR: Invalid flush status!\n");
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+ }
+
+ properties.line_size = superblock->line_size;
+ properties.layout = superblock->metadata_layout;
+ properties.cache_mode = superblock->cache_mode;
+ properties.shutdown_status = superblock->clean_shutdown;
+ properties.dirty_flushed = superblock->dirty_flushed;
+ properties.cache_name = superblock->name;
+
+ OCF_CMPL_RET(priv, 0, &properties);
+}
+
+void ocf_metadata_load_properties(ocf_volume_t volume,
+ ocf_metadata_load_properties_end_t cmpl, void *priv)
+{
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_metadata_read_sb(volume->cache->owner, volume,
+ ocf_metadata_load_properties_cmpl, cmpl, priv);
+ if (result)
+ OCF_CMPL_RET(priv, result, NULL);
+}
+
+static void ocf_metadata_probe_cmpl(struct ocf_metadata_read_sb_ctx *context)
+{
+ struct ocf_metadata_probe_status status;
+ struct ocf_superblock_config *superblock = &context->superblock;
+ ocf_metadata_probe_end_t cmpl = context->priv1;
+ void *priv = context->priv2;
+
+ if (superblock->magic_number != CACHE_MAGIC_NUMBER)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_METADATA, NULL);
+
+ if (METADATA_VERSION() != superblock->metadata_version)
+ OCF_CMPL_RET(priv, -OCF_ERR_METADATA_VER, NULL);
+
+ if (superblock->clean_shutdown > ocf_metadata_clean_shutdown)
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+
+ if (superblock->dirty_flushed > DIRTY_FLUSHED)
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, NULL);
+
+ status.clean_shutdown = (superblock->clean_shutdown !=
+ ocf_metadata_dirty_shutdown);
+ status.cache_dirty = (superblock->dirty_flushed == DIRTY_NOT_FLUSHED);
+ env_strncpy(status.cache_name, OCF_CACHE_NAME_SIZE, superblock->name,
+ OCF_CACHE_NAME_SIZE);
+
+ OCF_CMPL_RET(priv, 0, &status);
+}
+
+void ocf_metadata_probe(ocf_ctx_t ctx, ocf_volume_t volume,
+ ocf_metadata_probe_end_t cmpl, void *priv)
+{
+ int result;
+
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(volume);
+
+ result = ocf_metadata_read_sb(ctx, volume, ocf_metadata_probe_cmpl,
+ cmpl, priv);
+ if (result)
+ OCF_CMPL_RET(priv, result, NULL);
+}
+
+/* completion context for query_cores */
+struct ocf_metadata_query_cores_context
+{
+ ocf_metadata_probe_cores_end_t cmpl;
+ void *priv;
+};
+
+static void ocf_metadata_query_cores_end(void *_context, int error,
+ unsigned num_cores)
+{
+ struct ocf_metadata_query_cores_context *context = _context;
+
+ context->cmpl(context->priv, error, num_cores);
+ env_vfree(context);
+}
+
+void ocf_metadata_probe_cores(ocf_ctx_t ctx, ocf_volume_t volume,
+ struct ocf_volume_uuid *uuids, uint32_t uuids_count,
+ ocf_metadata_probe_cores_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_query_cores_context *context;
+ const struct ocf_metadata_iface *iface;
+
+ context = env_vzalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM, 0);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ iface = metadata_hash_get_iface();
+ iface->query_cores(ctx, volume, uuids, uuids_count,
+ ocf_metadata_query_cores_end, context);
+}
+
+
diff --git a/src/spdk/ocf/src/metadata/metadata.h b/src/spdk/ocf/src/metadata/metadata.h
new file mode 100644
index 000000000..f776c1d6f
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_H__
+#define __METADATA_H__
+
+#include "metadata_common.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "metadata_cleaning_policy.h"
+#include "metadata_eviction_policy.h"
+#include "metadata_partition.h"
+#include "metadata_hash.h"
+#include "metadata_superblock.h"
+#include "metadata_status.h"
+#include "metadata_collision.h"
+#include "metadata_core.h"
+#include "metadata_misc.h"
+
+#define INVALID 0
+#define VALID 1
+#define CLEAN 2
+#define DIRTY 3
+
+/**
+ * @brief Initialize metadata
+ *
+ * @param cache - Cache instance
+ * @param cache_line_size Cache line size
+ * @return 0 - Operation success otherwise failure
+ */
+int ocf_metadata_init(struct ocf_cache *cache,
+ ocf_cache_line_size_t cache_line_size);
+
+/**
+ * @brief Initialize per-cacheline metadata
+ *
+ * @param cache - Cache instance
+ * @param device_size - Device size in bytes
+ * @param cache_line_size Cache line size
+ * @return 0 - Operation success otherwise failure
+ */
+int ocf_metadata_init_variable_size(struct ocf_cache *cache,
+ uint64_t device_size, ocf_cache_line_size_t cache_line_size,
+ ocf_metadata_layout_t layout);
+
+/**
+ * @brief Initialize freelist partition
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_init_freelist_partition(struct ocf_cache *cache);
+
+/**
+ * @brief Initialize hash table
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_init_hash_table(struct ocf_cache *cache);
+
+/**
+ * @brief Initialize collision table
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_init_collision(struct ocf_cache *cache);
+
+/**
+ * @brief De-Initialize metadata
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_deinit(struct ocf_cache *cache);
+
+/**
+ * @brief De-Initialize per-cacheline metadata
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_deinit_variable_size(struct ocf_cache *cache);
+
+/**
+ * @brief Get memory footprint
+ *
+ * @param cache - Cache instance
+ * @return Metadata memory footprint
+ */
+size_t ocf_metadata_size_of(struct ocf_cache *cache);
+
+/**
+ * @brief Handle metadata error
+ *
+ * @param cache - Cache instance
+ */
+void ocf_metadata_error(struct ocf_cache *cache);
+
+/**
+ * @brief Get amount of cache lines
+ *
+ * @param cache - Cache instance
+ * @return Amount of cache lines (cache device lines - metadata space)
+ */
+ocf_cache_line_t
+ocf_metadata_get_cachelines_count(struct ocf_cache *cache);
+
+/**
+ * @brief Get amount of pages required for metadata
+ *
+ * @param cache - Cache instance
+ * @return Pages required to store metadata on the cache device
+ */
+ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
+
+/**
+ * @brief Flush metadata
+ *
+ * @param cache - Cache instance
+ * @param cmpl - Completion callback
+ * @param priv - Completion context
+ */
+void ocf_metadata_flush_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/**
+ * @brief Mark specified cache line to be flushed
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line which is to be flushed
+ */
+void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
+
+/**
+ * @brief Flush marked cache lines asynchronously
+ *
+ * @param cache - Cache instance
+ * @param req - request whose marked cache lines should be flushed
+ * @param complete - flushing completion callback
+ */
+void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
+ struct ocf_request *req, ocf_req_end_t complete);
+
+/**
+ * @brief Load metadata
+ *
+ * @param cache - Cache instance
+ * @param cmpl - Completion callback
+ * @param priv - Completion context
+ */
+void ocf_metadata_load_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/**
+ * @brief Load metadata required for recovery procedure
+ *
+ * @param cache Cache instance
+ * @param cmpl - Completion callback
+ * @param priv - Completion context
+ */
+void ocf_metadata_load_recovery(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+
+/**
+ * @brief Get reserved area lba
+ *
+ * @param cache Cache instance
+ */
+static inline uint64_t ocf_metadata_get_reserved_lba(ocf_cache_t cache)
+{
+ return cache->metadata.iface.get_reserved_lba(cache);
+}
+
+/*
+ * NOTE Hash table accessors are specific to the hash table metadata service
+ * implementation and should be used internally by the metadata service.
+ * At the moment there is no high level metadata interface for them, so they
+ * are temporarily defined in this file.
+ */
+
+static inline ocf_cache_line_t
+ocf_metadata_get_hash(struct ocf_cache *cache, ocf_cache_line_t index)
+{
+ return cache->metadata.iface.get_hash(cache, index);
+}
+
+static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
+ ocf_cache_line_t index, ocf_cache_line_t line)
+{
+ cache->metadata.iface.set_hash(cache, index, line);
+}
+
+static inline ocf_cache_line_t ocf_metadata_entries_hash(
+ struct ocf_cache *cache)
+{
+ return cache->metadata.iface.entries_hash(cache);
+}
+
+struct ocf_metadata_load_properties {
+ enum ocf_metadata_shutdown_status shutdown_status;
+ uint8_t dirty_flushed;
+ ocf_metadata_layout_t layout;
+ ocf_cache_line_size_t line_size;
+ ocf_cache_mode_t cache_mode;
+ char *cache_name;
+};
+
+typedef void (*ocf_metadata_load_properties_end_t)(void *priv, int error,
+ struct ocf_metadata_load_properties *properties);
+
+void ocf_metadata_load_properties(ocf_volume_t volume,
+ ocf_metadata_load_properties_end_t cmpl, void *priv);
+
+static inline ocf_cache_line_t ocf_metadata_collision_table_entries(
+ struct ocf_cache *cache)
+{
+ return cache->device->collision_table_entries;
+}
+
+#endif /* __METADATA_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_bit.h b/src/spdk/ocf/src/metadata/metadata_bit.h
new file mode 100644
index 000000000..0d5611136
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_bit.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/*******************************************************************************
+ * Sector mask getter
+ ******************************************************************************/
+
+static inline uint64_t _get_mask(uint8_t start, uint8_t stop)
+{
+ uint64_t mask = 0;
+
+ ENV_BUG_ON(start >= 64);
+ ENV_BUG_ON(stop >= 64);
+ ENV_BUG_ON(stop < start);
+
+ mask = ~mask;
+ mask >>= start + (63 - stop);
+ mask <<= start;
+
+ return mask;
+}
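+
+/*
+ * Illustrative example (not part of the original code): for start = 2 and
+ * stop = 5 the mask is ~0ULL >> (2 + (63 - 5)) << 2 == 0x3C (binary 111100),
+ * i.e. bits 2..5 are set inclusively.
+ */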
+
+#define _get_mask_u8(start, stop) _get_mask(start, stop)
+#define _get_mask_u16(start, stop) _get_mask(start, stop)
+#define _get_mask_u32(start, stop) _get_mask(start, stop)
+#define _get_mask_u64(start, stop) _get_mask(start, stop)
+
+typedef __uint128_t u128;
+
+static inline u128 _get_mask_u128(uint8_t start, uint8_t stop)
+{
+ u128 mask = 0;
+
+ ENV_BUG_ON(start >= 128);
+ ENV_BUG_ON(stop >= 128);
+ ENV_BUG_ON(stop < start);
+
+ mask = ~mask;
+ mask >>= start + (127 - stop);
+ mask <<= start;
+
+ return mask;
+}
+
+#define ocf_metadata_bit_struct(type) \
+struct ocf_metadata_map_##type { \
+ struct ocf_metadata_map map; \
+ type valid; \
+ type dirty; \
+} __attribute__((packed))
+
+#define ocf_metadata_bit_func(what, type) \
+static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \
+{ \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ const struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ if (all) { \
+ if (mask == (map[line].what & mask)) { \
+ return true; \
+ } else { \
+ return false; \
+ } \
+ } else { \
+ if (map[line].what & mask) { \
+ return true; \
+ } else { \
+ return false; \
+ } \
+ } \
+} \
+\
+static bool _ocf_metadata_test_out_##what##_##type(struct ocf_cache *cache, \
+ ocf_cache_line_t line, uint8_t start, uint8_t stop) \
+{ \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ const struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ if (map[line].what & ~mask) { \
+ return true; \
+ } else { \
+ return false; \
+ } \
+} \
+\
+static bool _ocf_metadata_clear_##what##_##type(struct ocf_cache *cache, \
+ ocf_cache_line_t line, uint8_t start, uint8_t stop) \
+{ \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ map[line].what &= ~mask; \
+\
+ if (map[line].what) { \
+ return true; \
+ } else { \
+ return false; \
+ } \
+} \
+\
+static bool _ocf_metadata_set_##what##_##type(struct ocf_cache *cache, \
+ ocf_cache_line_t line, uint8_t start, uint8_t stop) \
+{ \
+ bool result; \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ result = map[line].what ? true : false; \
+\
+ map[line].what |= mask; \
+\
+ return result; \
+} \
+\
+static bool _ocf_metadata_test_and_set_##what##_##type( \
+ struct ocf_cache *cache, ocf_cache_line_t line, \
+ uint8_t start, uint8_t stop, bool all) \
+{ \
+ bool test; \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ if (all) { \
+ if (mask == (map[line].what & mask)) { \
+ test = true; \
+ } else { \
+ test = false; \
+ } \
+ } else { \
+ if (map[line].what & mask) { \
+ test = true; \
+ } else { \
+ test = false; \
+ } \
+ } \
+\
+ map[line].what |= mask; \
+ return test; \
+} \
+\
+static bool _ocf_metadata_test_and_clear_##what##_##type( \
+ struct ocf_cache *cache, ocf_cache_line_t line, \
+ uint8_t start, uint8_t stop, bool all) \
+{ \
+ bool test; \
+ type mask = _get_mask_##type(start, stop); \
+\
+ struct ocf_metadata_hash_ctrl *ctrl = \
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
+\
+ struct ocf_metadata_raw *raw = \
+ &ctrl->raw_desc[metadata_segment_collision]; \
+\
+ struct ocf_metadata_map_##type *map = raw->mem_pool; \
+\
+ _raw_bug_on(raw, line); \
+\
+ if (all) { \
+ if (mask == (map[line].what & mask)) { \
+ test = true; \
+ } else { \
+ test = false; \
+ } \
+ } else { \
+ if (map[line].what & mask) { \
+ test = true; \
+ } else { \
+ test = false; \
+ } \
+ } \
+\
+ map[line].what &= ~mask; \
+ return test; \
+} \
+
+ocf_metadata_bit_struct(u8);
+ocf_metadata_bit_struct(u16);
+ocf_metadata_bit_struct(u32);
+ocf_metadata_bit_struct(u64);
+ocf_metadata_bit_struct(u128);
+
+ocf_metadata_bit_func(dirty, u8);
+ocf_metadata_bit_func(dirty, u16);
+ocf_metadata_bit_func(dirty, u32);
+ocf_metadata_bit_func(dirty, u64);
+ocf_metadata_bit_func(dirty, u128);
+
+ocf_metadata_bit_func(valid, u8);
+ocf_metadata_bit_func(valid, u16);
+ocf_metadata_bit_func(valid, u32);
+ocf_metadata_bit_func(valid, u64);
+ocf_metadata_bit_func(valid, u128);
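+
+/*
+ * Illustrative usage of the generated helpers (a sketch, not part of the
+ * original header): the macro expansions above define, among others,
+ * _ocf_metadata_test_valid_u8(cache, line, start, stop, all), which with
+ * all == true returns true only when every sector bit in [start, stop] of
+ * the "valid" field is set, and with all == false returns true when any of
+ * them is set. Similarly, _ocf_metadata_set_dirty_u8(cache, line, start,
+ * stop) ORs the mask into the "dirty" field and returns whether the field
+ * was non-zero before the call.
+ */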
diff --git a/src/spdk/ocf/src/metadata/metadata_cleaning_policy.h b/src/spdk/ocf/src/metadata/metadata_cleaning_policy.h
new file mode 100644
index 000000000..2efb288ff
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_cleaning_policy.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_CLEANING_POLICY_H__
+#define __METADATA_CLEANING_POLICY_H__
+
+/*
+ * GET
+ */
+static inline void
+ocf_metadata_get_cleaning_policy(struct ocf_cache *cache,
+ ocf_cache_line_t line, struct cleaning_policy_meta *policy)
+{
+ cache->metadata.iface.get_cleaning_policy(cache, line, policy);
+}
+
+/*
+ * SET
+ */
+static inline void
+ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
+ ocf_cache_line_t line, struct cleaning_policy_meta *policy)
+{
+ cache->metadata.iface.set_cleaning_policy(cache, line, policy);
+}
+
+#endif /* METADATA_CLEANING_POLICY_H_ */
diff --git a/src/spdk/ocf/src/metadata/metadata_collision.c b/src/spdk/ocf/src/metadata/metadata_collision.c
new file mode 100644
index 000000000..cea5f4aaa
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_collision.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "metadata.h"
+#include "../utils/utils_cache_line.h"
+
+/*
+ *
+ */
+void ocf_metadata_add_to_collision(struct ocf_cache *cache,
+ ocf_core_id_t core_id, uint64_t core_line,
+ ocf_cache_line_t hash, ocf_cache_line_t cache_line)
+{
+ ocf_cache_line_t prev_cache_line = ocf_metadata_get_hash(cache, hash);
+ ocf_cache_line_t line_entries = cache->device->collision_table_entries;
+ ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
+
+ ENV_BUG_ON(!(hash < hash_entries));
+ ENV_BUG_ON(!(cache_line < line_entries));
+
+ /* Setup new node */
+ ocf_metadata_set_core_info(cache, cache_line, core_id,
+ core_line);
+
+ /* Update collision info:
+ * - next is set to the previous bucket head taken from the hash table;
+ * - previous is set to the collision table entries count (invalid index)
+ */
+ ocf_metadata_set_collision_info(cache, cache_line, prev_cache_line,
+ line_entries);
+
+ /* Update previous head */
+ if (prev_cache_line != line_entries) {
+ ocf_metadata_set_collision_prev(cache, prev_cache_line,
+ cache_line);
+ }
+
+ /* Update hash table: the hash table stores indexes into the
+ * collision table, so point this bucket at the new head
+ */
+ ocf_metadata_set_hash(cache, hash, cache_line);
+}
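+
+/*
+ * Illustrative example (not part of the original code): if hash bucket H
+ * currently points at line 7 and line 12 is added, line 12 gets next = 7
+ * and prev = line_entries (the invalid index marking the list head), line 7
+ * gets prev = 12, and the hash table entry H is updated to point at 12 -
+ * new lines are always inserted at the head of the bucket's collision list.
+ */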
+
+/*
+ *
+ */
+void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id)
+{
+ ocf_core_id_t core_id;
+ uint64_t core_sector;
+ ocf_cache_line_t hash_father;
+ ocf_cache_line_t prev_line, next_line;
+ ocf_cache_line_t line_entries = cache->device->collision_table_entries;
+ ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
+
+ ENV_BUG_ON(!(line < line_entries));
+
+ ocf_metadata_get_collision_info(cache, line, &next_line, &prev_line);
+
+ /* Update previous node if any. */
+ if (prev_line != line_entries)
+ ocf_metadata_set_collision_next(cache, prev_line, next_line);
+
+ /* Update next node if any. */
+ if (next_line != line_entries)
+ ocf_metadata_set_collision_prev(cache, next_line, prev_line);
+
+ ocf_metadata_get_core_info(cache, line, &core_id, &core_sector);
+
+ /* Update the hash table: if it was pointing to the given node, it
+ * must now point to that node's next
+ */
+ hash_father = ocf_metadata_hash_func(cache, core_sector, core_id);
+ ENV_BUG_ON(!(hash_father < hash_entries));
+
+ if (ocf_metadata_get_hash(cache, hash_father) == line)
+ ocf_metadata_set_hash(cache, hash_father, next_line);
+
+ ocf_metadata_set_collision_info(cache, line,
+ line_entries, line_entries);
+
+ ocf_metadata_set_core_info(cache, line,
+ OCF_CORE_MAX, ULLONG_MAX);
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_collision.h b/src/spdk/ocf/src/metadata/metadata_collision.h
new file mode 100644
index 000000000..33459b934
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_collision.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_COLLISION_H__
+#define __METADATA_COLLISION_H__
+
+/**
+ * @brief Metadata list info structure
+ */
+
+struct ocf_metadata_list_info {
+ ocf_cache_line_t prev_col;
+ /*!< Previous cache line in collision list */
+ ocf_cache_line_t next_col;
+ /*!< Next cache line in collision list*/
+ ocf_cache_line_t partition_prev;
+ /*!< Previous cache line in the same partition*/
+ ocf_cache_line_t partition_next;
+ /*!< Next cache line in the same partition*/
+ ocf_part_id_t partition_id : 8;
+ /*!< ID of the partition to which this cache line is assigned */
+} __attribute__((packed));
+
+/**
+ * @brief Metadata map structure
+ */
+
+struct ocf_metadata_map {
+ uint64_t core_line;
+ /*!< Core line address mapped by this structure */
+
+ uint16_t core_id;
+ /*!< ID of the core to which this cache line is assigned */
+
+ uint8_t status[];
+ /*!< Entry status structure e.g. valid, dirty...*/
+} __attribute__((packed));
+
+static inline ocf_cache_line_t ocf_metadata_map_lg2phy(
+ struct ocf_cache *cache, ocf_cache_line_t coll_idx)
+{
+ return cache->metadata.iface.layout_iface->lg2phy(cache,
+ coll_idx);
+}
+
+static inline ocf_cache_line_t ocf_metadata_map_phy2lg(
+ struct ocf_cache *cache, ocf_cache_line_t cache_line)
+{
+ return cache->metadata.iface.layout_iface->phy2lg(cache,
+ cache_line);
+}
+
+static inline void ocf_metadata_set_collision_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next, ocf_cache_line_t prev)
+{
+ cache->metadata.iface.set_collision_info(cache, line, next, prev);
+}
+
+static inline void ocf_metadata_set_collision_next(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next)
+{
+ cache->metadata.iface.set_collision_next(cache, line, next);
+}
+
+static inline void ocf_metadata_set_collision_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t prev)
+{
+ cache->metadata.iface.set_collision_prev(cache, line, prev);
+}
+
+static inline void ocf_metadata_get_collision_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t *next, ocf_cache_line_t *prev)
+{
+ cache->metadata.iface.get_collision_info(cache, line, next, prev);
+}
+
+static inline ocf_cache_line_t ocf_metadata_get_collision_next(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ ocf_cache_line_t next;
+
+ ocf_metadata_get_collision_info(cache, line, &next, NULL);
+ return next;
+}
+
+static inline ocf_cache_line_t ocf_metadata_get_collision_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ ocf_cache_line_t prev;
+
+ ocf_metadata_get_collision_info(cache, line, NULL, &prev);
+ return prev;
+}
+
+void ocf_metadata_add_to_collision(struct ocf_cache *cache,
+ ocf_core_id_t core_id, uint64_t core_line,
+ ocf_cache_line_t hash, ocf_cache_line_t cache_line);
+
+void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id);
+
+static inline void ocf_metadata_start_collision_shared_access(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ cache->metadata.iface.start_collision_shared_access(cache, line);
+}
+
+static inline void ocf_metadata_end_collision_shared_access(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ cache->metadata.iface.end_collision_shared_access(cache, line);
+}
+
+#endif /* METADATA_COLLISION_H_ */
diff --git a/src/spdk/ocf/src/metadata/metadata_common.h b/src/spdk/ocf/src/metadata/metadata_common.h
new file mode 100644
index 000000000..25cddd445
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_common.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_COMMON_H__
+#define __METADATA_COMMON_H__
+
+typedef void (*ocf_metadata_end_t)(void *priv, int error);
+
+#endif /* __METADATA_COMMON_H__ */
+
diff --git a/src/spdk/ocf/src/metadata/metadata_core.h b/src/spdk/ocf/src/metadata/metadata_core.h
new file mode 100644
index 000000000..0004591d5
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_core.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_CORE_H__
+#define __METADATA_CORE_H__
+
+static inline void ocf_metadata_set_core_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t core_id,
+ uint64_t core_sector)
+{
+ cache->metadata.iface.set_core_info(cache, line, core_id,
+ core_sector);
+}
+
+static inline void ocf_metadata_get_core_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t *core_id,
+ uint64_t *core_sector)
+{
+ cache->metadata.iface.get_core_info(cache, line, core_id,
+ core_sector);
+}
+
+static inline void ocf_metadata_get_core_and_part_id(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_core_id_t *core_id, ocf_part_id_t *part_id)
+{
+ cache->metadata.iface.get_core_and_part_id(cache, line, core_id,
+ part_id);
+}
+
+static inline ocf_core_id_t ocf_metadata_get_core_id(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ return cache->metadata.iface.get_core_id(cache, line);
+}
+
+static inline struct ocf_metadata_uuid *ocf_metadata_get_core_uuid(
+ struct ocf_cache *cache, ocf_core_id_t core_id)
+{
+ return cache->metadata.iface.get_core_uuid(cache, core_id);
+}
+
+#endif /* METADATA_CORE_H_ */
diff --git a/src/spdk/ocf/src/metadata/metadata_eviction_policy.h b/src/spdk/ocf/src/metadata/metadata_eviction_policy.h
new file mode 100644
index 000000000..436492f60
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_eviction_policy.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_EVICTION_H__
+#define __METADATA_EVICTION_H__
+
+static inline void ocf_metadata_get_evicition_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ union eviction_policy_meta *eviction)
+{
+ cache->metadata.iface.get_eviction_policy(cache, line, eviction);
+}
+
+/*
+ * SET
+ */
+static inline void ocf_metadata_set_evicition_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ union eviction_policy_meta *eviction)
+{
+ cache->metadata.iface.set_eviction_policy(cache, line, eviction);
+}
+
+#endif /* METADATA_EVICTION_H_ */
diff --git a/src/spdk/ocf/src/metadata/metadata_hash.c b/src/spdk/ocf/src/metadata/metadata_hash.c
new file mode 100644
index 000000000..eccaaf120
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_hash.c
@@ -0,0 +1,2934 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_hash.h"
+#include "metadata_raw.h"
+#include "metadata_io.h"
+#include "metadata_status.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_pipeline.h"
+#include "../ocf_def_priv.h"
+#include "../ocf_priv.h"
+#include "../ocf_freelist.h"
+
+#define OCF_METADATA_HASH_DEBUG 0
+
+#if 1 == OCF_METADATA_HASH_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Metadata][Hash] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+#define METADATA_MEM_POOL(ctrl, section) ctrl->raw_desc[section].mem_pool
+
+static void ocf_metadata_hash_init_iface(struct ocf_cache *cache,
+ ocf_metadata_layout_t layout);
+
+#define OCF_METADATA_HASH_DIFF_MAX 1000
+
+enum {
+ ocf_metadata_status_type_valid = 0,
+ ocf_metadata_status_type_dirty,
+
+ ocf_metadata_status_type_max
+};
+
+static inline size_t ocf_metadata_status_sizeof(
+ const struct ocf_cache_line_settings *settings) {
+ /* Number of bytes required to mark cache line status */
+ size_t size = settings->sector_count / 8;
+
+ /* Number of types of status (valid, dirty, etc...) */
+ size *= ocf_metadata_status_type_max;
+
+ /* Total status bytes required per cache line */
+ return size;
+}
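+
+/*
+ * Worked example (illustrative, assuming 512-byte sectors): a 4 kiB cache
+ * line has 8 sectors, so each status type needs 8 / 8 = 1 byte; with two
+ * status types (valid, dirty) this gives 2 status bytes per cache line,
+ * which matches the u8 variant of the collision map structure.
+ */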
+
+/*
+ * Hash metadata control structure
+ */
+struct ocf_metadata_hash_ctrl {
+ ocf_cache_line_t cachelines;
+ ocf_cache_line_t start_page;
+ ocf_cache_line_t count_pages;
+ uint32_t device_lines;
+ size_t mapping_size;
+ struct ocf_metadata_raw raw_desc[metadata_segment_max];
+};
+
+/*
+ * get entries for specified metadata hash type
+ */
+static ocf_cache_line_t ocf_metadata_hash_get_entries(
+ enum ocf_metadata_segment type,
+ ocf_cache_line_t cache_lines)
+{
+ ENV_BUG_ON(type >= metadata_segment_variable_size_start && cache_lines == 0);
+
+ switch (type) {
+ case metadata_segment_collision:
+ case metadata_segment_cleaning:
+ case metadata_segment_eviction:
+ case metadata_segment_list_info:
+ return cache_lines;
+
+ case metadata_segment_hash:
+ return OCF_DIV_ROUND_UP(cache_lines, 4);
+
+ case metadata_segment_sb_config:
+ return OCF_DIV_ROUND_UP(sizeof(struct ocf_superblock_config),
+ PAGE_SIZE);
+
+ case metadata_segment_sb_runtime:
+ return OCF_DIV_ROUND_UP(sizeof(struct ocf_superblock_runtime),
+ PAGE_SIZE);
+
+ case metadata_segment_reserved:
+ return 32;
+
+ case metadata_segment_part_config:
+ return OCF_IO_CLASS_MAX + 1;
+
+ case metadata_segment_part_runtime:
+ return OCF_IO_CLASS_MAX + 1;
+
+ case metadata_segment_core_config:
+ return OCF_CORE_MAX;
+
+ case metadata_segment_core_runtime:
+ return OCF_CORE_MAX;
+
+ case metadata_segment_core_uuid:
+ return OCF_CORE_MAX;
+
+ default:
+ break;
+ }
+
+ ENV_BUG();
+ return 0;
+}
+
+/*
+ * Get size of particular hash metadata type element
+ */
+static int64_t ocf_metadata_hash_get_element_size(
+ enum ocf_metadata_segment type,
+ const struct ocf_cache_line_settings *settings)
+{
+ int64_t size = 0;
+
+ ENV_BUG_ON(type >= metadata_segment_variable_size_start && !settings);
+
+ switch (type) {
+ case metadata_segment_eviction:
+ size = sizeof(union eviction_policy_meta);
+ break;
+
+ case metadata_segment_cleaning:
+ size = sizeof(struct cleaning_policy_meta);
+ break;
+
+ case metadata_segment_collision:
+ size = sizeof(struct ocf_metadata_map)
+ + ocf_metadata_status_sizeof(settings);
+ break;
+
+ case metadata_segment_list_info:
+ size = sizeof(struct ocf_metadata_list_info);
+ break;
+
+ case metadata_segment_sb_config:
+ size = PAGE_SIZE;
+ break;
+
+ case metadata_segment_sb_runtime:
+ size = PAGE_SIZE;
+ break;
+
+ case metadata_segment_reserved:
+ size = PAGE_SIZE;
+ break;
+
+ case metadata_segment_part_config:
+ size = sizeof(struct ocf_user_part_config);
+ break;
+
+ case metadata_segment_part_runtime:
+ size = sizeof(struct ocf_user_part_runtime);
+ break;
+
+ case metadata_segment_hash:
+ size = sizeof(ocf_cache_line_t);
+ break;
+
+ case metadata_segment_core_config:
+ size = sizeof(struct ocf_core_meta_config);
+ break;
+
+ case metadata_segment_core_runtime:
+ size = sizeof(struct ocf_core_meta_runtime);
+ break;
+
+ case metadata_segment_core_uuid:
+ size = sizeof(struct ocf_metadata_uuid);
+ break;
+
+ default:
+ break;
+
+ }
+
+ ENV_BUG_ON(size > PAGE_SIZE);
+
+ return size;
+}
+
+/*
+ * Metadata calculation exception handling.
+ *
+ * @param cache - Cache instance
+ * @param unused_lines - Number of unused pages
+ * @param device_lines - Number of pages on the SSD cache device
+ *
+ * @return true - Unused space is acceptable
+ * @return false - Unused space is not acceptable
+ */
+static bool ocf_metadata_hash_calculate_exception_hndl(ocf_cache_t cache,
+ int64_t unused_lines, int64_t device_lines)
+{
+ static bool warn;
+ int64_t utilization = 0;
+
+ if (!warn) {
+ ocf_cache_log(cache, log_warn,
+ "Metadata size calculation problem\n");
+ warn = true;
+ }
+
+ if (unused_lines < 0)
+ return false;
+
+ /*
+ * Accepted disk utilization is at least 90% of the SSD space
+ */
+ utilization = (device_lines - unused_lines) * 100 / device_lines;
+
+ if (utilization < 90)
+ return false;
+
+ return true;
+}
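+
+/*
+ * Illustrative example: with device_lines = 1000 and unused_lines = 50 the
+ * utilization is (1000 - 50) * 100 / 1000 = 95, so the unused space is
+ * accepted; with unused_lines = 200 the utilization drops to 80 and the
+ * calculation is rejected.
+ */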
+
+/*
+ * Algorithm to calculate amount of cache lines taking into account required
+ * space for metadata
+ */
+static int ocf_metadata_hash_calculate_metadata_size(
+ struct ocf_cache *cache,
+ struct ocf_metadata_hash_ctrl *ctrl,
+ const struct ocf_cache_line_settings *settings)
+{
+ int64_t i_diff = 0, diff_lines = 0, cache_lines = ctrl->device_lines;
+ int64_t lowest_diff;
+ ocf_cache_line_t count_pages;
+ uint32_t i;
+
+ OCF_DEBUG_PARAM(cache, "Cache lines = %lld", cache_lines);
+
+ lowest_diff = cache_lines;
+
+ do {
+ count_pages = ctrl->count_pages;
+ for (i = metadata_segment_variable_size_start;
+ i < metadata_segment_max; i++) {
+ struct ocf_metadata_raw *raw = &ctrl->raw_desc[i];
+
+ /* Setup number of entries */
+ raw->entries
+ = ocf_metadata_hash_get_entries(i, cache_lines);
+
+ /*
+ * Setup SSD location and size
+ */
+ raw->ssd_pages_offset = count_pages;
+ raw->ssd_pages = OCF_DIV_ROUND_UP(raw->entries,
+ raw->entries_in_page);
+
+ /* Update offset for next container */
+ count_pages += ocf_metadata_raw_size_on_ssd(raw);
+ }
+
+ /*
+ * Check if max allowed iteration exceeded
+ */
+ if (i_diff >= OCF_METADATA_HASH_DIFF_MAX) {
+ /*
+ * We should never get here, but try to handle this exception
+ */
+ if (ocf_metadata_hash_calculate_exception_hndl(cache,
+ diff_lines, ctrl->device_lines)) {
+ break;
+ }
+
+ if (i_diff > (2 * OCF_METADATA_HASH_DIFF_MAX)) {
+ /*
+ * We tried but failed, so return an error
+ */
+ ocf_cache_log(cache, log_err,
+ "Metadata size calculation ERROR\n");
+ return -1;
+ }
+ }
+
+ /* Calculate diff of cache lines */
+
+ /* Cache size in bytes */
+ diff_lines = ctrl->device_lines * settings->size;
+ /* Subtract metadata size, which is in 4 kiB units */
+ diff_lines -= count_pages * PAGE_SIZE;
+ /* Convert back to cache lines */
+ diff_lines /= settings->size;
+ /* Calculate difference */
+ diff_lines -= cache_lines;
+
+ if (diff_lines > 0) {
+ if (diff_lines < lowest_diff)
+ lowest_diff = diff_lines;
+ else if (diff_lines == lowest_diff)
+ break;
+ }
+
+ /* Update new value of cache lines */
+ cache_lines += diff_lines;
+
+ OCF_DEBUG_PARAM(cache, "Diff pages = %lld", diff_lines);
+ OCF_DEBUG_PARAM(cache, "Cache lines = %lld", cache_lines);
+
+ i_diff++;
+
+ } while (diff_lines);
+
+ ctrl->count_pages = count_pages;
+ ctrl->cachelines = cache_lines;
+ OCF_DEBUG_PARAM(cache, "Cache lines = %u", ctrl->cachelines);
+
+ if (ctrl->device_lines < ctrl->cachelines)
+ return -1;
+
+ return 0;
+}
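+
+/*
+ * Illustrative walk-through of the sizing loop above (made-up numbers):
+ * with device_lines = 1,000,000 the first pass assumes all lines are usable
+ * and computes the metadata pages needed for that many lines; if those
+ * pages take up space equivalent to 20,000 lines, diff_lines becomes
+ * -20,000 and cache_lines drops to 980,000. The next pass needs slightly
+ * fewer metadata pages, so diff_lines shrinks toward zero and the loop
+ * settles on a self-consistent cache_lines value.
+ */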
+
+static const char * const ocf_metadata_hash_raw_names[] = {
+ [metadata_segment_sb_config] = "Super block config",
+ [metadata_segment_sb_runtime] = "Super block runtime",
+ [metadata_segment_reserved] = "Reserved",
+ [metadata_segment_part_config] = "Part config",
+ [metadata_segment_part_runtime] = "Part runtime",
+ [metadata_segment_cleaning] = "Cleaning",
+ [metadata_segment_eviction] = "Eviction",
+ [metadata_segment_collision] = "Collision",
+ [metadata_segment_list_info] = "List info",
+ [metadata_segment_hash] = "Hash",
+ [metadata_segment_core_config] = "Core config",
+ [metadata_segment_core_runtime] = "Core runtime",
+ [metadata_segment_core_uuid] = "Core UUID",
+};
+#if 1 == OCF_METADATA_HASH_DEBUG
+/*
+ * Debug info function printing metadata and RAW containers information
+ */
+static void ocf_metadata_hash_raw_info(struct ocf_cache *cache,
+ struct ocf_metadata_hash_ctrl *ctrl)
+{
+ uint64_t capacity = 0;
+ uint64_t capacity_sum = 0;
+ uint32_t i = 0;
+ const char *unit;
+
+ for (i = 0; i < metadata_segment_max; i++) {
+ struct ocf_metadata_raw *raw = &(ctrl->raw_desc[i]);
+
+ OCF_DEBUG_PARAM(cache, "Raw : name = %s",
+ ocf_metadata_hash_raw_names[i]);
+ OCF_DEBUG_PARAM(cache, " : metadata type = %u", i);
+ OCF_DEBUG_PARAM(cache, " : raw type = %u",
+ raw->raw_type);
+ OCF_DEBUG_PARAM(cache, " : entry size = %u",
+ raw->entry_size);
+ OCF_DEBUG_PARAM(cache, " : entries = %llu",
+ raw->entries);
+ OCF_DEBUG_PARAM(cache, " : entries in page = %u",
+ raw->entries_in_page);
+ OCF_DEBUG_PARAM(cache, " : page offset = %llu",
+ raw->ssd_pages_offset);
+ OCF_DEBUG_PARAM(cache, " : pages = %llu",
+ raw->ssd_pages);
+ }
+
+ /* Provide capacity info */
+ for (i = 0; i < metadata_segment_max; i++) {
+ capacity = ocf_metadata_raw_size_of(cache,
+ &(ctrl->raw_desc[i]));
+
+ capacity_sum += capacity;
+
+ if (capacity / MiB) {
+ capacity = capacity / MiB;
+ unit = "MiB";
+ } else {
+ unit = "KiB";
+ capacity = capacity / KiB;
+
+ }
+
+ OCF_DEBUG_PARAM(cache, "%s capacity %llu %s",
+ ocf_metadata_hash_raw_names[i], capacity, unit);
+ }
+}
+#else
+#define ocf_metadata_hash_raw_info(cache, ctrl)
+#endif
+
+/*
+ * Deinitialize hash metadata interface
+ */
+static void ocf_metadata_hash_deinit_variable_size(struct ocf_cache *cache)
+{
+
+ int result = 0;
+ uint32_t i = 0;
+
+ struct ocf_metadata_hash_ctrl *ctrl = (struct ocf_metadata_hash_ctrl *)
+ cache->metadata.iface_priv;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock);
+
+ /*
+ * Deinitialize RAW types
+ */
+ for (i = metadata_segment_variable_size_start;
+ i < metadata_segment_max; i++) {
+ result |= ocf_metadata_raw_deinit(cache,
+ &(ctrl->raw_desc[i]));
+ }
+}
+
+static inline void ocf_metadata_config_init(struct ocf_cache *cache,
+ struct ocf_cache_line_settings *settings, size_t size)
+{
+ ENV_BUG_ON(!ocf_cache_line_size_is_valid(size));
+
+ ENV_BUG_ON(env_memset(settings, sizeof(*settings), 0));
+
+ settings->size = size;
+ settings->sector_count = BYTES_TO_SECTORS(settings->size);
+ settings->sector_start = 0;
+ settings->sector_end = settings->sector_count - 1;
+
+ OCF_DEBUG_PARAM(cache, "Cache line size = %lu, bits count = %llu, "
+ "status size = %lu",
+ settings->size, settings->sector_count,
+ ocf_metadata_status_sizeof(settings));
+}
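+
+/*
+ * Illustrative example (assuming 512-byte sectors): a 64 kiB cache line
+ * gives sector_count = 128, sector_start = 0 and sector_end = 127, so each
+ * per-sector status type occupies 128 / 8 = 16 bytes, i.e. the u128 variant
+ * of the status bit structures.
+ */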
+
+static void ocf_metadata_hash_deinit(struct ocf_cache *cache)
+{
+ int result = 0;
+ uint32_t i;
+
+ struct ocf_metadata_hash_ctrl *ctrl = (struct ocf_metadata_hash_ctrl *)
+ cache->metadata.iface_priv;
+
+ for (i = 0; i < metadata_segment_fixed_size_max; i++) {
+ result |= ocf_metadata_raw_deinit(cache,
+ &(ctrl->raw_desc[i]));
+ }
+
+ env_vfree(ctrl);
+ cache->metadata.iface_priv = NULL;
+
+ if (result)
+ ENV_BUG();
+}
+
+static struct ocf_metadata_hash_ctrl *ocf_metadata_hash_ctrl_init(
+ bool metadata_volatile)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ uint32_t page = 0;
+ uint32_t i = 0;
+
+ ctrl = env_vzalloc(sizeof(*ctrl));
+ if (!ctrl)
+ return NULL;
+
+ /* Initial setup of RAW containers */
+ for (i = 0; i < metadata_segment_fixed_size_max; i++) {
+ struct ocf_metadata_raw *raw = &ctrl->raw_desc[i];
+
+ raw->metadata_segment = i;
+
+ /* Default type for metadata RAW container */
+ raw->raw_type = metadata_raw_type_ram;
+
+ if (metadata_volatile) {
+ raw->raw_type = metadata_raw_type_volatile;
+ } else if (i == metadata_segment_core_uuid) {
+ raw->raw_type = metadata_raw_type_dynamic;
+ }
+
+ /* Entry size configuration */
+ raw->entry_size
+ = ocf_metadata_hash_get_element_size(i, NULL);
+ raw->entries_in_page = PAGE_SIZE / raw->entry_size;
+
+ /* Setup number of entries */
+ raw->entries = ocf_metadata_hash_get_entries(i, 0);
+
+ /*
+ * Setup SSD location and size
+ */
+ raw->ssd_pages_offset = page;
+ raw->ssd_pages = OCF_DIV_ROUND_UP(raw->entries,
+ raw->entries_in_page);
+
+ /* Update offset for next container */
+ page += ocf_metadata_raw_size_on_ssd(raw);
+ }
+
+ ctrl->count_pages = page;
+
+ return ctrl;
+}
+
+int ocf_metadata_hash_init(struct ocf_cache *cache,
+ ocf_cache_line_size_t cache_line_size)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ struct ocf_metadata *metadata = &cache->metadata;
+ struct ocf_cache_line_settings *settings =
+ (struct ocf_cache_line_settings *)&metadata->settings;
+ struct ocf_core_meta_config *core_meta_config;
+ struct ocf_core_meta_runtime *core_meta_runtime;
+ struct ocf_user_part_config *part_config;
+ struct ocf_user_part_runtime *part_runtime;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ uint32_t i = 0;
+ int result = 0;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ENV_WARN_ON(metadata->iface_priv);
+
+ ocf_metadata_config_init(cache, settings, cache_line_size);
+
+ ctrl = ocf_metadata_hash_ctrl_init(metadata->is_volatile);
+ if (!ctrl)
+ return -OCF_ERR_NO_MEM;
+ metadata->iface_priv = ctrl;
+
+ for (i = 0; i < metadata_segment_fixed_size_max; i++) {
+ result |= ocf_metadata_raw_init(cache, NULL, NULL,
+ &(ctrl->raw_desc[i]));
+ if (result)
+ break;
+ }
+
+ if (result) {
+ ocf_metadata_hash_deinit(cache);
+ return result;
+ }
+
+ cache->conf_meta = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ /* Set partition metadata */
+ part_config = METADATA_MEM_POOL(ctrl, metadata_segment_part_config);
+ part_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_part_runtime);
+
+ for (i = 0; i < OCF_IO_CLASS_MAX + 1; i++) {
+ cache->user_parts[i].config = &part_config[i];
+ cache->user_parts[i].runtime = &part_runtime[i];
+ }
+
+ /* Set core metadata */
+ core_meta_config = METADATA_MEM_POOL(ctrl,
+ metadata_segment_core_config);
+ core_meta_runtime = METADATA_MEM_POOL(ctrl,
+ metadata_segment_core_runtime);
+
+ for_each_core_all(cache, core, core_id) {
+ core->conf_meta = &core_meta_config[core_id];
+ core->runtime_meta = &core_meta_runtime[core_id];
+ }
+
+ return 0;
+}
+
+/* metadata segment data + iterators */
+struct query_cores_data
+{
+ /* array of data */
+ ctx_data_t *data;
+ /* current metadata entry counter */
+ uint32_t entry;
+ /* number of entries per page */
+ uint32_t entries_in_page;
+};
+
+/* query cores context */
+struct query_cores_context
+{
+ ocf_ctx_t ctx;
+
+ struct ocf_superblock_config superblock;
+ struct ocf_metadata_uuid muuid;
+
+ struct {
+ struct query_cores_data core_uuids;
+ struct query_cores_data core_config;
+ struct query_cores_data superblock;
+ } data;
+
+ env_atomic count;
+ env_atomic error;
+
+ /* OCF entry point parameters */
+ struct {
+ struct ocf_volume_uuid *uuids;
+ uint32_t uuids_count;
+ void *priv;
+ ocf_metadata_query_cores_end_t cmpl;
+ } params;
+};
+
+/* copy next metadata entry from data to memory buffer */
+static void ocf_metadata_hash_query_cores_data_read(ocf_ctx_t ctx,
+ struct query_cores_data *data,
+ void *buf, uint32_t size)
+{
+ if (data->entry > 0 && data->entry % data->entries_in_page == 0) {
+ ctx_data_seek_check(ctx, data->data,
+ ctx_data_seek_current,
+ PAGE_SIZE - data->entries_in_page * size);
+ }
+
+ ctx_data_rd_check(ctx, buf, data->data, size);
+
+ ++data->entry;
+}
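+
+/*
+ * Illustrative example (made-up entry size): for a 24-byte entry and 4 kiB
+ * pages, entries_in_page is 4096 / 24 = 170, so after every 170 entries the
+ * read position is advanced by 4096 - 170 * 24 = 16 bytes to skip the
+ * padding at the end of the metadata page before reading the next entry.
+ */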
+
+static void ocf_metadata_query_cores_end(struct query_cores_context *context,
+ int error)
+{
+ ocf_ctx_t ctx = context->ctx;
+ unsigned i, core_idx;
+ struct ocf_metadata_uuid *muuid = &context->muuid;
+ struct ocf_core_meta_config core_config;
+ unsigned core_count = 0;
+ unsigned long valid_core_bitmap[(OCF_CORE_MAX /
+ (sizeof(unsigned long) * 8)) + 1];
+ unsigned out_cores;
+
+ if (error)
+ env_atomic_cmpxchg(&context->error, 0, error);
+
+ if (env_atomic_dec_return(&context->count))
+ return;
+
+ error = env_atomic_read(&context->error);
+ if (error)
+ goto exit;
+
+ /* read superblock */
+ ctx_data_rd_check(ctx, &context->superblock,
+ context->data.superblock.data,
+ sizeof(context->superblock));
+
+ if (context->superblock.magic_number != CACHE_MAGIC_NUMBER) {
+ error = -OCF_ERR_NO_METADATA;
+ goto exit;
+ }
+
+ env_memset(&valid_core_bitmap, sizeof(valid_core_bitmap), 0);
+
+ /* read valid cores from core config segment */
+ for (i = 0; i < OCF_CORE_MAX; i++) {
+ ocf_metadata_hash_query_cores_data_read(ctx,
+ &context->data.core_config,
+ &core_config, sizeof(core_config));
+ if (core_config.valid) {
+ env_bit_set(i, valid_core_bitmap);
+ ++core_count;
+ }
+ }
+
+ /* read core uuids */
+ out_cores = OCF_MIN(core_count, context->params.uuids_count);
+ for (i = 0, core_idx = 0; i < OCF_CORE_MAX && core_idx < out_cores;
+ i++) {
+ ocf_metadata_hash_query_cores_data_read(ctx,
+ &context->data.core_uuids,
+ muuid, sizeof(*muuid));
+
+ if (!env_bit_test(i, valid_core_bitmap))
+ continue;
+
+ if (muuid->size > OCF_VOLUME_UUID_MAX_SIZE) {
+ error = -OCF_ERR_INVAL;
+ goto exit;
+ }
+ if (muuid->size > context->params.uuids[core_idx].size) {
+ error = -OCF_ERR_INVAL;
+ goto exit;
+ }
+
+ error = env_memcpy(context->params.uuids[core_idx].data,
+ context->params.uuids[core_idx].size,
+ muuid->data, muuid->size);
+ if (error)
+ goto exit;
+ context->params.uuids[core_idx].size = muuid->size;
+
+ ++core_idx;
+ }
+
+exit:
+ /* provide actual core count to completion */
+ context->params.cmpl(context->params.priv, error, core_count);
+
+ /* free data */
+ ctx_data_free(ctx, context->data.core_uuids.data);
+ ctx_data_free(ctx, context->data.core_config.data);
+ ctx_data_free(ctx, context->data.superblock.data);
+
+ env_secure_free(context, sizeof(*context));
+}
+
+static void ocf_metadata_query_cores_end_io(struct ocf_io *io, int error)
+{
+ struct query_cores_context *context = io->priv1;
+
+ ocf_io_put(io);
+ ocf_metadata_query_cores_end(context, error);
+}
+
+static int ocf_metadata_query_cores_io(ocf_volume_t volume,
+ struct query_cores_context *context, ctx_data_t *data,
+ uint32_t offset, uint64_t page, uint32_t num_pages)
+{
+ struct ocf_io *io;
+ int err;
+
+ env_atomic_inc(&context->count);
+
+ /* Allocate new IO */
+ io = ocf_volume_new_io(volume, NULL,
+ PAGES_TO_BYTES(page),
+ PAGES_TO_BYTES(num_pages),
+ OCF_READ, 0, 0);
+ if (!io) {
+ err = -OCF_ERR_NO_MEM;
+ goto exit_error;
+ }
+
+ /* Setup IO */
+ ocf_io_set_cmpl(io, context, NULL,
+ ocf_metadata_query_cores_end_io);
+ err = ocf_io_set_data(io, data, PAGES_TO_BYTES(offset));
+ if (err) {
+ ocf_io_put(io);
+ goto exit_error;
+ }
+
+ ocf_volume_submit_io(io);
+
+ return 0;
+
+exit_error:
+ env_atomic_dec(&context->count);
+ return err;
+}
+
+int ocf_metadata_query_cores_segment_io(
+ struct query_cores_context *context,
+ ocf_ctx_t owner,
+ ocf_volume_t volume,
+ enum ocf_metadata_segment segment,
+ struct ocf_metadata_hash_ctrl *ctrl,
+ struct query_cores_data *segment_data)
+{
+ uint32_t pages_left;
+ uint32_t pages;
+ uint32_t addr;
+ uint32_t offset;
+ uint32_t io_count;
+ uint32_t i;
+ uint32_t max_pages_per_io;
+ int err = 0;
+ unsigned int max_io_size = ocf_volume_get_max_io_size(volume);
+
+ if (!max_io_size) {
+ err = -OCF_ERR_INVAL;
+ goto exit;
+ }
+
+ max_pages_per_io = max_io_size / PAGE_SIZE;
+
+ /* Allocate data */
+ segment_data->data = ctx_data_alloc(owner,
+ ctrl->raw_desc[segment].ssd_pages);
+ if (!segment_data->data) {
+ err = -OCF_ERR_NO_MEM;
+ goto exit;
+ }
+
+ segment_data->entries_in_page = ctrl->raw_desc[segment].entries_in_page;
+
+ io_count = OCF_DIV_ROUND_UP(ctrl->raw_desc[segment].ssd_pages,
+ max_pages_per_io);
+
+ /* submit segment data I/O */
+ pages_left = ctrl->raw_desc[segment].ssd_pages;
+ addr = ctrl->raw_desc[segment].ssd_pages_offset;
+ offset = 0;
+ i = 0;
+ while (pages_left) {
+ ENV_BUG_ON(i >= io_count);
+
+ pages = OCF_MIN(pages_left, max_pages_per_io);
+
+ err = ocf_metadata_query_cores_io(volume, context,
+ segment_data->data, offset, addr, pages);
+ if (err)
+ goto exit;
+
+ addr += pages;
+ offset += pages;
+ pages_left -= pages;
+ ++i;
+ }
+
+exit:
+ return err;
+}
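+
+/*
+ * Illustrative example (made-up numbers): with a volume max_io_size of
+ * 128 kiB and 4 kiB pages, max_pages_per_io is 32; a segment spanning
+ * 100 SSD pages is then submitted as four I/Os of 32, 32, 32 and 4 pages,
+ * each addressed relative to the segment's ssd_pages_offset.
+ */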
+
+void ocf_metadata_hash_query_cores(ocf_ctx_t owner, ocf_volume_t volume,
+ struct ocf_volume_uuid *uuid, uint32_t count,
+ ocf_metadata_query_cores_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ struct query_cores_context *context;
+ int err;
+
+ if (count > OCF_CORE_MAX)
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL, 0);
+
+ /* initialize query context */
+ context = env_secure_alloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM, 0);
+
+ ENV_BUG_ON(env_memset(context, sizeof(*context), 0));
+ context->ctx = owner;
+ context->params.cmpl = cmpl;
+ context->params.priv = priv;
+ context->params.uuids = uuid;
+ context->params.uuids_count = count;
+ env_atomic_set(&context->count, 1);
+
+ ctrl = ocf_metadata_hash_ctrl_init(false);
+ if (!ctrl) {
+ err = -OCF_ERR_NO_MEM;
+ goto exit;
+ }
+
+ /* superblock I/O */
+ err = ocf_metadata_query_cores_segment_io(context, owner,
+ volume, metadata_segment_sb_config, ctrl,
+ &context->data.superblock);
+ if (err)
+ goto exit;
+
+ /* core UUID I/O */
+ err = ocf_metadata_query_cores_segment_io(context, owner,
+ volume, metadata_segment_core_uuid, ctrl,
+ &context->data.core_uuids);
+ if (err)
+ goto exit;
+
+ /* core config I/O */
+ err = ocf_metadata_query_cores_segment_io(context, owner,
+ volume, metadata_segment_core_config, ctrl,
+ &context->data.core_config);
+ if (err)
+ goto exit;
+exit:
+ env_vfree(ctrl);
+ ocf_metadata_query_cores_end(context, err);
+}
+
+static void ocf_metadata_hash_flush_lock_collision_page(struct ocf_cache *cache,
+ struct ocf_metadata_raw *raw, uint32_t page)
+{
+ ocf_collision_start_exclusive_access(&cache->metadata.lock,
+ page);
+}
+
+static void ocf_metadata_hash_flush_unlock_collision_page(
+ struct ocf_cache *cache, struct ocf_metadata_raw *raw,
+ uint32_t page)
+{
+ ocf_collision_end_exclusive_access(&cache->metadata.lock,
+ page);
+}
+
+/*
+ * Initialize hash metadata interface
+ */
+static int ocf_metadata_hash_init_variable_size(struct ocf_cache *cache,
+ uint64_t device_size, ocf_cache_line_size_t cache_line_size,
+ ocf_metadata_layout_t layout)
+{
+ int result = 0;
+ uint32_t i = 0;
+ ocf_cache_line_t line;
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ struct ocf_cache_line_settings *settings =
+ (struct ocf_cache_line_settings *)&cache->metadata.settings;
+ ocf_flush_page_synch_t lock_page, unlock_page;
+ uint64_t device_lines;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ENV_WARN_ON(!cache->metadata.iface_priv);
+
+ ctrl = cache->metadata.iface_priv;
+
+ device_lines = device_size / cache_line_size;
+ if (device_lines >= (ocf_cache_line_t)(-1)){
+ /* TODO: This is just a rough check. The optimal place for this check
+ * would be in calculate_metadata_size. */
+ ocf_cache_log(cache, log_err, "Device exceeds maximum supported size "
+ "with this cache line size. Try a bigger cache line size.");
+ return -OCF_ERR_INVAL_CACHE_DEV;
+ }
+
+ ctrl->device_lines = device_lines;
+
+ if (settings->size != cache_line_size)
+ /* Re-initialize settings with different cache line size */
+ ocf_metadata_config_init(cache, settings, cache_line_size);
+
+ ctrl->mapping_size = ocf_metadata_status_sizeof(settings)
+ + sizeof(struct ocf_metadata_map);
+
+ ocf_metadata_hash_init_iface(cache, layout);
+
+ /* Initial setup of dynamic size RAW containers */
+ for (i = metadata_segment_variable_size_start;
+ i < metadata_segment_max; i++) {
+ struct ocf_metadata_raw *raw = &ctrl->raw_desc[i];
+
+ raw->metadata_segment = i;
+
+ /* Default type for metadata RAW container */
+ raw->raw_type = metadata_raw_type_ram;
+
+ if (cache->device->init_mode == ocf_init_mode_metadata_volatile) {
+ raw->raw_type = metadata_raw_type_volatile;
+ } else if (i == metadata_segment_collision &&
+ ocf_volume_is_atomic(&cache->device->volume)) {
+ raw->raw_type = metadata_raw_type_atomic;
+ }
+
+ /* Entry size configuration */
+ raw->entry_size
+ = ocf_metadata_hash_get_element_size(i, settings);
+ raw->entries_in_page = PAGE_SIZE / raw->entry_size;
+ }
+
+ if (0 != ocf_metadata_hash_calculate_metadata_size(cache, ctrl,
+ settings)) {
+ return -1;
+ }
+
+ OCF_DEBUG_PARAM(cache, "Metadata begin pages = %u", ctrl->start_page);
+ OCF_DEBUG_PARAM(cache, "Metadata count pages = %u", ctrl->count_pages);
+ OCF_DEBUG_PARAM(cache, "Metadata end pages = %u", ctrl->start_page
+ + ctrl->count_pages);
+
+ /*
+ * Initialize all dynamic size RAW types
+ */
+ for (i = metadata_segment_variable_size_start;
+ i < metadata_segment_max; i++) {
+ if (i == metadata_segment_collision) {
+ lock_page =
+ ocf_metadata_hash_flush_lock_collision_page;
+ unlock_page =
+ ocf_metadata_hash_flush_unlock_collision_page;
+ } else {
+ lock_page = unlock_page = NULL;
+ }
+
+ result |= ocf_metadata_raw_init(cache, lock_page, unlock_page,
+ &(ctrl->raw_desc[i]));
+
+ if (result)
+ goto finalize;
+ }
+
+ for (i = 0; i < metadata_segment_max; i++) {
+ ocf_cache_log(cache, log_info, "%s offset : %llu kiB\n",
+ ocf_metadata_hash_raw_names[i],
+ ctrl->raw_desc[i].ssd_pages_offset
+ * PAGE_SIZE / KiB);
+ if (i == metadata_segment_sb_config) {
+ ocf_cache_log(cache, log_info, "%s size : %lu B\n",
+ ocf_metadata_hash_raw_names[i],
+ offsetof(struct ocf_superblock_config, checksum)
+ + sizeof(((struct ocf_superblock_config *)0)
+ ->checksum));
+ } else if (i == metadata_segment_sb_runtime) {
+ ocf_cache_log(cache, log_info, "%s size : %lu B\n",
+ ocf_metadata_hash_raw_names[i],
+ sizeof(struct ocf_superblock_runtime));
+ } else {
+ ocf_cache_log(cache, log_info, "%s size : %llu kiB\n",
+ ocf_metadata_hash_raw_names[i],
+ ctrl->raw_desc[i].ssd_pages
+ * PAGE_SIZE / KiB);
+ }
+ }
+
+finalize:
+ if (result) {
+ /*
+ * Hash De-Init also contains RAW deinitialization
+ */
+ ocf_metadata_hash_deinit_variable_size(cache);
+ return result;
+ }
+
+ cache->device->runtime_meta = METADATA_MEM_POOL(ctrl,
+ metadata_segment_sb_runtime);
+
+ cache->device->collision_table_entries = ctrl->cachelines;
+
+ cache->device->hash_table_entries =
+ ctrl->raw_desc[metadata_segment_hash].entries;
+
+ cache->device->metadata_offset = ctrl->count_pages * PAGE_SIZE;
+
+ cache->conf_meta->cachelines = ctrl->cachelines;
+ cache->conf_meta->line_size = cache_line_size;
+
+ ocf_metadata_hash_raw_info(cache, ctrl);
+
+ ocf_cache_log(cache, log_info, "Cache line size: %llu kiB\n",
+ settings->size / KiB);
+
+ ocf_cache_log(cache, log_info, "Metadata capacity: %llu MiB\n",
+ (uint64_t)ocf_metadata_size_of(cache) / MiB);
+
+ /*
+ * Self test of metadata
+ */
+ for (line = 0; line < cache->device->collision_table_entries; line++) {
+ ocf_cache_line_t phy, lg;
+
+ phy = ocf_metadata_map_lg2phy(cache, line);
+ lg = ocf_metadata_map_phy2lg(cache, phy);
+
+ if (line != lg) {
+ result = -OCF_ERR_INVAL;
+ break;
+ }
+ env_cond_resched();
+ }
+
+ if (result == 0) {
+ ocf_cache_log(cache, log_info,
+ "OCF metadata self-test PASSED\n");
+ } else {
+ ocf_cache_log(cache, log_err,
+ "OCF metadata self-test ERROR\n");
+ }
+
+ result = ocf_metadata_concurrency_attached_init(&cache->metadata.lock,
+ cache, ctrl->raw_desc[metadata_segment_hash].entries,
+ (uint32_t)ctrl->raw_desc[metadata_segment_collision].
+ ssd_pages);
+ if (result) {
+ ocf_cache_log(cache, log_err, "Failed to initialize attached "
+ "metadata concurrency\n");
+ ocf_metadata_hash_deinit_variable_size(cache);
+ return result;
+ }
+
+ return 0;
+}
+
+static inline void _ocf_init_collision_entry(struct ocf_cache *cache,
+ ocf_cache_line_t idx)
+{
+ ocf_cache_line_t invalid_idx = cache->device->collision_table_entries;
+
+ ocf_metadata_set_collision_info(cache, idx, invalid_idx, invalid_idx);
+ ocf_metadata_set_core_info(cache, idx,
+ OCF_CORE_MAX, ULONG_MAX);
+ metadata_init_status_bits(cache, idx);
+}
+
+/*
+ * Initialize collision table
+ */
+static void ocf_metadata_hash_init_collision(struct ocf_cache *cache)
+{
+ unsigned int i;
+ unsigned int step = 0;
+
+ for (i = 0; i < cache->device->collision_table_entries; i++) {
+ _ocf_init_collision_entry(cache, i);
+ OCF_COND_RESCHED_DEFAULT(step);
+ }
+}
+
+/*
+ * Initialize hash table
+ */
+static void ocf_metadata_hash_init_hash_table(struct ocf_cache *cache)
+{
+ unsigned int i;
+ unsigned int hash_table_entries = cache->device->hash_table_entries;
+ ocf_cache_line_t invalid_idx = cache->device->collision_table_entries;
+
+ /* Init hash table */
+ for (i = 0; i < hash_table_entries; i++) {
+ /* hash_table contains indexes into collision_table,
+ * so its entries are initialized to an index that is
+ * invalid in collision_table
+ */
+ ocf_metadata_set_hash(cache, i, invalid_idx);
+ }
+
+}
+
+/*
+ * Get the number of pages dedicated to metadata
+ */
+static ocf_cache_line_t ocf_metadata_hash_pages(struct ocf_cache *cache)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ return ctrl->count_pages;
+}
+
+/*
+ * Get the number of cache lines
+ */
+static ocf_cache_line_t ocf_metadata_hash_cachelines(
+ struct ocf_cache *cache)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ return ctrl->cachelines;
+}
+
+static size_t ocf_metadata_hash_size_of(struct ocf_cache *cache)
+{
+ uint32_t i = 0;
+ size_t size = 0;
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ /*
+ * Get total size of all RAW metadata containers
+ */
+ for (i = 0; i < metadata_segment_max; i++) {
+ size += ocf_metadata_raw_size_of(cache,
+ &(ctrl->raw_desc[i]));
+ }
+
+ /* Get additional part of memory footprint */
+
+ /* Cache concurrency mechanism */
+ size += ocf_cache_line_concurrency_size_of(cache);
+
+ return size;
+}
+
+/*******************************************************************************
+ * Super Block
+ ******************************************************************************/
+
+struct ocf_metadata_hash_context {
+ ocf_metadata_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ struct ocf_metadata_raw segment_copy[metadata_segment_fixed_size_max];
+};
+
+static void ocf_metadata_hash_generic_complete(void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
+static void ocf_medatata_hash_load_segment(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ ocf_metadata_raw_load_all(cache, &ctrl->raw_desc[segment],
+ ocf_metadata_hash_generic_complete, context);
+}
+
+static void ocf_medatata_hash_store_segment(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ ocf_cache_t cache = context->cache;
+ int error;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ context->segment_copy[segment].mem_pool =
+ env_malloc(ctrl->raw_desc[segment].mem_pool_limit, ENV_MEM_NORMAL);
+ if (!context->segment_copy[segment].mem_pool)
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_NO_MEM);
+
+ error = env_memcpy(context->segment_copy[segment].mem_pool,
+ ctrl->raw_desc[segment].mem_pool_limit, METADATA_MEM_POOL(ctrl, segment),
+ ctrl->raw_desc[segment].mem_pool_limit);
+ if (error) {
+ env_free(context->segment_copy[segment].mem_pool);
+ context->segment_copy[segment].mem_pool = NULL;
+ OCF_PL_FINISH_RET(pipeline, error);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_check_crc_sb_config(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+ int segment = metadata_segment_sb_config;
+ uint32_t crc;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ crc = env_crc32(0, (void *)sb_config,
+ offsetof(struct ocf_superblock_config, checksum));
+
+ if (crc != sb_config->checksum[segment]) {
+ /* Checksum does not match */
+ ocf_cache_log(cache, log_err,
+ "Loading %s ERROR, invalid checksum",
+ ocf_metadata_hash_raw_names[segment]);
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_check_crc(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+ uint32_t crc;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ crc = ocf_metadata_raw_checksum(cache, &(ctrl->raw_desc[segment]));
+
+ if (crc != sb_config->checksum[segment]) {
+ /* Checksum does not match */
+ ocf_cache_log(cache, log_err,
+ "Loading %s ERROR, invalid checksum",
+ ocf_metadata_hash_raw_names[segment]);
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_load_superblock_post(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+ struct ocf_metadata_uuid *muuid;
+ struct ocf_volume_uuid uuid;
+ ocf_volume_type_t volume_type;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ for_each_core_metadata(cache, core, core_id) {
+ muuid = ocf_metadata_get_core_uuid(cache, core_id);
+ uuid.data = muuid->data;
+ uuid.size = muuid->size;
+
+ volume_type = ocf_ctx_get_volume_type(cache->owner,
+ core->conf_meta->type);
+
+ /* Initialize core volume */
+ ocf_volume_init(&core->volume, volume_type, &uuid, false);
+ }
+
+ /* Restore all dynamics items */
+
+ if (sb_config->core_count > OCF_CORE_MAX) {
+ ocf_cache_log(cache, log_err,
+ "Loading cache state ERROR, invalid cores count\n");
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
+ }
+
+ if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) {
+ ocf_cache_log(cache, log_err,
+ "Loading cache state ERROR, invalid partition count\n");
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_metadata_hash_load_sb_restore(
+ struct ocf_metadata_hash_context *context)
+{
+ ocf_cache_t cache = context->cache;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ int segment, error;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ for (segment = metadata_segment_sb_config;
+ segment < metadata_segment_fixed_size_max; segment++) {
+ if (!context->segment_copy[segment].mem_pool)
+ continue;
+
+ error = env_memcpy(METADATA_MEM_POOL(ctrl, segment),
+ ctrl->raw_desc[segment].mem_pool_limit,
+ context->segment_copy[segment].mem_pool,
+ ctrl->raw_desc[segment].mem_pool_limit);
+ ENV_BUG_ON(error);
+ }
+}
+
+static void ocf_metadata_hash_load_superblock_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int segment;
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
+ ocf_metadata_error(cache);
+ ocf_metadata_hash_load_sb_restore(context);
+ }
+
+ for (segment = metadata_segment_sb_config;
+ segment < metadata_segment_fixed_size_max; segment++) {
+ if (context->segment_copy[segment].mem_pool)
+ env_free(context->segment_copy[segment].mem_pool);
+ }
+
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_sb_store_segment_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_config),
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_part_config),
+ OCF_PL_ARG_INT(metadata_segment_part_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_sb_load_segment_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_config),
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_part_config),
+ OCF_PL_ARG_INT(metadata_segment_part_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_sb_check_crc_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_part_config),
+ OCF_PL_ARG_INT(metadata_segment_part_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_load_sb_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_superblock_finish,
+ .steps = {
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_store_segment,
+ ocf_metadata_hash_load_sb_store_segment_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_load_segment,
+ ocf_metadata_hash_load_sb_load_segment_args),
+ OCF_PL_STEP(ocf_medatata_hash_check_crc_sb_config),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_check_crc,
+ ocf_metadata_hash_load_sb_check_crc_args),
+ OCF_PL_STEP(ocf_medatata_hash_load_superblock_post),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Super Block - Load. This function must avoid overwriting existing pointers
+ */
+static void ocf_metadata_hash_load_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ struct ocf_superblock_runtime *sb_runtime;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ ENV_BUG_ON(!ctrl);
+
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+ ENV_BUG_ON(!sb_config);
+
+ sb_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_sb_runtime);
+ ENV_BUG_ON(!sb_runtime);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_sb_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_flush_superblock_prepare(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ /* Synchronize core objects types */
+ for_each_core_metadata(cache, core, core_id) {
+ core->conf_meta->type = ocf_ctx_get_volume_type_id(
+ cache->owner, core->volume.type);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_calculate_crc_sb_config(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ sb_config->checksum[metadata_segment_sb_config] = env_crc32(0,
+ (void *)sb_config,
+ offsetof(struct ocf_superblock_config, checksum));
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_calculate_crc(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ sb_config->checksum[segment] = ocf_metadata_raw_checksum(cache,
+ &(ctrl->raw_desc[segment]));
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_flush_segment(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ ocf_metadata_raw_flush_all(cache, &ctrl->raw_desc[segment],
+ ocf_metadata_hash_generic_complete, context);
+}
+
+static void ocf_metadata_hash_flush_superblock_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error)
+ ocf_metadata_error(cache);
+
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_flush_sb_calculate_crc_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_part_config),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_arg ocf_metadata_hash_flush_sb_flush_segment_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_config),
+ OCF_PL_ARG_INT(metadata_segment_part_config),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_flush_sb_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_flush_superblock_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_medatata_hash_flush_superblock_prepare),
+ OCF_PL_STEP(ocf_medatata_hash_calculate_crc_sb_config),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_calculate_crc,
+ ocf_metadata_hash_flush_sb_calculate_crc_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_flush_segment,
+ ocf_metadata_hash_flush_sb_flush_segment_args),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Super Block - FLUSH
+ */
+static void ocf_metadata_hash_flush_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_flush_sb_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+/**
+ * @brief Super Block - Set Shutdown Status
+ *
+ * To read the shutdown status back, call ocf_metadata_load_properties.
+ *
+ * @param cache - Cache instance
+ * @param shutdown_status - status to be assigned to the cache
+ * @param cmpl - completion callback invoked with the operation status
+ * @param priv - completion callback context
+ */
+static void ocf_metadata_hash_set_shutdown_status(ocf_cache_t cache,
+ enum ocf_metadata_shutdown_status shutdown_status,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *superblock;
+
+ OCF_DEBUG_TRACE(cache);
+
+ /*
+ * Get metadata hash service control structure
+ */
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ /*
+ * Get super block
+ */
+ superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ /* Set shutdown status */
+ superblock->clean_shutdown = shutdown_status;
+ superblock->magic_number = CACHE_MAGIC_NUMBER;
+
+ /* Flush superblock */
+ ocf_metadata_hash_flush_superblock(cache, cmpl, priv);
+}
+
+/*******************************************************************************
+ * RESERVED AREA
+ ******************************************************************************/
+
+static uint64_t ocf_metadata_hash_get_reserved_lba(
+ struct ocf_cache *cache)
+{
+ struct ocf_metadata_hash_ctrl *ctrl;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ return ctrl->raw_desc[metadata_segment_reserved].ssd_pages_offset *
+ PAGE_SIZE;
+}
+
+/*******************************************************************************
+ * FLUSH AND LOAD ALL
+ ******************************************************************************/
+
+static void ocf_medatata_hash_flush_all_set_status_complete(
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
+static void ocf_medatata_hash_flush_all_set_status(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ enum ocf_metadata_shutdown_status shutdown_status =
+ ocf_pipeline_arg_get_int(arg);
+
+ ocf_metadata_hash_set_shutdown_status(cache, shutdown_status,
+ ocf_medatata_hash_flush_all_set_status_complete,
+ context);
+}
+
+static void ocf_metadata_hash_flush_all_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n");
+ ocf_metadata_error(cache);
+ goto out;
+ }
+
+ ocf_cache_log(cache, log_info, "Done saving cache state!\n");
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_flush_all_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_part_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_runtime),
+ OCF_PL_ARG_INT(metadata_segment_cleaning),
+ OCF_PL_ARG_INT(metadata_segment_eviction),
+ OCF_PL_ARG_INT(metadata_segment_collision),
+ OCF_PL_ARG_INT(metadata_segment_list_info),
+ OCF_PL_ARG_INT(metadata_segment_hash),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
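+/*
+ * Flush-all ordering: mark a dirty shutdown first, flush every runtime
+ * segment, recompute the segment checksums in the superblock, and only then
+ * mark a clean shutdown (which flushes the superblock together with the
+ * fresh checksums), so an interrupted flush leaves the on-disk state marked
+ * as dirty.
+ */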
+struct ocf_pipeline_properties ocf_metadata_hash_flush_all_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_flush_all_finish,
+ .steps = {
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_flush_all_set_status,
+ ocf_metadata_dirty_shutdown),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_flush_segment,
+ ocf_metadata_hash_flush_all_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_calculate_crc,
+ ocf_metadata_hash_flush_all_args),
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_flush_all_set_status,
+ ocf_metadata_clean_shutdown),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Flush all metadata
+ */
+static void ocf_metadata_hash_flush_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_flush_all_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+/*
+ * Mark metadata of the specified cache line for flush
+ */
+static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t map_idx, int to_state,
+ uint8_t start, uint8_t stop)
+{
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ /*
+ * Mark all required metadata elements to make given metadata cache
+ * line persistent in case of recovery
+ */
+
+ /* Collision table - mapping from cache line to core (HDD) sector */
+ ocf_metadata_raw_flush_mark(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]),
+ req, map_idx, to_state, start, stop);
+}
+
+/*
+ * Flush specified cache lines asynchronously
+ */
+static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
+ struct ocf_request *req, ocf_req_end_t complete)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl = NULL;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ /*
+ * Flush all required metadata elements to make given metadata cache
+ * line persistent in case of recovery
+ */
+
+ env_atomic_inc(&req->req_remaining); /* Core device IO */
+
+ result |= ocf_metadata_raw_flush_do_asynch(cache, req,
+ &(ctrl->raw_desc[metadata_segment_collision]),
+ complete);
+
+ if (result) {
+ ocf_metadata_error(cache);
+ ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n");
+ }
+}
+
+static void ocf_metadata_hash_load_all_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
+ ocf_metadata_error(cache);
+ goto out;
+ }
+
+ ocf_cache_log(cache, log_info, "Done loading cache state\n");
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_all_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_core_runtime),
+ OCF_PL_ARG_INT(metadata_segment_cleaning),
+ OCF_PL_ARG_INT(metadata_segment_eviction),
+ OCF_PL_ARG_INT(metadata_segment_collision),
+ OCF_PL_ARG_INT(metadata_segment_list_info),
+ OCF_PL_ARG_INT(metadata_segment_hash),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_load_all_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_all_finish,
+ .steps = {
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_load_segment,
+ ocf_metadata_hash_load_all_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_check_crc,
+ ocf_metadata_hash_load_all_args),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Load all metadata
+ */
+static void ocf_metadata_hash_load_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_all_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
+ ocf_core_id_t core_id, uint64_t core_line,
+ ocf_cache_line_t cache_line)
+{
+ ocf_core_t core = ocf_cache_get_core(cache, core_id);
+ ocf_part_id_t part_id;
+ ocf_cache_line_t hash_index;
+
+ part_id = PARTITION_DEFAULT;
+
+ ocf_metadata_add_to_partition(cache, part_id, cache_line);
+
+ hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
+ ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
+ cache_line);
+
+ ocf_eviction_init_cache_line(cache, cache_line, part_id);
+
+ ocf_eviction_set_hot_cache_line(cache, cache_line);
+
+ env_atomic_inc(&core->runtime_meta->cached_clines);
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+
+ if (metadata_test_dirty(cache, cache_line)) {
+ env_atomic_inc(&core->runtime_meta->dirty_clines);
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+ env_atomic64_cmpxchg(&core->runtime_meta->dirty_since,
+ 0, env_get_tick_count());
+ }
+}
+
+static void _recovery_invalidate_clean_sec(struct ocf_cache *cache,
+ ocf_cache_line_t cline)
+{
+ uint8_t i;
+
+ for (i = ocf_line_start_sector(cache);
+ i <= ocf_line_end_sector(cache); i++) {
+ if (!metadata_test_dirty_one(cache, cline, i)) {
+ /* Invalidate clean sectors */
+ metadata_clear_valid_sec_one(cache, cline, i);
+ }
+ }
+}
+
+static void _recovery_reset_cline_metadata(struct ocf_cache *cache,
+ ocf_cache_line_t cline)
+{
+ ocf_cleaning_t clean_policy_type;
+
+ ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
+
+ metadata_clear_valid(cache, cline);
+
+ clean_policy_type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max);
+
+ if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
+ cleaning_policy_ops[clean_policy_type].
+ init_cache_block(cache, cline);
+}
+
+static void _recovery_rebuild_metadata(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ bool dirty_only = ocf_pipeline_arg_get_int(arg);
+ ocf_cache_t cache = context->cache;
+ ocf_cache_line_t cline;
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+ unsigned char step = 0;
+ const uint64_t collision_table_entries =
+ ocf_metadata_collision_table_entries(cache);
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
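+ /*
+ * dirty_only is set for legacy (non-atomic) recovery, where only dirty
+ * cache lines are restored and their clean sectors invalidated; atomic
+ * recovery (dirty_only == false) rebuilds every mapped cache line.
+ */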
+ for (cline = 0; cline < collision_table_entries; cline++) {
+ ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
+ if (core_id != OCF_CORE_MAX &&
+ (!dirty_only || metadata_test_dirty(cache,
+ cline))) {
+ /* Rebuild metadata for mapped cache line */
+ _recovery_rebuild_cline_metadata(cache, core_id,
+ core_line, cline);
+ if (dirty_only)
+ _recovery_invalidate_clean_sec(cache, cline);
+ } else {
+ /* Reset metadata for not mapped or clean cache line */
+ _recovery_reset_cline_metadata(cache, cline);
+ }
+
+ OCF_COND_RESCHED(step, 128);
+ }
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_metadata_hash_load_recovery_legacy_finish(
+ ocf_pipeline_t pipeline, void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Metadata read for recovery FAILURE\n");
+ ocf_metadata_error(cache);
+ goto out;
+ }
+
+ ocf_cache_log(cache, log_info, "Done loading cache state\n");
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_properties
+ocf_metadata_hash_load_recovery_legacy_pl_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_recovery_legacy_finish,
+ .steps = {
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_load_segment,
+ metadata_segment_collision),
+ OCF_PL_STEP_ARG_INT(_recovery_rebuild_metadata, true),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+static void _ocf_metadata_hash_load_recovery_legacy(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_recovery_legacy_pl_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq(
+ struct ocf_cache *cache, ocf_seq_no_t seq_no)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ if (seq_no == OCF_SEQ_NO_INVALID)
+ return OCF_CORE_ID_INVALID;
+
+ for_each_core_all(cache, core, core_id) {
+ if (core->conf_meta->seq_no == seq_no)
+ break;
+ }
+
+ return core_id;
+}
+
+static void ocf_metadata_hash_load_atomic_metadata_complete(
+ ocf_cache_t cache, void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
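+/*
+ * Drain callback for atomic metadata recovery. Each 512B sector of the cache
+ * device carries its own atomic metadata entry: the owning core is looked up
+ * by sequence number, the core mapping is set once per cache line (at the
+ * first sector of the line) and the valid/dirty bits are restored per sector.
+ */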
+static int ocf_metadata_hash_load_atomic_metadata_drain(void *priv,
+ uint64_t sector_addr, uint32_t sector_no, ctx_data_t *data)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ struct ocf_atomic_metadata meta;
+ ocf_cache_line_t line = 0;
+ uint8_t pos = 0;
+ ocf_seq_no_t core_seq_no = OCF_SEQ_NO_INVALID;
+ ocf_core_id_t core_id = OCF_CORE_ID_INVALID;
+ uint64_t core_line = 0;
+ bool core_line_ok = false;
+ uint32_t i;
+
+ for (i = 0; i < sector_no; i++) {
+ ctx_data_rd_check(cache->owner, &meta, data, sizeof(meta));
+
+ line = (sector_addr + i) / ocf_line_sectors(cache);
+ line = ocf_metadata_map_phy2lg(cache, line);
+ pos = (sector_addr + i) % ocf_line_sectors(cache);
+ core_seq_no = meta.core_seq_no;
+ core_line = meta.core_line;
+
+ /* Look for core with sequence number same as cache line */
+ core_id = _ocf_metadata_hash_find_core_by_seq(
+ cache, core_seq_no);
+
+ if (pos == 0)
+ core_line_ok = false;
+
+ if (meta.valid && core_id != OCF_CORE_ID_INVALID) {
+ if (!core_line_ok) {
+ ocf_metadata_set_core_info(cache, line,
+ core_id, core_line);
+ core_line_ok = true;
+ }
+
+ metadata_set_valid_sec_one(cache, line, pos);
+ meta.dirty ?
+ metadata_set_dirty_sec_one(cache, line, pos) :
+ metadata_clear_dirty_sec_one(cache, line, pos);
+ }
+ }
+
+ return 0;
+}
+
+static void ocf_medatata_hash_load_atomic_metadata(
+ ocf_pipeline_t pipeline, void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int result;
+
+ result = metadata_io_read_i_atomic(cache, cache->mngt_queue,
+ context, ocf_metadata_hash_load_atomic_metadata_drain,
+ ocf_metadata_hash_load_atomic_metadata_complete);
+ if (result) {
+ ocf_metadata_error(cache);
+ ocf_cache_log(cache, log_err,
+ "Metadata read for recovery FAILURE\n");
+ OCF_PL_FINISH_RET(pipeline, result);
+ }
+}
+
+static void ocf_metadata_hash_load_recovery_atomic_finish(
+ ocf_pipeline_t pipeline, void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Metadata read for recovery FAILURE\n");
+ ocf_metadata_error(cache);
+ }
+
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_properties
+ocf_metadata_hash_load_recovery_atomic_pl_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_recovery_atomic_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_medatata_hash_load_atomic_metadata),
+ OCF_PL_STEP_ARG_INT(_recovery_rebuild_metadata, false),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Load for recovery in atomic mode - rebuild metadata from the per-sector
+ * atomic metadata area of the cache device
+ */
+static void _ocf_metadata_hash_load_recovery_atomic(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_recovery_atomic_pl_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+/*
+ * Load for recovery - load only the data required by the recovery procedure
+ */
+static void ocf_metadata_hash_load_recovery(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ OCF_DEBUG_TRACE(cache);
+
+ if (ocf_volume_is_atomic(&cache->device->volume))
+ _ocf_metadata_hash_load_recovery_atomic(cache, cmpl, priv);
+ else
+ _ocf_metadata_hash_load_recovery_legacy(cache, cmpl, priv);
+}
+
+/*******************************************************************************
+ * Core Info
+ ******************************************************************************/
+static void ocf_metadata_hash_get_core_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t *core_id,
+ uint64_t *core_sector)
+{
+ const struct ocf_metadata_map *collision;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ collision = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]), line);
+ if (collision) {
+ if (core_id)
+ *core_id = collision->core_id;
+ if (core_sector)
+ *core_sector = collision->core_line;
+ } else {
+ ocf_metadata_error(cache);
+
+ if (core_id)
+ *core_id = OCF_CORE_MAX;
+ if (core_sector)
+ *core_sector = ULLONG_MAX;
+ }
+}
+
+static void ocf_metadata_hash_set_core_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t core_id,
+ uint64_t core_sector)
+{
+ struct ocf_metadata_map *collision;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ collision = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]), line);
+
+ if (collision) {
+ collision->core_id = core_id;
+ collision->core_line = core_sector;
+ } else {
+ ocf_metadata_error(cache);
+ }
+}
+
+static ocf_core_id_t ocf_metadata_hash_get_core_id(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ const struct ocf_metadata_map *collision;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ collision = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]), line);
+
+ if (collision)
+ return collision->core_id;
+
+ ocf_metadata_error(cache);
+ return OCF_CORE_MAX;
+}
+
+static struct ocf_metadata_uuid *ocf_metadata_hash_get_core_uuid(
+ struct ocf_cache *cache, ocf_core_id_t core_id)
+{
+ struct ocf_metadata_uuid *muuid;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ muuid = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_core_uuid]), core_id);
+
+ if (!muuid)
+ ocf_metadata_error(cache);
+
+ return muuid;
+}
+
+/*******************************************************************************
+ * Core and part id
+ ******************************************************************************/
+
+static void ocf_metadata_hash_get_core_and_part_id(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_core_id_t *core_id, ocf_part_id_t *part_id)
+{
+ const struct ocf_metadata_map *collision;
+ const struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ collision = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]), line);
+
+ info = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (collision && info) {
+ if (core_id)
+ *core_id = collision->core_id;
+ if (part_id)
+ *part_id = info->partition_id;
+ } else {
+ ocf_metadata_error(cache);
+ if (core_id)
+ *core_id = OCF_CORE_MAX;
+ if (part_id)
+ *part_id = PARTITION_DEFAULT;
+ }
+}
+/*******************************************************************************
+ * Hash Table
+ ******************************************************************************/
+
+/*
+ * Hash Table - Get
+ */
+static ocf_cache_line_t ocf_metadata_hash_get_hash(
+ struct ocf_cache *cache, ocf_cache_line_t index)
+{
+ ocf_cache_line_t line = cache->device->collision_table_entries;
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_get(cache,
+ &(ctrl->raw_desc[metadata_segment_hash]), index, &line);
+
+ if (result)
+ ocf_metadata_error(cache);
+
+ return line;
+}
+
+/*
+ * Hash Table - Set
+ */
+static void ocf_metadata_hash_set_hash(struct ocf_cache *cache,
+ ocf_cache_line_t index, ocf_cache_line_t line)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_set(cache,
+ &(ctrl->raw_desc[metadata_segment_hash]), index, &line);
+
+ if (result)
+ ocf_metadata_error(cache);
+}
+
+/*******************************************************************************
+ * Cleaning Policy
+ ******************************************************************************/
+
+/*
+ * Cleaning policy - Get
+ */
+static void ocf_metadata_hash_get_cleaning_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ struct cleaning_policy_meta *cleaning_policy)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_get(cache,
+ &(ctrl->raw_desc[metadata_segment_cleaning]), line,
+ cleaning_policy);
+
+ if (result)
+ ocf_metadata_error(cache);
+}
+
+/*
+ * Cleaning policy - Set
+ */
+static void ocf_metadata_hash_set_cleaning_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ struct cleaning_policy_meta *cleaning_policy)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_set(cache,
+ &(ctrl->raw_desc[metadata_segment_cleaning]), line,
+ cleaning_policy);
+
+ if (result)
+ ocf_metadata_error(cache);
+}
+
+/*******************************************************************************
+ * Eviction policy
+ ******************************************************************************/
+
+/*
+ * Eviction policy - Get
+ */
+static void ocf_metadata_hash_get_eviction_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ union eviction_policy_meta *eviction_policy)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_get(cache,
+ &(ctrl->raw_desc[metadata_segment_eviction]), line,
+ eviction_policy);
+
+ if (result)
+ ocf_metadata_error(cache);
+}
+
+/*
+ * Eviction policy - Set
+ */
+static void ocf_metadata_hash_set_eviction_policy(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ union eviction_policy_meta *eviction_policy)
+{
+ int result = 0;
+ struct ocf_metadata_hash_ctrl *ctrl
+ = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ result = ocf_metadata_raw_set(cache,
+ &(ctrl->raw_desc[metadata_segment_eviction]), line,
+ eviction_policy);
+
+ if (result)
+ ocf_metadata_error(cache);
+}
+
+/*******************************************************************************
+ * Collision
+ ******************************************************************************/
+static ocf_cache_line_t ocf_metadata_hash_map_lg2phy_seq(
+ struct ocf_cache *cache, ocf_cache_line_t coll_idx)
+{
+ return coll_idx;
+}
+
+static ocf_cache_line_t ocf_metadata_hash_map_phy2lg_seq(
+ struct ocf_cache *cache, ocf_cache_line_t cache_line)
+{
+ return cache_line;
+}
+
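+/*
+ * Striping layout: logical collision index 'coll_idx' is split into a row
+ * (coll_idx % entries_in_page) and a column (coll_idx / entries_in_page), so
+ * consecutive entries land on different metadata pages. The 'delta'/'offset'
+ * correction accounts for the last, partially filled page so that physical
+ * cache line numbers stay contiguous.
+ */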
+static ocf_cache_line_t ocf_metadata_hash_map_lg2phy_striping(
+ struct ocf_cache *cache, ocf_cache_line_t coll_idx)
+{
+ ocf_cache_line_t cache_line = 0, offset = 0;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ unsigned int entries_in_page =
+ ctrl->raw_desc[metadata_segment_collision].entries_in_page;
+ unsigned int pages =
+ ctrl->raw_desc[metadata_segment_collision].ssd_pages;
+ ocf_cache_line_t collision_table_entries =
+ cache->device->collision_table_entries;
+ ocf_cache_line_t delta =
+ (entries_in_page * pages) - collision_table_entries;
+ unsigned int row = coll_idx % entries_in_page;
+
+ if (row > entries_in_page - delta)
+ offset = row - (entries_in_page - delta);
+ else
+ offset = 0;
+
+ cache_line = (row * pages) + (coll_idx / entries_in_page) - offset;
+ return cache_line;
+}
+
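+/*
+ * Inverse of the striping mapping above: recover the logical collision index
+ * from a physical cache line number, taking the shorter last metadata page
+ * into account.
+ */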
+static ocf_cache_line_t ocf_metadata_hash_map_phy2lg_striping(
+ struct ocf_cache *cache, ocf_cache_line_t cache_line)
+{
+ ocf_cache_line_t coll_idx = 0;
+
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ struct ocf_metadata_raw *raw =
+ &ctrl->raw_desc[metadata_segment_collision];
+
+ unsigned int pages = raw->ssd_pages;
+ unsigned int entries_in_page = raw->entries_in_page;
+ unsigned int entries_in_last_page = raw->entries % entries_in_page ?:
+ entries_in_page;
+
+ unsigned int row = 0, coll = 0;
+
+ unsigned int last = entries_in_last_page * pages;
+
+ if (cache_line < last) {
+ row = cache_line % pages;
+ coll = cache_line / pages;
+ } else {
+ cache_line -= last;
+ row = cache_line % (pages - 1);
+ coll = cache_line / (pages - 1) + entries_in_last_page;
+ }
+
+ coll_idx = (row * entries_in_page) + coll;
+
+ return coll_idx;
+}
+
+static void ocf_metadata_hash_set_collision_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next, ocf_cache_line_t prev)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info) {
+ info->next_col = next;
+ info->prev_col = prev;
+ } else {
+ ocf_metadata_error(cache);
+ }
+}
+
+static void ocf_metadata_hash_set_collision_next(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info)
+ info->next_col = next;
+ else
+ ocf_metadata_error(cache);
+}
+
+static void ocf_metadata_hash_set_collision_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t prev)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info)
+ info->prev_col = prev;
+ else
+ ocf_metadata_error(cache);
+}
+
+static void ocf_metadata_hash_get_collision_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t *next, ocf_cache_line_t *prev)
+{
+ const struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ ENV_BUG_ON(NULL == next && NULL == prev);
+
+ info = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+ if (info) {
+ if (next)
+ *next = info->next_col;
+ if (prev)
+ *prev = info->prev_col;
+ } else {
+ ocf_metadata_error(cache);
+
+ if (next)
+ *next = cache->device->collision_table_entries;
+ if (prev)
+ *prev = cache->device->collision_table_entries;
+ }
+}
+
+void ocf_metadata_hash_start_collision_shared_access(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ struct ocf_metadata_raw *raw =
+ &ctrl->raw_desc[metadata_segment_collision];
+ uint32_t page = ocf_metadata_raw_page(raw, line);
+
+ ocf_collision_start_shared_access(&cache->metadata.lock, page);
+}
+
+void ocf_metadata_hash_end_collision_shared_access(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ struct ocf_metadata_raw *raw =
+ &ctrl->raw_desc[metadata_segment_collision];
+ uint32_t page = ocf_metadata_raw_page(raw, line);
+
+ ocf_collision_end_shared_access(&cache->metadata.lock, page);
+}
+
+/*******************************************************************************
+ * Partition
+ ******************************************************************************/
+
+static void ocf_metadata_hash_get_partition_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
+ ocf_cache_line_t *prev_line)
+{
+ const struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_rd_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info) {
+ if (part_id)
+ *part_id = info->partition_id;
+ if (next_line)
+ *next_line = info->partition_next;
+ if (prev_line)
+ *prev_line = info->partition_prev;
+ } else {
+ ocf_metadata_error(cache);
+ if (part_id)
+ *part_id = PARTITION_DEFAULT;
+ if (next_line)
+ *next_line = cache->device->collision_table_entries;
+ if (prev_line)
+ *prev_line = cache->device->collision_table_entries;
+ }
+}
+
+static void ocf_metadata_hash_set_partition_next(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next_line)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info)
+ info->partition_next = next_line;
+ else
+ ocf_metadata_error(cache);
+}
+
+static void ocf_metadata_hash_set_partition_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t prev_line)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info)
+ info->partition_prev = prev_line;
+ else
+ ocf_metadata_error(cache);
+}
+
+static void ocf_metadata_hash_set_partition_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_part_id_t part_id, ocf_cache_line_t next_line,
+ ocf_cache_line_t prev_line)
+{
+ struct ocf_metadata_list_info *info;
+ struct ocf_metadata_hash_ctrl *ctrl =
+ (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+
+ info = ocf_metadata_raw_wr_access(cache,
+ &(ctrl->raw_desc[metadata_segment_list_info]), line);
+
+ if (info) {
+ info->partition_id = part_id;
+ info->partition_next = next_line;
+ info->partition_prev = prev_line;
+ } else {
+ ocf_metadata_error(cache);
+ }
+}
+
+/*******************************************************************************
+ * Hash Metadata interface definition
+ ******************************************************************************/
+
+static const struct ocf_metadata_iface metadata_hash_iface = {
+ .init = ocf_metadata_hash_init,
+ .deinit = ocf_metadata_hash_deinit,
+ .query_cores = ocf_metadata_hash_query_cores,
+ .init_variable_size = ocf_metadata_hash_init_variable_size,
+ .deinit_variable_size = ocf_metadata_hash_deinit_variable_size,
+ .init_hash_table = ocf_metadata_hash_init_hash_table,
+ .init_collision = ocf_metadata_hash_init_collision,
+
+ .layout_iface = NULL,
+ .pages = ocf_metadata_hash_pages,
+ .cachelines = ocf_metadata_hash_cachelines,
+ .size_of = ocf_metadata_hash_size_of,
+
+ /*
+ * Load all, flush all, etc.
+ */
+ .flush_all = ocf_metadata_hash_flush_all,
+ .flush_mark = ocf_metadata_hash_flush_mark,
+ .flush_do_asynch = ocf_metadata_hash_flush_do_asynch,
+ .load_all = ocf_metadata_hash_load_all,
+ .load_recovery = ocf_metadata_hash_load_recovery,
+
+ /*
+ * Super Block
+ */
+ .set_shutdown_status = ocf_metadata_hash_set_shutdown_status,
+ .flush_superblock = ocf_metadata_hash_flush_superblock,
+ .load_superblock = ocf_metadata_hash_load_superblock,
+
+ /*
+ * Reserved area
+ */
+ .get_reserved_lba = ocf_metadata_hash_get_reserved_lba,
+
+ /*
+ * Core Info
+ */
+ .set_core_info = ocf_metadata_hash_set_core_info,
+ .get_core_info = ocf_metadata_hash_get_core_info,
+ .get_core_id = ocf_metadata_hash_get_core_id,
+ .get_core_uuid = ocf_metadata_hash_get_core_uuid,
+
+ /*
+ * Core and part id
+ */
+
+ .get_core_and_part_id = ocf_metadata_hash_get_core_and_part_id,
+
+ /*
+ * Collision Info
+ */
+ .get_collision_info = ocf_metadata_hash_get_collision_info,
+ .set_collision_info = ocf_metadata_hash_set_collision_info,
+ .set_collision_next = ocf_metadata_hash_set_collision_next,
+ .set_collision_prev = ocf_metadata_hash_set_collision_prev,
+ .start_collision_shared_access =
+ ocf_metadata_hash_start_collision_shared_access,
+ .end_collision_shared_access =
+ ocf_metadata_hash_end_collision_shared_access,
+
+ /*
+ * Partition Info
+ */
+ .get_partition_info = ocf_metadata_hash_get_partition_info,
+ .set_partition_next = ocf_metadata_hash_set_partition_next,
+ .set_partition_prev = ocf_metadata_hash_set_partition_prev,
+ .set_partition_info = ocf_metadata_hash_set_partition_info,
+
+ /*
+ * Hash Table
+ */
+ .get_hash = ocf_metadata_hash_get_hash,
+ .set_hash = ocf_metadata_hash_set_hash,
+
+ /*
+ * Cleaning Policy
+ */
+ .get_cleaning_policy = ocf_metadata_hash_get_cleaning_policy,
+ .set_cleaning_policy = ocf_metadata_hash_set_cleaning_policy,
+
+ /*
+ * Eviction Policy
+ */
+ .get_eviction_policy = ocf_metadata_hash_get_eviction_policy,
+ .set_eviction_policy = ocf_metadata_hash_set_eviction_policy,
+};
+
+/*******************************************************************************
+ * Bitmap status
+ ******************************************************************************/
+
+#include "metadata_bit.h"
+
+static const struct ocf_metadata_layout_iface layout_ifaces[ocf_metadata_layout_max] = {
+ [ocf_metadata_layout_striping] = {
+ .lg2phy = ocf_metadata_hash_map_lg2phy_striping,
+ .phy2lg = ocf_metadata_hash_map_phy2lg_striping
+ },
+ [ocf_metadata_layout_seq] = {
+ .lg2phy = ocf_metadata_hash_map_lg2phy_seq,
+ .phy2lg = ocf_metadata_hash_map_phy2lg_seq
+ }
+};
+
+
+static void ocf_metadata_hash_init_iface(struct ocf_cache *cache,
+ ocf_metadata_layout_t layout)
+{
+ struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *)
+ &cache->metadata.iface;
+
+ ENV_BUG_ON(layout >= ocf_metadata_layout_max || layout < 0);
+
+ /* Initialize metadata location interface*/
+ if (cache->device->init_mode == ocf_init_mode_metadata_volatile)
+ layout = ocf_metadata_layout_seq;
+ iface->layout_iface = &layout_ifaces[layout];
+
+ /* Initialize bit status function */
+
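+ /*
+ * The bitmap accessor width follows the number of 512B sectors per
+ * cache line: a 4k line has 8 sectors (u8 accessors), an 8k line 16
+ * sectors (u16), and so on up to 64k lines (u128).
+ */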
+ switch (cache->metadata.settings.size) {
+ case ocf_cache_line_size_4:
+ iface->test_dirty = _ocf_metadata_test_dirty_u8;
+ iface->test_out_dirty = _ocf_metadata_test_out_dirty_u8;
+ iface->clear_dirty = _ocf_metadata_clear_dirty_u8;
+ iface->set_dirty = _ocf_metadata_set_dirty_u8;
+ iface->test_and_set_dirty = _ocf_metadata_test_and_set_dirty_u8;
+ iface->test_and_clear_dirty =
+ _ocf_metadata_test_and_clear_dirty_u8;
+ iface->test_valid = _ocf_metadata_test_valid_u8;
+ iface->test_out_valid = _ocf_metadata_test_out_valid_u8;
+ iface->clear_valid = _ocf_metadata_clear_valid_u8;
+ iface->set_valid = _ocf_metadata_set_valid_u8;
+ iface->test_and_set_valid = _ocf_metadata_test_and_set_valid_u8;
+ iface->test_and_clear_valid =
+ _ocf_metadata_test_and_clear_valid_u8;
+ break;
+
+ case ocf_cache_line_size_8:
+ iface->test_dirty = _ocf_metadata_test_dirty_u16;
+ iface->test_out_dirty = _ocf_metadata_test_out_dirty_u16;
+ iface->clear_dirty = _ocf_metadata_clear_dirty_u16;
+ iface->set_dirty = _ocf_metadata_set_dirty_u16;
+ iface->test_and_set_dirty =
+ _ocf_metadata_test_and_set_dirty_u16;
+ iface->test_and_clear_dirty =
+ _ocf_metadata_test_and_clear_dirty_u16;
+ iface->test_valid = _ocf_metadata_test_valid_u16;
+ iface->test_out_valid = _ocf_metadata_test_out_valid_u16;
+ iface->clear_valid = _ocf_metadata_clear_valid_u16;
+ iface->set_valid = _ocf_metadata_set_valid_u16;
+ iface->test_and_set_valid =
+ _ocf_metadata_test_and_set_valid_u16;
+ iface->test_and_clear_valid =
+ _ocf_metadata_test_and_clear_valid_u16;
+ break;
+
+ case ocf_cache_line_size_16:
+ iface->test_dirty = _ocf_metadata_test_dirty_u32;
+ iface->test_out_dirty = _ocf_metadata_test_out_dirty_u32;
+ iface->clear_dirty = _ocf_metadata_clear_dirty_u32;
+ iface->set_dirty = _ocf_metadata_set_dirty_u32;
+ iface->test_and_set_dirty =
+ _ocf_metadata_test_and_set_dirty_u32;
+ iface->test_and_clear_dirty =
+ _ocf_metadata_test_and_clear_dirty_u32;
+ iface->test_valid = _ocf_metadata_test_valid_u32;
+ iface->test_out_valid = _ocf_metadata_test_out_valid_u32;
+ iface->clear_valid = _ocf_metadata_clear_valid_u32;
+ iface->set_valid = _ocf_metadata_set_valid_u32;
+ iface->test_and_set_valid =
+ _ocf_metadata_test_and_set_valid_u32;
+ iface->test_and_clear_valid =
+ _ocf_metadata_test_and_clear_valid_u32;
+ break;
+ case ocf_cache_line_size_32:
+ iface->test_dirty = _ocf_metadata_test_dirty_u64;
+ iface->test_out_dirty = _ocf_metadata_test_out_dirty_u64;
+ iface->clear_dirty = _ocf_metadata_clear_dirty_u64;
+ iface->set_dirty = _ocf_metadata_set_dirty_u64;
+ iface->test_and_set_dirty =
+ _ocf_metadata_test_and_set_dirty_u64;
+ iface->test_and_clear_dirty =
+ _ocf_metadata_test_and_clear_dirty_u64;
+ iface->test_valid = _ocf_metadata_test_valid_u64;
+ iface->test_out_valid = _ocf_metadata_test_out_valid_u64;
+ iface->clear_valid = _ocf_metadata_clear_valid_u64;
+ iface->set_valid = _ocf_metadata_set_valid_u64;
+ iface->test_and_set_valid =
+ _ocf_metadata_test_and_set_valid_u64;
+ iface->test_and_clear_valid =
+ _ocf_metadata_test_and_clear_valid_u64;
+ break;
+
+ case ocf_cache_line_size_64:
+ iface->test_dirty = _ocf_metadata_test_dirty_u128;
+ iface->test_out_dirty = _ocf_metadata_test_out_dirty_u128;
+ iface->clear_dirty = _ocf_metadata_clear_dirty_u128;
+ iface->set_dirty = _ocf_metadata_set_dirty_u128;
+ iface->test_and_set_dirty =
+ _ocf_metadata_test_and_set_dirty_u128;
+ iface->test_and_clear_dirty =
+ _ocf_metadata_test_and_clear_dirty_u128;
+ iface->test_valid = _ocf_metadata_test_valid_u128;
+ iface->test_out_valid = _ocf_metadata_test_out_valid_u128;
+ iface->clear_valid = _ocf_metadata_clear_valid_u128;
+ iface->set_valid = _ocf_metadata_set_valid_u128;
+ iface->test_and_set_valid =
+ _ocf_metadata_test_and_set_valid_u128;
+ iface->test_and_clear_valid =
+ _ocf_metadata_test_and_clear_valid_u128;
+ break;
+
+ default:
+ ENV_BUG();
+ break;
+ }
+}
+
+/*
+ * Get metadata hash interface
+ */
+const struct ocf_metadata_iface *metadata_hash_get_iface(void)
+{
+ return &metadata_hash_iface;
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_hash.h b/src/spdk/ocf/src/metadata/metadata_hash.h
new file mode 100644
index 000000000..b19f3a9a2
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_hash.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_HASH_H__
+#define __METADATA_HASH_H__
+
+/**
+ * @file metadata_hash.h
+ * @brief Metadata Service - Hash Implementation
+ */
+
+#include "../ocf_request.h"
+/**
+ * @brief Metadata hash segment types
+ */
+enum ocf_metadata_segment {
+ metadata_segment_sb_config = 0, /*!< Super block conf */
+ metadata_segment_sb_runtime, /*!< Super block runtime */
+ metadata_segment_reserved, /*!< Reserved space on disk */
+ metadata_segment_part_config, /*!< Part Config Metadata */
+ metadata_segment_part_runtime, /*!< Part Runtime Metadata */
+ metadata_segment_core_config, /*!< Core Config Metadata */
+ metadata_segment_core_runtime, /*!< Core Runtime Metadata */
+ metadata_segment_core_uuid, /*!< Core UUID */
+ /* .... new fixed size sections go here */
+
+ metadata_segment_fixed_size_max,
+ metadata_segment_variable_size_start = metadata_segment_fixed_size_max,
+
+ /* sections with size dependent on cache device size go here: */
+ metadata_segment_cleaning = /*!< Cleaning policy */
+ metadata_segment_variable_size_start,
+ metadata_segment_eviction, /*!< Eviction policy */
+ metadata_segment_collision, /*!< Collision */
+ metadata_segment_list_info, /*!< Cache line list info */
+ metadata_segment_hash, /*!< Hash */
+ /* .... new variable size sections go here */
+
+ metadata_segment_max, /*!< MAX */
+};
+
+/**
+ * @brief Get metadata interface implementation
+ *
+ * @return metadata interface
+ */
+const struct ocf_metadata_iface *metadata_hash_get_iface(void);
+
+#endif /* __METADATA_HASH_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_io.c b/src/spdk/ocf/src/metadata/metadata_io.c
new file mode 100644
index 000000000..bc442b687
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_io.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "metadata.h"
+#include "metadata_io.h"
+#include "../ocf_priv.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
+#include "../engine/engine_bf.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../ocf_request.h"
+#include "../ocf_def_priv.h"
+
+#define OCF_METADATA_IO_DEBUG 0
+
+#if 1 == OCF_METADATA_IO_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)
+
+#define OCF_DEBUG_MSG(cache, msg) \
+ ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
+ __func__, msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+struct metadata_io_read_i_atomic_context {
+ struct ocf_request *req;
+ ctx_data_t *data;
+ ocf_cache_t cache;
+ uint64_t count;
+ uint64_t curr_offset;
+ uint64_t curr_count;
+
+ ocf_metadata_atomic_io_event_t drain_hndl;
+ ocf_metadata_io_end_t compl_hndl;
+ void *priv;
+};
+
+static void metadata_io_read_i_atomic_complete(
+ struct metadata_io_read_i_atomic_context *context, int error)
+{
+ context->compl_hndl(context->cache, context->priv, error);
+
+ ctx_data_free(context->cache->owner, context->data);
+ ocf_req_put(context->req);
+ env_vfree(context);
+}
+
+/*
+ * Iterative read end callback
+ */
+static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
+{
+ struct metadata_io_read_i_atomic_context *context = io->priv1;
+
+ OCF_DEBUG_TRACE(ocf_volume_get_cache(ocf_io_get_volume(io)));
+
+ ocf_io_put(io);
+
+ if (error) {
+ metadata_io_read_i_atomic_complete(context, error);
+ return;
+ }
+
+ context->drain_hndl(context->priv, context->curr_offset,
+ context->curr_count, context->data);
+
+ context->count -= context->curr_count;
+ context->curr_offset += context->curr_count;
+
+ if (context->count > 0)
+ ocf_engine_push_req_front(context->req, true);
+ else
+ metadata_io_read_i_atomic_complete(context, 0);
+}
+
+int metadata_io_read_i_atomic_step(struct ocf_request *req)
+{
+ struct metadata_io_read_i_atomic_context *context = req->priv;
+ ocf_cache_t cache = context->cache;
+ uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
+ struct ocf_io *io;
+ int result = 0;
+
+ /* Get sectors count of this IO iteration */
+ context->curr_count = OCF_MIN(max_sectors_count, context->count);
+
+ /* Reset position in data buffer */
+ ctx_data_seek(cache->owner, context->data, ctx_data_seek_begin, 0);
+
+ /* Allocate new IO */
+ io = ocf_new_cache_io(cache, req->io_queue,
+ cache->device->metadata_offset +
+ SECTORS_TO_BYTES(context->curr_offset),
+ SECTORS_TO_BYTES(context->curr_count), OCF_READ, 0, 0);
+
+ if (!io) {
+ metadata_io_read_i_atomic_complete(context, -OCF_ERR_NO_MEM);
+ return 0;
+ }
+
+ /* Setup IO */
+ ocf_io_set_cmpl(io, context, NULL, metadata_io_read_i_atomic_step_end);
+ result = ocf_io_set_data(io, context->data, 0);
+ if (result) {
+ ocf_io_put(io);
+ metadata_io_read_i_atomic_complete(context, result);
+ return 0;
+ }
+
+ /* Submit IO */
+ ocf_volume_submit_metadata(io);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_metadata_io_read_i_atomic_step = {
+ .read = metadata_io_read_i_atomic_step,
+ .write = metadata_io_read_i_atomic_step,
+};
+
+/*
+ * Iterative read request
+ */
+int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
+ ocf_metadata_atomic_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ struct metadata_io_read_i_atomic_context *context;
+ uint64_t io_sectors_count = cache->device->collision_table_entries *
+ ocf_line_sectors(cache);
+
+ OCF_DEBUG_TRACE(cache);
+
+ context = env_vzalloc(sizeof(*context));
+ if (!context)
+ return -OCF_ERR_NO_MEM;
+
+ context->req = ocf_req_new(queue, NULL, 0, 0, 0);
+ if (!context->req) {
+ env_vfree(context);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ context->req->info.internal = true;
+ context->req->io_if = &_io_if_metadata_io_read_i_atomic_step;
+ context->req->priv = context;
+
+ /* Allocate one 4k page for metadata */
+ context->data = ctx_data_alloc(cache->owner, 1);
+ if (!context->data) {
+ ocf_req_put(context->req);
+ env_vfree(context);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ context->cache = cache;
+ context->count = io_sectors_count;
+ context->curr_offset = 0;
+ context->curr_count = 0;
+ context->drain_hndl = drain_hndl;
+ context->compl_hndl = compl_hndl;
+ context->priv = priv;
+
+ ocf_engine_push_req_front(context->req, true);
+
+ return 0;
+}
+
+static void metadata_io_req_fill(struct metadata_io_request *m_req)
+{
+ ocf_cache_t cache = m_req->cache;
+ int i;
+
+ for (i = 0; i < m_req->count; i++) {
+ m_req->on_meta_fill(cache, m_req->data,
+ m_req->page + i, m_req->context);
+ }
+}
+
+static void metadata_io_req_drain(struct metadata_io_request *m_req)
+{
+ ocf_cache_t cache = m_req->cache;
+ int i;
+
+ for (i = 0; i < m_req->count; i++) {
+ m_req->on_meta_drain(cache, m_req->data,
+ m_req->page + i, m_req->context);
+ }
+}
+
+static void metadata_io_io_end(struct metadata_io_request *m_req, int error);
+
+static void metadata_io_io_cmpl(struct ocf_io *io, int error)
+{
+ metadata_io_io_end(io->priv1, error);
+ ocf_io_put(io);
+}
+
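+/*
+ * (Re)issue a single metadata IO. For writes the data buffer is refilled from
+ * the in-memory metadata under the shared metadata lock right before
+ * submission, so a restarted request always carries the latest content.
+ */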
+static int metadata_io_restart_req(struct ocf_request *req)
+{
+ struct metadata_io_request *m_req = req->priv;
+ ocf_cache_t cache = req->cache;
+ struct ocf_io *io;
+ int ret;
+
+ /* Fill with the latest metadata. */
+ if (m_req->req.rw == OCF_WRITE) {
+ ocf_metadata_start_shared_access(&cache->metadata.lock);
+ metadata_io_req_fill(m_req);
+ ocf_metadata_end_shared_access(&cache->metadata.lock);
+ }
+
+ io = ocf_new_cache_io(cache, req->io_queue,
+ PAGES_TO_BYTES(m_req->page),
+ PAGES_TO_BYTES(m_req->count),
+ m_req->req.rw, 0, 0);
+ if (!io) {
+ metadata_io_io_end(m_req, -OCF_ERR_NO_MEM);
+ return 0;
+ }
+
+ /* Setup IO */
+ ocf_io_set_cmpl(io, m_req, NULL, metadata_io_io_cmpl);
+ ctx_data_seek(cache->owner, m_req->data, ctx_data_seek_begin, 0);
+ ret = ocf_io_set_data(io, m_req->data, 0);
+ if (ret) {
+ ocf_io_put(io);
+ metadata_io_io_end(m_req, ret);
+ return ret;
+ }
+ ocf_volume_submit_io(io);
+ return 0;
+}
+
+static struct ocf_io_if metadata_io_restart_if = {
+ .read = metadata_io_restart_req,
+ .write = metadata_io_restart_req,
+};
+
+static void metadata_io_req_advance(struct metadata_io_request *m_req);
+
+/*
+ * Iterative asynchronous IO end callback
+ */
+static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+ ocf_cache_t cache = m_req->cache;
+
+ OCF_CHECK_NULL(a_req);
+ OCF_CHECK_NULL(a_req->on_complete);
+
+ if (error) {
+ a_req->error = a_req->error ?: error;
+ } else {
+ if (m_req->req.rw == OCF_READ)
+ metadata_io_req_drain(m_req);
+ }
+
+ OCF_DEBUG_PARAM(cache, "Page = %u", m_req->page);
+
+ metadata_io_req_advance(m_req);
+
+ env_atomic_set(&m_req->finished, 1);
+ ocf_metadata_updater_kick(cache);
+}
+
+static void metadata_io_req_submit(struct metadata_io_request *m_req)
+{
+ env_atomic_set(&m_req->finished, 0);
+ metadata_updater_submit(m_req);
+}
+
+void metadata_io_req_end(struct metadata_io_request *m_req)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+ ocf_cache_t cache = m_req->cache;
+
+ if (env_atomic_dec_return(&a_req->req_remaining) == 0)
+ a_req->on_complete(cache, a_req->context, a_req->error);
+
+ ctx_data_free(cache->owner, m_req->data);
+}
+
+void metadata_io_req_finalize(struct metadata_io_request *m_req)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+
+ if (env_atomic_dec_return(&a_req->req_active) == 0)
+ env_vfree(a_req);
+}
+
+static uint32_t metadata_io_max_page(ocf_cache_t cache)
+{
+ return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
+}
+
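+/*
+ * Advance the request to the next chunk of the asynchronous operation. The
+ * page range is processed in chunks of at most max_count pages (bounded by
+ * the volume's maximum IO size); req_current is shared by all requests of
+ * the batch, so each request atomically claims the next unprocessed chunk.
+ */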
+static void metadata_io_req_advance(struct metadata_io_request *m_req)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+ uint32_t max_count = metadata_io_max_page(m_req->cache);
+ uint32_t curr;
+
+ if (a_req->error) {
+ metadata_io_req_end(m_req);
+ return;
+ }
+
+ curr = env_atomic_inc_return(&a_req->req_current);
+
+ if (curr >= OCF_DIV_ROUND_UP(a_req->count, max_count)) {
+ m_req->count = 0;
+ metadata_io_req_end(m_req);
+ return;
+ }
+
+ m_req->page = a_req->page + curr * max_count;
+ m_req->count = OCF_MIN(a_req->count - curr * max_count, max_count);
+}
+
+static void metadata_io_req_start(struct metadata_io_request *m_req)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+
+ env_atomic_inc(&a_req->req_remaining);
+ env_atomic_inc(&a_req->req_active);
+
+ metadata_io_req_advance(m_req);
+
+ if (m_req->count == 0) {
+ metadata_io_req_finalize(m_req);
+ return;
+ }
+
+ metadata_io_req_submit(m_req);
+}
+
+void metadata_io_req_complete(struct metadata_io_request *m_req)
+{
+ struct metadata_io_request_asynch *a_req = m_req->asynch;
+
+ if (m_req->count == 0 || a_req->error) {
+ metadata_io_req_finalize(m_req);
+ return;
+ }
+
+ metadata_io_req_submit(m_req);
+}
+
+/*
+ * Iterative asynchronous request (read or write)
+ */
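+/*
+ * Up to METADATA_IO_REQS_LIMIT requests are set up front and run in parallel;
+ * each of them walks successive max-IO-size chunks of the page range (see
+ * metadata_io_req_advance()) until the whole range is covered.
+ */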
+static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t io_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ struct metadata_io_request_asynch *a_req;
+ struct metadata_io_request *m_req;
+ uint32_t max_count = metadata_io_max_page(cache);
+ uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
+ uint32_t req_count = OCF_MIN(io_count, METADATA_IO_REQS_LIMIT);
+ int i;
+
+ if (count == 0)
+ return 0;
+
+ a_req = env_vzalloc_flags(sizeof(*a_req), ENV_MEM_NOIO);
+ if (!a_req)
+ return -OCF_ERR_NO_MEM;
+
+ env_atomic_set(&a_req->req_remaining, 1);
+ env_atomic_set(&a_req->req_active, 1);
+ env_atomic_set(&a_req->req_current, -1);
+ a_req->on_complete = compl_hndl;
+ a_req->context = context;
+ a_req->page = page;
+ a_req->count = count;
+
+ /* IO Requests initialization */
+ for (i = 0; i < req_count; i++) {
+ m_req = &a_req->reqs[i];
+
+ m_req->asynch = a_req;
+ m_req->cache = cache;
+ m_req->context = context;
+ m_req->on_meta_fill = io_hndl;
+ m_req->on_meta_drain = io_hndl;
+ m_req->req.io_if = &metadata_io_restart_if;
+ m_req->req.io_queue = queue;
+ m_req->req.cache = cache;
+ m_req->req.priv = m_req;
+ m_req->req.info.internal = true;
+ m_req->req.rw = dir;
+ m_req->req.map = LIST_POISON1;
+
+ /* If req_count == io_count and count is not a multiple of
+ * max_count, the last request can allocate a data buffer smaller
+ * than max_count, as it will never be resubmitted.
+ */
+ m_req->data = ctx_data_alloc(cache->owner,
+ OCF_MIN(max_count, count - i * max_count));
+ if (!m_req->data)
+ goto err;
+ }
+
+
+ for (i = 0; i < req_count; i++)
+ metadata_io_req_start(&a_req->reqs[i]);
+
+ if (env_atomic_dec_return(&a_req->req_remaining) == 0)
+ compl_hndl(cache, context, a_req->error);
+
+ if (env_atomic_dec_return(&a_req->req_active) == 0)
+ env_vfree(a_req);
+
+ return 0;
+
+err:
+ while (i--)
+ ctx_data_free(cache->owner, a_req->reqs[i].data);
+ env_vfree(a_req);
+
+ return -OCF_ERR_NO_MEM;
+}
+
+int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t fill_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
+ page, count, fill_hndl, compl_hndl);
+}
+
+int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ return metadata_io_i_asynch(cache, queue, OCF_READ, context,
+ page, count, drain_hndl, compl_hndl);
+}
+
+int ocf_metadata_io_init(ocf_cache_t cache)
+{
+ return ocf_metadata_updater_init(cache);
+}
+
+void ocf_metadata_io_deinit(ocf_cache_t cache)
+{
+ ocf_metadata_updater_stop(cache);
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_io.h b/src/spdk/ocf/src/metadata/metadata_io.h
new file mode 100644
index 000000000..f3ae2fa68
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_io.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_IO_H__
+#define __METADATA_IO_H__
+
+/**
+ * @file metadata_io.h
+ * @brief Metadata IO utilities
+ */
+
+/**
+ * @brief Metadata IO event
+ *
+ * The client of the metadata IO service is informed through this event:
+ * - on completion of a read from the cache device
+ * - when data to be written to the cache device needs to be filled
+ *
+ * @param cache[in] Cache instance
+ * @param data[in,out] Environment data for read or write IO
+ * @param page[in] Page which is issued
+ * @param context[in] Caller context
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error which will be finally returned to the caller
+ */
+typedef int (*ocf_metadata_io_event_t)(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *context);
+
+/**
+ * @brief Metadata IO end callback
+ *
+ * @param cache - Cache instance
+ * @param context - Caller context
+ * @param error - Error code (0 on success)
+ */
+typedef void (*ocf_metadata_io_end_t)(ocf_cache_t cache,
+ void *context, int error);
+
+struct metadata_io_request_asynch;
+
+/*
+ * IO request context
+ */
+struct metadata_io_request {
+ ocf_cache_t cache;
+ void *context;
+ uint32_t page;
+ uint32_t count;
+ ocf_metadata_io_event_t on_meta_fill;
+ ocf_metadata_io_event_t on_meta_drain;
+ ctx_data_t *data;
+ int error;
+ struct metadata_io_request_asynch *asynch;
+ env_atomic finished;
+
+ struct ocf_request req;
+ struct list_head list;
+};
+
+#define METADATA_IO_REQS_LIMIT 128
+
+/*
+ * Asynchronous IO request context
+ */
+struct metadata_io_request_asynch {
+ struct metadata_io_request reqs[METADATA_IO_REQS_LIMIT];
+ void *context;
+ int error;
+ env_atomic req_remaining;
+ env_atomic req_active;
+ env_atomic req_current;
+ uint32_t page;
+ uint32_t count;
+ ocf_metadata_io_end_t on_complete;
+};
+
+void metadata_io_req_complete(struct metadata_io_request *m_req);
+
+/**
+ * @brief Iterative atomic metadata read event (drain callback)
+ *
+ * @param priv Caller context
+ * @param sector_addr Begin sector of metadata
+ * @param sector_no Number of sectors
+ * @param data Data environment buffer with atomic metadata
+ *
+ * @retval 0 Success
+ * @retval Non-zero Error which will be finally returned to the caller
+ */
+typedef int (*ocf_metadata_atomic_io_event_t)(void *priv, uint64_t sector_addr,
+ uint32_t sector_no, ctx_data_t *data);
+
+/**
+ * @brief Iterative asynchronous read atomic metadata
+ *
+ * @param cache - Cache instance
+ * @param queue - Queue to be used for IO
+ * @param context - Read context
+ * @param drain_hndl - Drain callback
+ * @param compl_hndl - All IOs completed callback
+ *
+ * @return 0 - No errors, otherwise error occurred
+ */
+int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, ocf_metadata_atomic_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl);
+
+/**
+ * @brief Iterative asynchronous page write
+ *
+ * @param cache - Cache instance
+ * @param queue - Queue to be used for IO
+ * @param context - Caller context
+ * @param page - Start page of SSD (cache device) where data will be written
+ * @param count - Number of pages to be processed
+ * @param fill_hndl - Fill callback
+ * @param compl_hndl - All IOs completed callback
+ *
+ * @return 0 - No errors, otherwise error occurred
+ */
+int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t fill_hndl,
+ ocf_metadata_io_end_t compl_hndl);
+
+/**
+ * @brief Iterative asynchronous page read
+ *
+ * @param cache - Cache instance
+ * @param queue - Queue to be used for IO
+ * @param context - Caller context
+ * @param page - Start page of SSD (cache device) where data will be read
+ * @param count - Number of pages to be processed
+ * @param drain_hndl - Drain callback
+ * @param compl_hndl - All IOs completed callback
+ *
+ * @return 0 - No errors, otherwise error occurred
+ */
+int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl);
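+
+/*
+ * Illustrative usage sketch (not part of the API contract): writing a range
+ * of metadata pages with metadata_io_write_i_asynch(). The fill callback is
+ * invoked once per page to copy in-memory content into the IO buffer, and
+ * the completion callback runs once all pages have been written. The names
+ * my_fill, my_cmpl, my_ctx and its fields are hypothetical.
+ *
+ *   static int my_fill(ocf_cache_t cache, ctx_data_t *data, uint32_t page,
+ *       void *context)
+ *   {
+ *       struct my_ctx *ctx = context;
+ *
+ *       ctx_data_wr_check(cache->owner, data,
+ *           ctx->mem + PAGE_SIZE * (page - ctx->first_page), PAGE_SIZE);
+ *       return 0;
+ *   }
+ *
+ *   static void my_cmpl(ocf_cache_t cache, void *context, int error)
+ *   {
+ *       ((struct my_ctx *)context)->error = error;
+ *   }
+ *
+ *   metadata_io_write_i_asynch(cache, cache->mngt_queue, ctx,
+ *       first_page, page_count, my_fill, my_cmpl);
+ */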
+
+/**
+ * Function for initializing metadata io.
+ */
+int ocf_metadata_io_init(ocf_cache_t cache);
+
+/**
+ * Function for deinitializing metadata io.
+ */
+void ocf_metadata_io_deinit(ocf_cache_t cache);
+
+#endif /* __METADATA_IO_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_misc.c b/src/spdk/ocf/src/metadata/metadata_misc.c
new file mode 100644
index 000000000..b51a147ad
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_misc.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "metadata.h"
+#include "../ocf_freelist.h"
+#include "../utils/utils_cache_line.h"
+
+static bool _is_cache_line_acting(struct ocf_cache *cache,
+ uint32_t cache_line, ocf_core_id_t core_id,
+ uint64_t start_line, uint64_t end_line)
+{
+ ocf_core_id_t tmp_core_id;
+ uint64_t core_line;
+
+ ocf_metadata_get_core_info(cache, cache_line,
+ &tmp_core_id, &core_line);
+
+ if (core_id != OCF_CORE_ID_INVALID) {
+ if (core_id != tmp_core_id)
+ return false;
+
+ if (core_line < start_line || core_line > end_line)
+ return false;
+
+ } else if (tmp_core_id == OCF_CORE_ID_INVALID) {
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Iterates over cache lines that belong to the core device with
+ * core ID = core_id whose core byte addresses are in the range
+ * [start_byte, end_byte] and applies actor(cache, cache_line) to all
+ * matching cache lines
+ *
+ * Set part_id to PARTITION_INVALID to iterate regardless of partition
+ *
+ * METADATA lock must be held before calling this function
+ */
+int ocf_metadata_actor(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_core_id_t core_id,
+ uint64_t start_byte, uint64_t end_byte,
+ ocf_metadata_actor_t actor)
+{
+ uint32_t step = 0;
+ ocf_cache_line_t i, next_i;
+ uint64_t start_line, end_line;
+ int ret = 0;
+
+ start_line = ocf_bytes_2_lines(cache, start_byte);
+ end_line = ocf_bytes_2_lines(cache, end_byte);
+
+ if (part_id != PARTITION_INVALID) {
+ for (i = cache->user_parts[part_id].runtime->head;
+ i != cache->device->collision_table_entries;
+ i = next_i) {
+ next_i = ocf_metadata_get_partition_next(cache, i);
+
+ if (_is_cache_line_acting(cache, i, core_id,
+ start_line, end_line)) {
+ if (ocf_cache_line_is_used(cache, i))
+ ret = -OCF_ERR_AGAIN;
+ else
+ actor(cache, i);
+ }
+
+ OCF_COND_RESCHED_DEFAULT(step);
+ }
+ } else {
+ for (i = 0; i < cache->device->collision_table_entries; ++i) {
+ if (_is_cache_line_acting(cache, i, core_id,
+ start_line, end_line)) {
+ if (ocf_cache_line_is_used(cache, i))
+ ret = -OCF_ERR_AGAIN;
+ else
+ actor(cache, i);
+ }
+
+ OCF_COND_RESCHED_DEFAULT(step);
+ }
+ }
+
+ return ret;
+}
+
+/* the caller must hold the relevant cache block concurrency reader lock
+ * and the metadata lock
+ */
+void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ ocf_part_id_t partition_id =
+ ocf_metadata_get_partition_id(cache, cache_line);
+
+ ocf_metadata_remove_from_collision(cache, cache_line, partition_id);
+
+ ocf_metadata_remove_from_partition(cache, partition_id, cache_line);
+
+ ocf_freelist_put_cache_line(cache->freelist, cache_line);
+}
+
+static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
+ uint32_t cache_line)
+{
+ ocf_metadata_start_collision_shared_access(cache, cache_line);
+
+ set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache),
+ cache_line);
+
+ /*
+ * This is especially for removing inactive core
+ */
+ metadata_clear_dirty(cache, cache_line);
+
+ ocf_metadata_end_collision_shared_access(cache, cache_line);
+}
+
+/* caller must hold metadata lock
+ * set core_id to -1 to clean the whole cache device
+ */
+int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
+ uint64_t start_byte, uint64_t end_byte)
+{
+ return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
+ start_byte, end_byte, _ocf_metadata_sparse_cache_line);
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_misc.h b/src/spdk/ocf/src/metadata/metadata_misc.h
new file mode 100644
index 000000000..51cdad778
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_misc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_MISC_H__
+#define __METADATA_MISC_H__
+
+/* Hash function intentionally returns consecutive (modulo @hash_table_entries)
+ * values for consecutive @core_line_num. This way it is trivial to sort all
+ * core lines within a single request in ascending hash value order. This kind
+ * of sorting is required to assure that (future) hash bucket metadata locks are
+ * always acquired in fixed order, eliminating the risk of dead locks.
+ */
+static inline ocf_cache_line_t ocf_metadata_hash_func(ocf_cache_t cache,
+ uint64_t core_line_num, ocf_core_id_t core_id)
+{
+ const unsigned int entries = cache->device->hash_table_entries;
+
+ return (ocf_cache_line_t) ((core_line_num + (core_id * (entries / 32)))
+ % entries);
+}
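+
+/*
+ * Illustrative example of the property described above (hypothetical
+ * numbers): with hash_table_entries == 1024 and core_id == 2 the offset is
+ * 2 * (1024 / 32) == 64, so core lines 100, 101, 102 of that core map to
+ * hash values 164, 165, 166 - consecutive core lines always hash to
+ * consecutive buckets modulo the table size.
+ */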
+
+void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
+ ocf_cache_line_t cache_line);
+
+int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
+ uint64_t start_byte, uint64_t end_byte);
+
+typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
+ ocf_cache_line_t cache_line);
+
+int ocf_metadata_actor(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_core_id_t core_id,
+ uint64_t start_byte, uint64_t end_byte,
+ ocf_metadata_actor_t actor);
+
+#endif /* __METADATA_MISC_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_partition.c b/src/spdk/ocf/src/metadata/metadata_partition.c
new file mode 100644
index 000000000..437f400a1
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_partition.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "metadata.h"
+#include "../utils/utils_part.h"
+
+/* Sets the given collision_index as the new _head_ of the Partition list. */
+static void update_partition_head(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_cache_line_t line)
+{
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ part->runtime->head = line;
+}
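+
+/* Throughout this file, a value equal to collision_table_entries serves as
+ * the list terminator ("no entry") for both the prev and next pointers of
+ * the partition list, as well as for an empty list head.
+ */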
+
+/* Adds the given collision_index to the _head_ of the Partition list */
+void ocf_metadata_add_to_partition(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_cache_line_t line)
+{
+ ocf_cache_line_t line_head;
+ ocf_cache_line_t line_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ ENV_BUG_ON(!(line < line_entries));
+
+ ocf_metadata_partition_lock(&cache->metadata.lock, part_id);
+
+ /* First node to be added. */
+ if (!part->runtime->curr_size) {
+
+ update_partition_head(cache, part_id, line);
+ ocf_metadata_set_partition_info(cache, line, part_id,
+ line_entries, line_entries);
+
+ if (!ocf_part_is_valid(part)) {
+ /* Partition becomes non-empty and is not valid,
+ * update the list of partitions
+ */
+ ocf_part_sort(cache);
+ }
+
+ } else {
+ /* Not the first node to be added. */
+ line_head = part->runtime->head;
+
+ ENV_BUG_ON(!(line_head < line_entries));
+
+ ocf_metadata_set_partition_info(cache, line, part_id,
+ line_head, line_entries);
+
+ ocf_metadata_set_partition_prev(cache, line_head, line);
+
+ update_partition_head(cache, part_id, line);
+ }
+
+ part->runtime->curr_size++;
+
+ ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
+}
+
+/* Deletes the node with the given collision_index from the Partition list */
+void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_cache_line_t line)
+{
+ int is_head, is_tail;
+ ocf_cache_line_t prev_line, next_line;
+ uint32_t line_entries = cache->device->collision_table_entries;
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ ENV_BUG_ON(!(line < line_entries));
+
+ ocf_metadata_partition_lock(&cache->metadata.lock, part_id);
+
+ /* Get Partition info */
+ ocf_metadata_get_partition_info(cache, line, NULL,
+ &next_line, &prev_line);
+
+ /* Find out if this node is Partition _head_ */
+ is_head = (prev_line == line_entries);
+ is_tail = (next_line == line_entries);
+
+ /* Case 1: This is the head and the only node in the list.
+ * Unlink it and mark the list as empty.
+ */
+ if (is_head && (part->runtime->curr_size == 1)) {
+ ocf_metadata_set_partition_info(cache, line,
+ part_id, line_entries, line_entries);
+
+ update_partition_head(cache, part_id, line_entries);
+
+ if (!ocf_part_is_valid(part)) {
+ /* Partition becomes empty and is not valid,
+ * update the list of partitions
+ */
+ ocf_part_sort(cache);
+ }
+
+ } else if (is_head) {
+ /* Case 2: This collision_index is the partition list head
+ * and there are more nodes; update the head.
+ */
+ ENV_BUG_ON(!(next_line < line_entries));
+ update_partition_head(cache, part_id, next_line);
+
+ ocf_metadata_set_partition_next(cache, line, line_entries);
+
+ ocf_metadata_set_partition_prev(cache, next_line,
+ line_entries);
+ } else if (is_tail) {
+ /* Case 3: else if this collision_index is partition list tail
+ */
+ ENV_BUG_ON(!(prev_line < line_entries));
+
+ ocf_metadata_set_partition_prev(cache, line, line_entries);
+
+ ocf_metadata_set_partition_next(cache, prev_line,
+ line_entries);
+ } else {
+ /* Case 4: else this collision_index is a middle node.
+ * There is no change to the head and the tail pointers.
+ */
+
+ ENV_BUG_ON(!(next_line < line_entries));
+ ENV_BUG_ON(!(prev_line < line_entries));
+
+ /* Update prev and next nodes */
+ ocf_metadata_set_partition_next(cache, prev_line, next_line);
+
+ ocf_metadata_set_partition_prev(cache, next_line, prev_line);
+
+ /* Update the given node */
+ ocf_metadata_set_partition_info(cache, line, part_id,
+ line_entries, line_entries);
+ }
+
+ part->runtime->curr_size--;
+
+ ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_partition.h b/src/spdk/ocf/src/metadata/metadata_partition.h
new file mode 100644
index 000000000..00605e0cf
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_partition.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_PARTITION_H__
+#define __METADATA_PARTITION_H__
+
+#include "metadata_partition_structs.h"
+#include "../ocf_cache_priv.h"
+
+#define PARTITION_DEFAULT 0
+#define PARTITION_INVALID ((ocf_part_id_t)-1)
+#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)
+
+static inline void ocf_metadata_get_partition_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
+ ocf_cache_line_t *prev_line)
+{
+ cache->metadata.iface.get_partition_info(cache, line, part_id,
+ next_line, prev_line);
+}
+
+static inline ocf_part_id_t ocf_metadata_get_partition_id(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ ocf_part_id_t part_id;
+
+ ocf_metadata_get_partition_info(cache, line, &part_id, NULL, NULL);
+
+ return part_id;
+}
+
+static inline ocf_cache_line_t ocf_metadata_get_partition_next(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ ocf_cache_line_t next;
+
+ ocf_metadata_get_partition_info(cache, line, NULL, &next, NULL);
+
+ return next;
+}
+
+static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ ocf_cache_line_t prev;
+
+ ocf_metadata_get_partition_info(cache, line, NULL, NULL, &prev);
+
+ return prev;
+}
+
+static inline void ocf_metadata_set_partition_next(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t next_line)
+{
+ cache->metadata.iface.set_partition_next(cache, line, next_line);
+}
+
+static inline void ocf_metadata_set_partition_prev(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_cache_line_t prev_line)
+{
+ cache->metadata.iface.set_partition_prev(cache, line, prev_line);
+}
+
+static inline void ocf_metadata_set_partition_info(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ ocf_part_id_t part_id, ocf_cache_line_t next_line,
+ ocf_cache_line_t prev_line)
+{
+ cache->metadata.iface.set_partition_info(cache, line, part_id,
+ next_line, prev_line);
+}
+
+void ocf_metadata_add_to_partition(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_cache_line_t line);
+
+void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
+ ocf_part_id_t part_id, ocf_cache_line_t line);
+
+#endif /* __METADATA_PARTITION_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_partition_structs.h b/src/spdk/ocf/src/metadata/metadata_partition_structs.h
new file mode 100644
index 000000000..58d7e56c3
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_partition_structs.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_PARTITION_STRUCTS_H__
+#define __METADATA_PARTITION_STRUCTS_H__
+
+#include "../utils/utils_list.h"
+#include "../cleaning/cleaning.h"
+#include "../eviction/eviction.h"
+
+struct ocf_user_part_config {
+ char name[OCF_IO_CLASS_NAME_MAX];
+ uint32_t min_size;
+ uint32_t max_size;
+ int16_t priority;
+ ocf_cache_mode_t cache_mode;
+ struct {
+ uint8_t valid : 1;
+ uint8_t added : 1;
+ uint8_t eviction : 1;
+ /*!< This bit is set during partition sorting and means
+ * that eviction from this partition is allowed
+ */
+ } flags;
+};
+
+struct ocf_user_part_runtime {
+ uint32_t curr_size;
+ uint32_t head;
+ struct eviction_policy eviction;
+ struct cleaning_policy cleaning;
+};
+
+struct ocf_user_part {
+ struct ocf_user_part_config *config;
+ struct ocf_user_part_runtime *runtime;
+
+ struct ocf_lst_entry lst_valid;
+};
+
+
+#endif /* __METADATA_PARTITION_STRUCTS_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_raw.c b/src/spdk/ocf/src/metadata/metadata_raw.c
new file mode 100644
index 000000000..5a57b27d4
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_hash.h"
+#include "metadata_raw.h"
+#include "metadata_io.h"
+#include "metadata_raw_atomic.h"
+#include "../ocf_def_priv.h"
+#include "../ocf_priv.h"
+
+#define OCF_METADATA_RAW_DEBUG 0
+
+#if 1 == OCF_METADATA_RAW_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw] %s\n", __func__)
+
+#define OCF_DEBUG_MSG(cache, msg) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - %s\n", \
+ __func__, msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+/*******************************************************************************
+ * Common RAW Implementation
+ ******************************************************************************/
+/*
+ * Check if page is valid for specified RAW descriptor
+ */
+static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
+{
+ ENV_BUG_ON(page < raw->ssd_pages_offset);
+ ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
+
+ return true;
+}
+
+/*******************************************************************************
+ * RAW RAM Implementation
+ ******************************************************************************/
+#define _RAW_RAM_ADDR(raw, line) \
+ (raw->mem_pool + (((uint64_t)raw->entry_size * (line))))
+
+#define _RAW_RAM_PAGE(raw, line) \
+ ((line) / raw->entries_in_page)
+
+#define _RAW_RAM_PAGE_SSD(raw, line) \
+ (raw->ssd_pages_offset + _RAW_RAM_PAGE(raw, line))
+
+#define _RAW_RAM_ADDR_PAGE(raw, line) \
+ (_RAW_RAM_ADDR(raw, \
+ _RAW_RAM_PAGE(raw, line) * raw->entries_in_page))
+
+#define _RAW_RAM_GET(raw, line, data) \
+ env_memcpy(data, raw->entry_size, _RAW_RAM_ADDR(raw, (line)), \
+ raw->entry_size)
+
+#define _RAW_RAM_SET(raw, line, data) \
+ env_memcpy(_RAW_RAM_ADDR(raw, line), raw->entry_size, \
+ data, raw->entry_size)
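+
+/*
+ * Illustrative example (hypothetical sizes): with entry_size == 64 and
+ * entries_in_page == PAGE_SIZE / 64 == 64, entry 130 lives on relative page
+ * _RAW_RAM_PAGE(raw, 130) == 2, at byte offset 130 * 64 == 8320 from the
+ * start of mem_pool, i.e. on SSD page ssd_pages_offset + 2.
+ */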
+
+
+
+/*
+ * RAM Implementation - De-Initialize
+ */
+static int _raw_ram_deinit(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ OCF_DEBUG_TRACE(cache);
+
+ if (raw->mem_pool) {
+ env_secure_free(raw->mem_pool, raw->mem_pool_limit);
+ raw->mem_pool = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * RAM Implementation - Initialize
+ */
+static int _raw_ram_init(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw)
+{
+ size_t mem_pool_size;
+
+ OCF_DEBUG_TRACE(cache);
+
+ /* Allocate memory pool for entries */
+ mem_pool_size = raw->ssd_pages;
+ mem_pool_size *= PAGE_SIZE;
+ raw->mem_pool_limit = mem_pool_size;
+ raw->mem_pool = env_secure_alloc(mem_pool_size);
+ if (!raw->mem_pool)
+ return -OCF_ERR_NO_MEM;
+ ENV_BUG_ON(env_memset(raw->mem_pool, mem_pool_size, 0));
+
+ raw->lock_page = lock_page_pfn;
+ raw->unlock_page = unlock_page_pfn;
+
+ return 0;
+}
+
+/*
+ * RAM Implementation - Size of
+ */
+static size_t _raw_ram_size_of(ocf_cache_t cache, struct ocf_metadata_raw *raw)
+{
+ size_t size;
+
+ size = raw->ssd_pages;
+ size *= PAGE_SIZE;
+
+ return size;
+}
+
+/*
+ * RAM Implementation - Size on SSD
+ */
+static uint32_t _raw_ram_size_on_ssd(struct ocf_metadata_raw *raw)
+{
+ const size_t alignment = 128 * KiB / PAGE_SIZE;
+
+ return OCF_DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
+}
+
+/*
+ * RAM Implementation - Checksum
+ */
+static uint32_t _raw_ram_checksum(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ uint64_t i;
+ uint32_t step = 0;
+ uint32_t crc = 0;
+
+ for (i = 0; i < raw->ssd_pages; i++) {
+ crc = env_crc32(crc, raw->mem_pool + PAGE_SIZE * i, PAGE_SIZE);
+ OCF_COND_RESCHED(step, 10000);
+ }
+
+ return crc;
+}
+
+/*
+ * RAM Implementation - Entry page number
+ */
+uint32_t _raw_ram_page(struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ ENV_BUG_ON(entry >= raw->entries);
+
+ return _RAW_RAM_PAGE(raw, entry);
+}
+
+/*
+ * RAM Implementation - Get entry
+ */
+static int _raw_ram_get(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data)
+{
+ ENV_BUG_ON(!_raw_is_valid(raw, entry));
+
+ return _RAW_RAM_GET(raw, entry, data);
+}
+
+/*
+ * RAM Implementation - Read only entry access
+ */
+static void *_raw_ram_access(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ ENV_BUG_ON(!_raw_is_valid(raw, entry));
+
+ return _RAW_RAM_ADDR(raw, entry);
+}
+
+/*
+ * RAM Implementation - Set Entry
+ */
+static int _raw_ram_set(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data)
+{
+ ENV_BUG_ON(!_raw_is_valid(raw, entry));
+
+ return _RAW_RAM_SET(raw, entry, data);
+}
+
+struct _raw_ram_load_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
+/*
+ * RAM Implementation - Load all IO callback
+ */
+static int _raw_ram_load_all_drain(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *priv)
+{
+ struct _raw_ram_load_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint32_t size = raw->entry_size * raw->entries_in_page;
+ ocf_cache_line_t line;
+ uint32_t raw_page;
+
+ ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
+ ENV_BUG_ON(size > PAGE_SIZE);
+
+ raw_page = page - raw->ssd_pages_offset;
+ line = raw_page * raw->entries_in_page;
+
+ OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
+
+ ctx_data_rd_check(cache->owner, _RAW_RAM_ADDR(raw, line), data, size);
+ ctx_data_seek(cache->owner, data, ctx_data_seek_current,
+ PAGE_SIZE - size);
+
+ return 0;
+}
+
+static void _raw_ram_load_all_complete(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct _raw_ram_load_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
+/*
+ * RAM Implementation - Load all metadata elements from SSD
+ */
+static void _raw_ram_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct _raw_ram_load_all_context *context;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ context = env_vmalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_read_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ _raw_ram_load_all_drain, _raw_ram_load_all_complete);
+ if (result)
+ _raw_ram_load_all_complete(cache, context, result);
+}
+
+struct _raw_ram_flush_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
+/*
+ * RAM Implementation - Flush IO callback - Fill page
+ */
+static int _raw_ram_flush_all_fill(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *priv)
+{
+ struct _raw_ram_flush_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint32_t size = raw->entry_size * raw->entries_in_page;
+ ocf_cache_line_t line;
+ uint32_t raw_page;
+
+ ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
+ ENV_BUG_ON(size > PAGE_SIZE);
+
+ raw_page = page - raw->ssd_pages_offset;
+ line = raw_page * raw->entries_in_page;
+
+ OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
+
+ if (raw->lock_page)
+ raw->lock_page(cache, raw, raw_page);
+ ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
+ if (raw->unlock_page)
+ raw->unlock_page(cache, raw, raw_page);
+
+ ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
+
+ return 0;
+}
+
+static void _raw_ram_flush_all_complete(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct _raw_ram_flush_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
+/*
+ * RAM Implementation - Flush all elements
+ */
+static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct _raw_ram_flush_all_context *context;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ context = env_vmalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ _raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
+ if (result)
+ _raw_ram_flush_all_complete(cache, context, result);
+}
+
+/*
+ * RAW RAM Implementation - Mark to Flush
+ */
+static void _raw_ram_flush_mark(ocf_cache_t cache,
+ struct ocf_request *req, uint32_t map_idx, int to_state,
+ uint8_t start, uint8_t stop)
+{
+ if (to_state == DIRTY || to_state == CLEAN) {
+ req->map[map_idx].flush = true;
+ req->info.flush_metadata = true;
+ }
+}
+
+/*******************************************************************************
+ * RAW RAM Implementation - Do Flush Asynchronously
+ ******************************************************************************/
+struct _raw_ram_flush_ctx {
+ struct ocf_metadata_raw *raw;
+ struct ocf_request *req;
+ ocf_req_end_t complete;
+ env_atomic flush_req_cnt;
+ int error;
+};
+
+static void _raw_ram_flush_do_asynch_io_complete(ocf_cache_t cache,
+ void *context, int error)
+{
+ struct _raw_ram_flush_ctx *ctx = context;
+
+ if (error) {
+ ctx->error = error;
+ ocf_metadata_error(cache);
+ }
+
+ if (env_atomic_dec_return(&ctx->flush_req_cnt))
+ return;
+
+ OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
+
+ /* Call the metadata flush completion callback */
+ ctx->req->error |= ctx->error;
+ ctx->complete(ctx->req, ctx->error);
+
+ env_free(ctx);
+}
+
+/*
+ * RAM Implementation - Flush IO callback - Fill page
+ */
+static int _raw_ram_flush_do_asynch_fill(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *context)
+{
+ ocf_cache_line_t line;
+ uint32_t raw_page;
+ struct _raw_ram_flush_ctx *ctx = context;
+ struct ocf_metadata_raw *raw = NULL;
+ uint32_t size;
+
+ ENV_BUG_ON(!ctx);
+
+ raw = ctx->raw;
+ ENV_BUG_ON(!raw);
+
+ size = raw->entry_size * raw->entries_in_page;
+ ENV_BUG_ON(size > PAGE_SIZE);
+
+ raw_page = page - raw->ssd_pages_offset;
+ line = raw_page * raw->entries_in_page;
+
+ OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
+
+ if (raw->lock_page)
+ raw->lock_page(cache, raw, raw_page);
+ ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
+ if (raw->unlock_page)
+ raw->unlock_page(cache, raw, raw_page);
+
+ ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
+
+ return 0;
+}
+
+/*
+ * RAW RAM Implementation - Do Flush
+ */
+
+int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
+{
+ uint32_t *page1 = (uint32_t *)item1;
+ uint32_t *page2 = (uint32_t *)item2;
+
+ if (*page1 > *page2)
+ return 1;
+
+ if (*page1 < *page2)
+ return -1;
+
+ return 0;
+}
+
+static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
+ uint32_t *pages_tab, struct ocf_metadata_raw *raw,
+ int *pages_to_flush) {
+ int i, j = 0;
+ int line_no = req->core_line_count;
+ struct ocf_map_info *map;
+
+ for (i = 0; i < line_no; i++) {
+ map = &req->map[i];
+ if (map->flush) {
+ pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
+ j++;
+ }
+ }
+
+ *pages_to_flush = j;
+}
+
+static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ int result = 0, i;
+ uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
+ uint32_t *pages_tab;
+ int line_no = req->core_line_count;
+ int pages_to_flush;
+ uint32_t start_page = 0;
+ uint32_t count = 0;
+ struct _raw_ram_flush_ctx *ctx;
+
+ ENV_BUG_ON(!complete);
+
+ OCF_DEBUG_TRACE(cache);
+
+ if (!req->info.flush_metadata) {
+ /* Nothing to flush; call the completion callback */
+ complete(req, 0);
+ return 0;
+ }
+
+ ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
+ if (!ctx) {
+ complete(req, -OCF_ERR_NO_MEM);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ ctx->req = req;
+ ctx->complete = complete;
+ ctx->raw = raw;
+ env_atomic_set(&ctx->flush_req_cnt, 1);
+
+ if (line_no <= MAX_STACK_TAB_SIZE) {
+ pages_tab = __pages_tab;
+ } else {
+ pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
+ if (!pages_tab) {
+ env_free(ctx);
+ complete(req, -OCF_ERR_NO_MEM);
+ return -OCF_ERR_NO_MEM;
+ }
+ }
+
+ /* Keep flush_req_cnt biased by one while pages are sorted and
+ * submitted, to prevent the asynchronous context from being freed
+ * before all submissions have been issued.
+ */
+
+ __raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
+ &pages_to_flush);
+
+ env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
+ _raw_ram_flush_do_page_cmp, NULL);
+
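+ /* Coalesce duplicates and runs of consecutive pages into single IOs.
+ * For example (hypothetical content), a sorted pages_tab of
+ * {2, 2, 3, 4, 8} results in two submissions: one starting at page 2
+ * with count 3 and one starting at page 8 with count 1.
+ */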
+ i = 0;
+ while (i < pages_to_flush) {
+ start_page = pages_tab[i];
+ count = 1;
+
+ while (true) {
+ if ((i + 1) >= pages_to_flush)
+ break;
+
+ if (pages_tab[i] == pages_tab[i + 1]) {
+ i++;
+ continue;
+ }
+
+ if ((pages_tab[i] + 1) != pages_tab[i + 1])
+ break;
+
+ i++;
+ count++;
+ }
+
+
+ env_atomic_inc(&ctx->flush_req_cnt);
+
+ result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
+ raw->ssd_pages_offset + start_page, count,
+ _raw_ram_flush_do_asynch_fill,
+ _raw_ram_flush_do_asynch_io_complete);
+
+ if (result)
+ break;
+
+ i++;
+ }
+
+ _raw_ram_flush_do_asynch_io_complete(cache, ctx, result);
+
+ if (line_no > MAX_STACK_TAB_SIZE)
+ env_free(pages_tab);
+
+ return result;
+}
+
+/*******************************************************************************
+ * RAW Interfaces definitions
+ ******************************************************************************/
+#include "metadata_raw_dynamic.h"
+#include "metadata_raw_volatile.h"
+
+static const struct raw_iface IRAW[metadata_raw_type_max] = {
+ [metadata_raw_type_ram] = {
+ .init = _raw_ram_init,
+ .deinit = _raw_ram_deinit,
+ .size_of = _raw_ram_size_of,
+ .size_on_ssd = _raw_ram_size_on_ssd,
+ .checksum = _raw_ram_checksum,
+ .page = _raw_ram_page,
+ .get = _raw_ram_get,
+ .set = _raw_ram_set,
+ .access = _raw_ram_access,
+ .load_all = _raw_ram_load_all,
+ .flush_all = _raw_ram_flush_all,
+ .flush_mark = _raw_ram_flush_mark,
+ .flush_do_asynch = _raw_ram_flush_do_asynch,
+ },
+ [metadata_raw_type_dynamic] = {
+ .init = raw_dynamic_init,
+ .deinit = raw_dynamic_deinit,
+ .size_of = raw_dynamic_size_of,
+ .size_on_ssd = raw_dynamic_size_on_ssd,
+ .checksum = raw_dynamic_checksum,
+ .page = raw_dynamic_page,
+ .get = raw_dynamic_get,
+ .set = raw_dynamic_set,
+ .access = raw_dynamic_access,
+ .load_all = raw_dynamic_load_all,
+ .flush_all = raw_dynamic_flush_all,
+ .flush_mark = raw_dynamic_flush_mark,
+ .flush_do_asynch = raw_dynamic_flush_do_asynch,
+ },
+ [metadata_raw_type_volatile] = {
+ .init = _raw_ram_init,
+ .deinit = _raw_ram_deinit,
+ .size_of = _raw_ram_size_of,
+ .size_on_ssd = raw_volatile_size_on_ssd,
+ .checksum = raw_volatile_checksum,
+ .page = _raw_ram_page,
+ .get = _raw_ram_get,
+ .set = _raw_ram_set,
+ .access = _raw_ram_access,
+ .load_all = raw_volatile_load_all,
+ .flush_all = raw_volatile_flush_all,
+ .flush_mark = raw_volatile_flush_mark,
+ .flush_do_asynch = raw_volatile_flush_do_asynch,
+ },
+ [metadata_raw_type_atomic] = {
+ .init = _raw_ram_init,
+ .deinit = _raw_ram_deinit,
+ .size_of = _raw_ram_size_of,
+ .size_on_ssd = _raw_ram_size_on_ssd,
+ .checksum = _raw_ram_checksum,
+ .page = _raw_ram_page,
+ .get = _raw_ram_get,
+ .set = _raw_ram_set,
+ .access = _raw_ram_access,
+ .load_all = _raw_ram_load_all,
+ .flush_all = _raw_ram_flush_all,
+ .flush_mark = raw_atomic_flush_mark,
+ .flush_do_asynch = raw_atomic_flush_do_asynch,
+ },
+};
+
+/*******************************************************************************
+ * RAW Top interface implementation
+ ******************************************************************************/
+
+int ocf_metadata_raw_init(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw)
+{
+ ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
+ ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
+
+ raw->iface = &(IRAW[raw->raw_type]);
+ return raw->iface->init(cache, lock_page_pfn, unlock_page_pfn, raw);
+}
+
+int ocf_metadata_raw_deinit(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ int result;
+
+ if (!raw->iface)
+ return 0;
+
+ result = raw->iface->deinit(cache, raw);
+ raw->iface = NULL;
+
+ return result;
+}
+
+size_t ocf_metadata_raw_size_on_ssd(struct ocf_metadata_raw* raw)
+{
+ ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
+ ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
+
+ return IRAW[raw->raw_type].size_on_ssd(raw);
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_raw.h b/src/spdk/ocf/src/metadata/metadata_raw.h
new file mode 100644
index 000000000..c9d46fbe8
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_RAW_H__
+#define __METADATA_RAW_H__
+
+/**
+ * @file metadata_raw.h
+ * @brief Metadata RAW container implementation
+ */
+
+/**
+ * @brief Metadata raw type
+ */
+enum ocf_metadata_raw_type {
+ /**
+ * @brief Default implementation with support for
+ * flushing to / loading from SSD
+ */
+ metadata_raw_type_ram = 0,
+
+ /**
+ * @brief Dynamic implementation, elements are allocated when first
+ * time called
+ */
+ metadata_raw_type_dynamic,
+
+ /**
+ * @brief This container does not flush metadata to SSD and does not
+ * support loading from SSD
+ */
+ metadata_raw_type_volatile,
+
+ /**
+ * @brief Implementation for atomic device used as cache
+ */
+ metadata_raw_type_atomic,
+
+ metadata_raw_type_max, /*!< MAX */
+ metadata_raw_type_min = metadata_raw_type_ram /*!< MIN */
+};
+
+struct ocf_metadata_raw;
+
+/**
+ * @brief Container page lock/unlock callback
+ */
+typedef void (*ocf_flush_page_synch_t)(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t page);
+
+/**
+ * @brief RAW instance descriptor
+ */
+struct ocf_metadata_raw {
+ /**
+ * @name Metadata and RAW types
+ */
+ enum ocf_metadata_segment metadata_segment; /*!< Metadata segment */
+ enum ocf_metadata_raw_type raw_type; /*!< RAW implementation type */
+
+ /**
+ * @name Metadata elements description
+ */
+ uint32_t entry_size; /*!< Size of particular entry */
+ uint32_t entries_in_page; /*!< Number of entries in one page */
+ uint64_t entries; /*!< Number of entries */
+
+ /**
+ * @name Location on cache device description
+ */
+ uint64_t ssd_pages_offset; /*!< SSD (Cache device) Page offset */
+ uint64_t ssd_pages; /*!< Number of pages required */
+
+ const struct raw_iface *iface; /*!< RAW container interface*/
+
+ /**
+ * @name Private RAW elements
+ */
+ void *mem_pool; /*!< Private memory pool*/
+
+ size_t mem_pool_limit; /*!< Current memory pool size (limit) */
+
+ void *priv; /*!< Private data - context */
+
+ ocf_flush_page_synch_t lock_page; /*!< Page lock callback */
+ ocf_flush_page_synch_t unlock_page; /*!< Page unlock callback */
+};
+
+/**
+ * RAW container interface
+ */
+struct raw_iface {
+ int (*init)(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw);
+
+ int (*deinit)(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+ size_t (*size_of)(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+ /**
+ * @brief Return the size the metadata occupies on the cache device
+ *
+ * @param raw RAW container of metadata
+ *
+ * @return Number of pages (4 kiB) on cache device
+ */
+ uint32_t (*size_on_ssd)(struct ocf_metadata_raw *raw);
+
+ uint32_t (*checksum)(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+ uint32_t (*page)(struct ocf_metadata_raw *raw, uint32_t entry);
+
+ int (*get)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data);
+
+ int (*set)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data);
+
+ void* (*access)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry);
+
+ void (*load_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ void (*flush_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ void (*flush_mark)(ocf_cache_t cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start,
+ uint8_t stop);
+
+ int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
+ struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+};
+
+/**
+ * @brief Initialize RAW instance
+ *
+ * @param cache - Cache instance
+ * @param lock_page_pfn - Optional page lock callback
+ * @param unlock_page_pfn - Optional page unlock callback
+ * @param raw - RAW descriptor
+ * @return 0 - Operation success, otherwise error
+ */
+int ocf_metadata_raw_init(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw);
+
+/**
+ * @brief De-Initialize RAW instance
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @return 0 - Operation success, otherwise error
+ */
+int ocf_metadata_raw_deinit(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+/**
+ * @brief Get memory footprint
+ *
+ * @param cache Cache instance
+ * @param raw RAW descriptor
+ * @return Memory footprint
+ */
+static inline size_t ocf_metadata_raw_size_of(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ if (!raw->iface)
+ return 0;
+
+ return raw->iface->size_of(cache, raw);
+}
+
+/**
+ * @brief Get SSD footprint
+ *
+ * @param raw - RAW descriptor
+ * @return Size on SSD
+ */
+size_t ocf_metadata_raw_size_on_ssd(struct ocf_metadata_raw* raw);
+
+/**
+ * @brief Calculate metadata checksum
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @return Checksum
+ */
+static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache,
+ struct ocf_metadata_raw* raw)
+{
+ return raw->iface->checksum(cache, raw);
+}
+
+/**
+ * @brief Calculate entry page index
+ *
+ * @param raw - RAW descriptor
+ * @param entry - Entry number
+ * @return Page index
+ */
+static inline uint32_t ocf_metadata_raw_page(struct ocf_metadata_raw* raw,
+ uint32_t entry)
+{
+ return raw->iface->page(raw, entry);
+}
+
+/**
+ * @brief Get specified element of metadata
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param entry - Entry to get
+ * @param data - Data where metadata entry will be copied into
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_get(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry, void *data)
+{
+ return raw->iface->get(cache, raw, entry, data);
+}
+
+/**
+ * @brief Access specified element of metadata directly (for write)
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param entry - Entry to access
+ * @return Pointer to the accessed data, or NULL on error
+ */
+static inline void *ocf_metadata_raw_wr_access(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ return raw->iface->access(cache, raw, entry);
+}
+
+/**
+ * @brief Access specified element of metadata directly (read only)
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param entry - Entry to access
+ * @return Pointer to the accessed data, or NULL on error
+ */
+static inline const void *ocf_metadata_raw_rd_access(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ return raw->iface->access(cache, raw, entry);
+}
+
+/**
+ * @brief Set specified element of metadata
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param entry - Entry to be set
+ * @param data - Data that will be copied into the metadata entry
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_set(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry, void *data)
+{
+ return raw->iface->set(cache, raw, entry, data);
+}
+
+/**
+ * @brief Load all entries from the cache device (SSD)
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param cmpl - Completion callback
+ * @param priv - Completion callback context
+ */
+static inline void ocf_metadata_raw_load_all(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ raw->iface->load_all(cache, raw, cmpl, priv);
+}
+
+/**
+ * @brief Flush all entries to the cache device (SSD)
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param cmpl - Completion callback
+ * @param priv - Completion callback context
+ */
+static inline void ocf_metadata_raw_flush_all(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ raw->iface->flush_all(cache, raw, cmpl, priv);
+}
+
+
+static inline void ocf_metadata_raw_flush_mark(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+ raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop);
+}
+
+static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ return raw->iface->flush_do_asynch(cache, req, raw, complete);
+}
+
+/*
+ * Check if line is valid for specified RAW descriptor
+ */
+static inline bool _raw_is_valid(struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ if (!raw)
+ return false;
+
+ if (entry >= raw->entries)
+ return false;
+
+ return true;
+}
+
+static inline void _raw_bug_on(struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ ENV_BUG_ON(!_raw_is_valid(raw, entry));
+}
+
+#define MAX_STACK_TAB_SIZE 32
+
+int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2);
+
+#endif /* __METADATA_RAW_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_atomic.c b/src/spdk/ocf/src/metadata/metadata_raw_atomic.c
new file mode 100644
index 000000000..46e2907ec
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_atomic.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_io.h"
+#include "metadata_hash.h"
+#include "metadata_raw.h"
+#include "metadata_raw_atomic.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_def_priv.h"
+
+#define OCF_METADATA_RAW_ATOMIC_DEBUG 0
+
+#if 1 == OCF_METADATA_RAW_ATOMIC_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s\n", __func__)
+
+#define OCF_DEBUG_MSG(cache, msg) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - %s\n", \
+ __func__, msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+struct _raw_atomic_flush_ctx {
+ struct ocf_request *req;
+ ocf_req_end_t complete;
+ env_atomic flush_req_cnt;
+};
+
+static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
+ int error)
+{
+ if (error)
+ ctx->req->error = error;
+
+ if (env_atomic_dec_return(&ctx->flush_req_cnt))
+ return;
+
+ if (ctx->req->error)
+ ocf_metadata_error(ctx->req->cache);
+
+ /* Call the metadata flush completion callback */
+ OCF_DEBUG_MSG(ctx->req->cache, "Asynchronous flushing complete");
+
+ ctx->complete(ctx->req, ctx->req->error);
+
+ env_free(ctx);
+}
+
+static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
+{
+ struct _raw_atomic_flush_ctx *ctx = io->priv1;
+
+ ocf_io_put(io); /* Release IO */
+
+ _raw_atomic_io_discard_cmpl(ctx, error);
+}
+
+static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
+ uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
+{
+ struct ocf_request *req = context;
+ struct ocf_io *io;
+
+ io = ocf_new_cache_io(cache, NULL, start_addr, len, OCF_WRITE, 0, 0);
+ if (!io) {
+ req->error = -OCF_ERR_NO_MEM;
+ return req->error;
+ }
+
+ OCF_DEBUG_PARAM(cache, "Page to flushing = %u, count of pages = %u",
+ start_line, len);
+
+ env_atomic_inc(&ctx->flush_req_cnt);
+
+ ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end);
+
+ if (cache->device->volume.features.discard_zeroes)
+ ocf_volume_submit_discard(io);
+ else
+ ocf_volume_submit_write_zeroes(io);
+
+ return req->error;
+}
+
+void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+ if (to_state == INVALID) {
+ req->map[map_idx].flush = true;
+ req->map[map_idx].start_flush = start;
+ req->map[map_idx].stop_flush = stop;
+ req->info.flush_metadata = true;
+ }
+}
+
+#define MAX_STACK_TAB_SIZE 32
+
+static inline void _raw_atomic_add_page(struct ocf_cache *cache,
+ uint32_t *clines_tab, uint64_t line, int *idx)
+{
+ clines_tab[*idx] = ocf_metadata_map_lg2phy(cache, line);
+ (*idx)++;
+}
+
+static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
+ struct ocf_request *req, int map_idx,
+ struct _raw_atomic_flush_ctx *ctx)
+{
+ struct ocf_map_info *map = &req->map[map_idx];
+ uint32_t len = 0;
+ uint64_t start_addr;
+ int result = 0;
+
+ start_addr = ocf_metadata_map_lg2phy(cache, map->coll_idx);
+ start_addr *= ocf_line_size(cache);
+ start_addr += cache->device->metadata_offset;
+
+ start_addr += SECTORS_TO_BYTES(map->start_flush);
+ len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
+ len += SECTORS_TO_BYTES(1);
+
+ result = _raw_atomic_io_discard_do(cache, req, start_addr, len, ctx);
+
+ return result;
+}
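+
+/*
+ * Illustrative example for the length computed above (assuming 512-byte
+ * sectors): with start_flush == 2 and stop_flush == 5 the discarded range is
+ * SECTORS_TO_BYTES(5 - 2) + SECTORS_TO_BYTES(1) == 2048 bytes, i.e. the four
+ * sectors 2..5 inclusive.
+ */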
+
+int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
+ struct ocf_metadata_raw *raw, ocf_req_end_t complete)
+{
+ int result = 0, i;
+ uint32_t __clines_tab[MAX_STACK_TAB_SIZE];
+ uint32_t *clines_tab;
+ int clines_to_flush = 0;
+ uint32_t len = 0;
+ int line_no = req->core_line_count;
+ struct ocf_map_info *map;
+ uint64_t start_addr;
+ struct _raw_atomic_flush_ctx *ctx;
+
+ ENV_BUG_ON(!complete);
+
+ if (!req->info.flush_metadata) {
+ /* Nothing to flush; call the completion callback */
+ complete(req, 0);
+ return 0;
+ }
+
+ ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
+ if (!ctx) {
+ complete(req, -OCF_ERR_NO_MEM);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ ctx->req = req;
+ ctx->complete = complete;
+ env_atomic_set(&ctx->flush_req_cnt, 1);
+
+ if (line_no == 1) {
+ map = &req->map[0];
+ if (map->flush && map->status != LOOKUP_MISS) {
+ result = _raw_atomic_flush_do_asynch_sec(cache, req,
+ 0, ctx);
+ }
+ _raw_atomic_io_discard_cmpl(ctx, result);
+ return result;
+ }
+
+ if (line_no <= MAX_STACK_TAB_SIZE) {
+ clines_tab = __clines_tab;
+ } else {
+ clines_tab = env_zalloc(sizeof(*clines_tab) * line_no,
+ ENV_MEM_NOIO);
+ if (!clines_tab) {
+ complete(req, -OCF_ERR_NO_MEM);
+ env_free(ctx);
+ return -OCF_ERR_NO_MEM;
+ }
+ }
+
+ for (i = 0; i < line_no; i++) {
+ map = &req->map[i];
+
+ if (!map->flush || map->status == LOOKUP_MISS)
+ continue;
+
+ if (i == 0) {
+ /* First */
+ if (map->start_flush) {
+ _raw_atomic_flush_do_asynch_sec(cache, req, i,
+ ctx);
+ } else {
+ _raw_atomic_add_page(cache, clines_tab,
+ map->coll_idx, &clines_to_flush);
+ }
+ } else if (i == (line_no - 1)) {
+ /* Last */
+ if (map->stop_flush != ocf_line_end_sector(cache)) {
+ _raw_atomic_flush_do_asynch_sec(cache, req,
+ i, ctx);
+ } else {
+ _raw_atomic_add_page(cache, clines_tab,
+ map->coll_idx, &clines_to_flush);
+ }
+ } else {
+ /* Middle */
+ _raw_atomic_add_page(cache, clines_tab, map->coll_idx,
+ &clines_to_flush);
+ }
+
+ }
+
+ env_sort(clines_tab, clines_to_flush, sizeof(*clines_tab),
+ _raw_ram_flush_do_page_cmp, NULL);
+
+ i = 0;
+ while (i < clines_to_flush) {
+ start_addr = clines_tab[i];
+ start_addr *= ocf_line_size(cache);
+ start_addr += cache->device->metadata_offset;
+ len = ocf_line_size(cache);
+
+ while (true) {
+ if ((i + 1) >= clines_to_flush)
+ break;
+
+ if ((clines_tab[i] + 1) != clines_tab[i + 1])
+ break;
+
+ i++;
+ len += ocf_line_size(cache);
+ }
+
+ result |= _raw_atomic_io_discard_do(cache, req, start_addr,
+ len, ctx);
+
+ if (result)
+ break;
+
+ i++;
+ }
+
+ _raw_atomic_io_discard_cmpl(ctx, result);
+
+ if (line_no > MAX_STACK_TAB_SIZE)
+ env_free(clines_tab);
+
+ return result;
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_atomic.h b/src/spdk/ocf/src/metadata/metadata_raw_atomic.h
new file mode 100644
index 000000000..cbd5bfe5d
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_atomic.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_RAW_ATOMIC_H__
+#define __METADATA_RAW_ATOMIC_H__
+
+void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
+
+int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
+ struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+
+#endif /* __METADATA_RAW_ATOMIC_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_dynamic.c b/src/spdk/ocf/src/metadata/metadata_raw_dynamic.c
new file mode 100644
index 000000000..d558b6c14
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_dynamic.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_hash.h"
+#include "metadata_raw.h"
+#include "metadata_raw_dynamic.h"
+#include "metadata_io.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
+#include "../utils/utils_io.h"
+#include "../ocf_request.h"
+#include "../ocf_def_priv.h"
+#include "../ocf_priv.h"
+
+#define OCF_METADATA_RAW_DEBUG 0
+
+#if 1 == OCF_METADATA_RAW_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s\n", __func__)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+/*******************************************************************************
+ * Common RAW Implementation
+ ******************************************************************************/
+
+/*
+ * Check if page is valid for specified RAW descriptor
+ */
+static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
+{
+ ENV_BUG_ON(page < raw->ssd_pages_offset);
+ ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
+
+ return true;
+}
+
+/*******************************************************************************
+ * RAW dynamic Implementation
+ ******************************************************************************/
+
+#define _RAW_DYNAMIC_PAGE(raw, line) \
+ ((line) / raw->entries_in_page)
+
+#define _RAW_DYNAMIC_PAGE_OFFSET(raw, line) \
+ ((line % raw->entries_in_page) * raw->entry_size)
+
+/*
+ * RAW DYNAMIC control structure
+ */
+struct _raw_ctrl {
+ env_mutex lock;
+ env_atomic count;
+ void *pages[];
+};
+
+static void *_raw_dynamic_get_item(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ void *new = NULL;
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ uint32_t page = _RAW_DYNAMIC_PAGE(raw, entry);
+
+ ENV_BUG_ON(!_raw_is_valid(raw, entry));
+
+ OCF_DEBUG_PARAM(cache, "Accessing item %u on page %u", entry, page);
+
+ if (!ctrl->pages[page]) {
+ /* No page yet - allocate one and set it */
+
+ /* This RAW container has restrictions that must be respected:
+ * 1. allocation must not happen in atomic context
+ * 2. only one allocator may run at a time
+ */
+
+ ENV_BUG_ON(env_in_interrupt());
+
+ env_mutex_lock(&ctrl->lock);
+
+ if (ctrl->pages[page]) {
+ /* Page has been already allocated, skip allocation */
+ goto _raw_dynamic_get_item_SKIP;
+ }
+
+ OCF_DEBUG_PARAM(cache, "New page allocation - %u", page);
+
+ new = env_secure_alloc(PAGE_SIZE);
+ if (new) {
+ ENV_BUG_ON(env_memset(new, PAGE_SIZE, 0));
+ ctrl->pages[page] = new;
+ env_atomic_inc(&ctrl->count);
+ }
+
+_raw_dynamic_get_item_SKIP:
+
+ env_mutex_unlock(&ctrl->lock);
+ }
+
+ if (ctrl->pages[page])
+ return ctrl->pages[page] + _RAW_DYNAMIC_PAGE_OFFSET(raw, entry);
+
+ return NULL;
+}
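+
+/*
+ * Note: the pattern above is a lazy, mutex-protected page allocation - the
+ * page pointer is re-checked under ctrl->lock so that callers racing on the
+ * same page allocate it only once. Only pages that were ever accessed
+ * consume memory, which is what raw_dynamic_size_of() reports via
+ * ctrl->count.
+ */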
+
+/*
+* RAW DYNAMIC Implementation - De-Initialize
+*/
+int raw_dynamic_deinit(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ uint32_t i;
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+
+ if (!ctrl)
+ return 0;
+
+ OCF_DEBUG_TRACE(cache);
+
+ for (i = 0; i < raw->ssd_pages; i++)
+ env_secure_free(ctrl->pages[i], PAGE_SIZE);
+
+ env_mutex_destroy(&ctrl->lock);
+
+ env_vfree(ctrl);
+ raw->priv = NULL;
+
+ return 0;
+}
+
+/*
+ * RAW DYNAMIC Implementation - Initialize
+ */
+int raw_dynamic_init(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw)
+{
+ struct _raw_ctrl *ctrl;
+ size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
+
+ OCF_DEBUG_TRACE(cache);
+
+ if (raw->entry_size > PAGE_SIZE)
+ return -1;
+
+ ctrl = env_vmalloc(size);
+ if (!ctrl)
+ return -1;
+
+ ENV_BUG_ON(env_memset(ctrl, size, 0));
+
+ if (env_mutex_init(&ctrl->lock)) {
+ env_vfree(ctrl);
+ return -1;
+ }
+
+ raw->priv = ctrl;
+
+ raw->lock_page = lock_page_pfn;
+ raw->unlock_page = unlock_page_pfn;
+
+ return 0;
+}
+
+/*
+ * RAW DYNAMIC Implementation - Size of
+ */
+size_t raw_dynamic_size_of(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ size_t size;
+
+ /* Size of allocated items */
+ size = env_atomic_read(&ctrl->count);
+ size *= PAGE_SIZE;
+
+ /* Size of control structure */
+ size += sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
+
+ OCF_DEBUG_PARAM(cache, "Count = %d, Size = %lu",
+ env_atomic_read(&ctrl->count), size);
+
+ return size;
+}
+
+/*
+ * RAW DYNAMIC Implementation - Size on SSD
+ */
+uint32_t raw_dynamic_size_on_ssd(struct ocf_metadata_raw *raw)
+{
+ const size_t alignment = 128 * KiB / PAGE_SIZE;
+
+ return OCF_DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
+}
+
+/*
+ * RAW DYNAMIC Implementation - Checksum
+ */
+uint32_t raw_dynamic_checksum(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ uint64_t i;
+ uint32_t step = 0;
+ uint32_t crc = 0;
+
+ for (i = 0; i < raw->ssd_pages; i++) {
+ if (ctrl->pages[i])
+ crc = env_crc32(crc, ctrl->pages[i], PAGE_SIZE);
+ OCF_COND_RESCHED(step, 10000);
+ }
+
+ return crc;
+}
+
+/*
+ * RAW DYNAMIC Implementation - Entry page number
+ */
+uint32_t raw_dynamic_page(struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ ENV_BUG_ON(entry >= raw->entries);
+
+ return _RAW_DYNAMIC_PAGE(raw, entry);
+}
+
+/*
+* RAW DYNAMIC Implementation - Get
+*/
+int raw_dynamic_get(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data)
+{
+ void *item = _raw_dynamic_get_item(cache, raw, entry);
+
+ if (!item) {
+ ENV_BUG_ON(env_memset(data, raw->entry_size, 0));
+ ocf_metadata_error(cache);
+ return -1;
+ }
+
+ return env_memcpy(data, raw->entry_size, item, raw->entry_size);
+}
+
+/*
+ * RAW DYNAMIC Implementation - Set
+ */
+int raw_dynamic_set(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data)
+{
+ void *item = _raw_dynamic_get_item(cache, raw, entry);
+
+ if (!item) {
+ ocf_metadata_error(cache);
+ return -1;
+ }
+
+ return env_memcpy(item, raw->entry_size, data, raw->entry_size);
+}
+
+/*
+ * RAW DYNAMIC Implementation - Access
+ */
+void *raw_dynamic_access(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry)
+{
+ return _raw_dynamic_get_item(cache, raw, entry);
+}
+
+/*
+ * RAW DYNAMIC Implementation - Load all
+ */
+#define RAW_DYNAMIC_LOAD_PAGES 128
+
+struct raw_dynamic_load_all_context {
+ struct ocf_metadata_raw *raw;
+ struct ocf_request *req;
+ ocf_cache_t cache;
+ struct ocf_io *io;
+ ctx_data_t *data;
+ uint8_t *zpage;
+ uint8_t *page;
+ uint64_t i;
+ int error;
+
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
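+
+/*
+ * Load-all is driven as a small state machine on a single ocf_request: the
+ * "read" step below submits an IO for up to RAW_DYNAMIC_LOAD_PAGES metadata
+ * pages, its completion re-queues the request with the "update" io_if, and
+ * the "update" step copies every non-zero page into ctrl->pages (all-zero
+ * pages are skipped to save memory) before re-arming the "read" step, until
+ * all raw->ssd_pages have been consumed:
+ *
+ *	read(i) -> IO completes -> update(i .. i+count) -> read(i+count) -> ...
+ *	                                                -> complete(), once
+ *	                                                   i >= raw->ssd_pages
+ */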
+
+static void raw_dynamic_load_all_complete(
+ struct raw_dynamic_load_all_context *context, int error)
+{
+ context->cmpl(context->priv, error);
+
+ ocf_req_put(context->req);
+ env_secure_free(context->page, PAGE_SIZE);
+ env_free(context->zpage);
+ ctx_data_free(context->cache->owner, context->data);
+ env_vfree(context);
+}
+
+static int raw_dynamic_load_all_update(struct ocf_request *req);
+
+static const struct ocf_io_if _io_if_raw_dynamic_load_all_update = {
+ .read = raw_dynamic_load_all_update,
+ .write = raw_dynamic_load_all_update,
+};
+
+static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
+{
+ struct raw_dynamic_load_all_context *context = io->priv1;
+
+ ocf_io_put(io);
+
+ if (error) {
+ raw_dynamic_load_all_complete(context, error);
+ return;
+ }
+
+ context->req->io_if = &_io_if_raw_dynamic_load_all_update;
+ ocf_engine_push_req_front(context->req, true);
+}
+
+static int raw_dynamic_load_all_read(struct ocf_request *req)
+{
+ struct raw_dynamic_load_all_context *context = req->priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint64_t count;
+ int result;
+
+ count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);
+
+ /* Allocate IO */
+ context->io = ocf_new_cache_io(context->cache, req->io_queue,
+ PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
+ PAGES_TO_BYTES(count), OCF_READ, 0, 0);
+
+ if (!context->io) {
+ raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
+ return 0;
+ }
+
+ /* Setup IO */
+ result = ocf_io_set_data(context->io, context->data, 0);
+ if (result) {
+ ocf_io_put(context->io);
+ raw_dynamic_load_all_complete(context, result);
+ return 0;
+ }
+ ocf_io_set_cmpl(context->io, context, NULL,
+ raw_dynamic_load_all_read_end);
+
+ /* Submit IO */
+ ocf_volume_submit_io(context->io);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_raw_dynamic_load_all_read = {
+ .read = raw_dynamic_load_all_read,
+ .write = raw_dynamic_load_all_read,
+};
+
+static int raw_dynamic_load_all_update(struct ocf_request *req)
+{
+ struct raw_dynamic_load_all_context *context = req->priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ ocf_cache_t cache = context->cache;
+ uint64_t count = BYTES_TO_PAGES(context->io->bytes);
+ uint64_t i_page;
+ int result = 0;
+ int cmp;
+
+ /* Reset head of data buffer */
+ ctx_data_seek_check(context->cache->owner, context->data,
+ ctx_data_seek_begin, 0);
+
+ for (i_page = 0; i_page < count; i_page++, context->i++) {
+ if (!context->page) {
+ context->page = env_secure_alloc(PAGE_SIZE);
+ if (!context->page) {
+ /* Allocation error */
+ result = -OCF_ERR_NO_MEM;
+ break;
+ }
+ }
+
+ ctx_data_rd_check(cache->owner, context->page,
+ context->data, PAGE_SIZE);
+
+ result = env_memcmp(context->zpage, PAGE_SIZE, context->page,
+ PAGE_SIZE, &cmp);
+ if (result)
+ break;
+
+ /* Page contains only zeros - no need to allocate space for it */
+ if (cmp == 0) {
+ OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
+ continue;
+ }
+
+ OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
+
+ if (ctrl->pages[context->i])
+ env_vfree(ctrl->pages[context->i]);
+
+ ctrl->pages[context->i] = context->page;
+ context->page = NULL;
+
+ env_atomic_inc(&ctrl->count);
+ }
+
+ if (result || context->i >= raw->ssd_pages) {
+ raw_dynamic_load_all_complete(context, result);
+ return 0;
+ }
+
+ context->req->io_if = &_io_if_raw_dynamic_load_all_read;
+ ocf_engine_push_req_front(context->req, true);
+
+ return 0;
+}
+
+void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct raw_dynamic_load_all_context *context;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ context = env_vzalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ context->raw = raw;
+ context->cache = cache;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ context->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
+ if (!context->data) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_data;
+ }
+
+ context->zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ if (!context->zpage) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_zpage;
+ }
+
+ context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+ if (!context->req) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_req;
+ }
+
+ context->req->info.internal = true;
+ context->req->priv = context;
+ context->req->io_if = &_io_if_raw_dynamic_load_all_read;
+
+ ocf_engine_push_req_front(context->req, true);
+ return;
+
+err_req:
+ env_free(context->zpage);
+err_zpage:
+ ctx_data_free(cache->owner, context->data);
+err_data:
+ env_vfree(context);
+ OCF_CMPL_RET(priv, result);
+}
+
+/*
+ * RAW DYNAMIC Implementation - Flush all
+ */
+
+struct raw_dynamic_flush_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
+/*
+ * RAW DYNAMIC Implementation - Flush IO callback - Fill page
+ */
+static int raw_dynamic_flush_all_fill(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *priv)
+{
+ struct raw_dynamic_flush_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ uint32_t raw_page;
+
+ ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
+
+ raw_page = page - raw->ssd_pages_offset;
+
+ if (ctrl->pages[raw_page]) {
+ OCF_DEBUG_PARAM(cache, "Page = %u", raw_page);
+ if (raw->lock_page)
+ raw->lock_page(cache, raw, raw_page);
+ ctx_data_wr_check(cache->owner, data, ctrl->pages[raw_page],
+ PAGE_SIZE);
+ if (raw->unlock_page)
+ raw->unlock_page(cache, raw, raw_page);
+ } else {
+ OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page);
+ /* Page was not allocated before - write only zeros */
+ ctx_data_zero_check(cache->owner, data, PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+static void raw_dynamic_flush_all_complete(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct raw_dynamic_flush_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
+void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct raw_dynamic_flush_all_context *context;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ context = env_vmalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ raw_dynamic_flush_all_fill,
+ raw_dynamic_flush_all_complete);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+}
+
+/*
+ * RAW DYNAMIC Implementation - Mark to Flush
+ */
+void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+ ENV_BUG();
+}
+
+/*
+ * RAW DYNAMIC Implementation - Do flushing asynchronously
+ */
+int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
+ struct ocf_metadata_raw *raw, ocf_req_end_t complete)
+{
+ ENV_BUG();
+ return -OCF_ERR_NOT_SUPP;
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_dynamic.h b/src/spdk/ocf/src/metadata/metadata_raw_dynamic.h
new file mode 100644
index 000000000..facddf90e
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_dynamic.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_RAW_DYNAMIC_H__
+#define __METADATA_RAW_DYNAMIC_H__
+
+/**
+ * @file metadata_raw_dynamic.h
+ * @brief Metadata RAW container implementation for a dynamic number of elements
+ */
+
+/*
+ * RAW DYNAMIC - Initialize
+ */
+int raw_dynamic_init(ocf_cache_t cache,
+ ocf_flush_page_synch_t lock_page_pfn,
+ ocf_flush_page_synch_t unlock_page_pfn,
+ struct ocf_metadata_raw *raw);
+
+/*
+ * RAW DYNAMIC - De-Initialize
+ */
+int raw_dynamic_deinit(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+/*
+ * RAW DYNAMIC - Get size of memory footprint of this RAW metadata container
+ */
+size_t raw_dynamic_size_of(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+/*
+ * RAW DYNAMIC Implementation - Size on SSD
+ */
+uint32_t raw_dynamic_size_on_ssd(struct ocf_metadata_raw *raw);
+
+/*
+ * RAW DYNAMIC Implementation - Checksum
+ */
+uint32_t raw_dynamic_checksum(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+/*
+ * RAW DYNAMIC Implementation - Entry page number
+ */
+uint32_t raw_dynamic_page(struct ocf_metadata_raw *raw, uint32_t entry);
+
+/*
+ * RAW DYNAMIC - Get specified entry
+ */
+int raw_dynamic_get(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data);
+
+/*
+ * RAW DYNAMIC - Set specified entry
+ */
+int raw_dynamic_set(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ uint32_t entry, void *data);
+
+/*
+ * RAW DYNAMIC - Write access for specified entry
+ */
+void *raw_dynamic_access(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, uint32_t entry);
+
+/*
+ * RAW DYNAMIC - Load all metadata of this RAW metadata container
+ * from cache device
+ */
+void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/*
+ * RAW DYNAMIC - Flush all metadata of this RAW metadata container
+ * to cache device
+ */
+void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/*
+ * RAW DYNAMIC - Mark specified entry to be flushed
+ */
+void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
+
+/*
+ * RAW DYNAMIC Implementation - Do Flush Asynchronously
+ */
+int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
+ struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+
+
+#endif /* __METADATA_RAW_DYNAMIC_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_volatile.c b/src/spdk/ocf/src/metadata/metadata_raw_volatile.c
new file mode 100644
index 000000000..5bdcae904
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_volatile.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_hash.h"
+#include "metadata_raw.h"
+#include "metadata_io.h"
+#include "metadata_raw_volatile.h"
+
+/*
+ * RAW volatile Implementation - Size on SSD
+ */
+uint32_t raw_volatile_size_on_ssd(struct ocf_metadata_raw *raw)
+{
+ return 0;
+}
+
+/*
+ * RAW volatile Implementation - Checksum
+ */
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw)
+{
+ return 0;
+}
+
+/*
+ * RAW volatile Implementation - Load all metadata elements from SSD
+ */
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cmpl(priv, -OCF_ERR_NOT_SUPP);
+}
+
+/*
+ * RAW volatile Implementation - Flush all elements
+ */
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cmpl(priv, 0);
+}
+
+/*
+ * RAW volatile Implementation - Mark to Flush
+ */
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+}
+
+/*
+ * RAW volatile Implementation - Do Flush asynchronously
+ */
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ complete(req, 0);
+ return 0;
+}
diff --git a/src/spdk/ocf/src/metadata/metadata_raw_volatile.h b/src/spdk/ocf/src/metadata/metadata_raw_volatile.h
new file mode 100644
index 000000000..e2deb9366
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_raw_volatile.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_RAW_VOLATILE_H__
+#define __METADATA_RAW_VOLATILE_H__
+
+/*
+ * RAW volatile Implementation - Size on SSD
+ */
+uint32_t raw_volatile_size_on_ssd(struct ocf_metadata_raw *raw);
+
+/*
+ * RAW volatile Implementation - Checksum
+ */
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw);
+
+/*
+ * RAW volatile Implementation - Load all metadata elements from SSD
+ */
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/*
+ * RAW volatile Implementation - Flush all elements
+ */
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
+
+/*
+ * RAW volatile Implementation - Mark to Flush
+ */
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
+
+/*
+ * RAW volatile Implementation - Do Flush asynchronously
+ */
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete);
+
+#endif /* __METADATA_RAW_VOLATILE_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_status.h b/src/spdk/ocf/src/metadata/metadata_status.h
new file mode 100644
index 000000000..10faec8f0
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_status.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_STATUS_H__
+#define __METADATA_STATUS_H__
+
+#include "../concurrency/ocf_metadata_concurrency.h"
+
+/*******************************************************************************
+ * Dirty
+ ******************************************************************************/
+
+static inline void metadata_init_status_bits(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+
+ cache->metadata.iface.clear_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+ cache->metadata.iface.clear_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline bool metadata_test_dirty_all(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, true);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_dirty(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, false);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline void metadata_set_dirty(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.set_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_clear_dirty(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline bool metadata_test_and_clear_dirty(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ test = cache->metadata.iface.test_and_clear_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, false);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return test;
+}
+
+static inline bool metadata_test_and_set_dirty(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ test = cache->metadata.iface.test_and_set_dirty(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, false);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return test;
+}
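+
+/*
+ * Note: the "all" flag passed to iface.test_dirty above distinguishes the two
+ * whole-line variants: metadata_test_dirty() returns true if ANY sector of
+ * the cache line is dirty, while metadata_test_dirty_all() returns true only
+ * if EVERY sector is dirty. For example, on a cache line with 8 sectors where
+ * only sector 3 is dirty, metadata_test_dirty() is true and
+ * metadata_test_dirty_all() is false. The valid-bit helpers further below
+ * follow the same convention.
+ */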
+
+/*******************************************************************************
+ * Dirty - Sector Implementation
+ ******************************************************************************/
+
+static inline bool metadata_test_dirty_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_dirty(cache, line,
+ start, stop, false);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_dirty_all_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_dirty(cache, line,
+ start, stop, true);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_dirty_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ return metadata_test_dirty_sec(cache, line, pos, pos);
+}
+
+static inline bool metadata_test_dirty_out_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_out_dirty(cache, line, start, stop);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline void metadata_set_dirty_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.set_dirty(cache, line, start, stop);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_clear_dirty_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_dirty(cache, line, start, stop);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_set_dirty_sec_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.set_dirty(cache, line, pos, pos);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_clear_dirty_sec_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_dirty(cache, line, pos, pos);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline bool metadata_test_and_clear_dirty_sec(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop)
+{
+ bool test = false;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ test = cache->metadata.iface.test_and_clear_dirty(cache, line,
+ start, stop, false);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return test;
+}
+
+/*
+ * Marks the given cache line's sectors as clean
+ *
+ * @return true if any of the cache line's sectors was dirty and became clean
+ * @return false otherwise
+ */
+static inline bool metadata_clear_dirty_sec_changed(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop, bool *line_is_clean)
+{
+ bool sec_changed;
+
+ OCF_METADATA_BITS_LOCK_WR();
+
+ sec_changed = cache->metadata.iface.test_dirty(cache, line,
+ start, stop, false);
+ *line_is_clean = !cache->metadata.iface.clear_dirty(cache, line,
+ start, stop);
+
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return sec_changed;
+}
+
+/*
+ * Marks the given cache line's sectors as dirty
+ *
+ * @return true if any of the cache line's sectors became dirty
+ * @return false otherwise
+ */
+static inline bool metadata_set_dirty_sec_changed(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop, bool *line_was_dirty)
+{
+ bool sec_changed;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ sec_changed = !cache->metadata.iface.test_dirty(cache, line,
+ start, stop, true);
+ *line_was_dirty = cache->metadata.iface.set_dirty(cache, line, start,
+ stop);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return sec_changed;
+}
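+
+/*
+ * Typical use of the two *_sec_changed() helpers above (hypothetical caller,
+ * sketch only): act on the state transition they report, e.g.
+ *
+ *	bool line_was_dirty;
+ *
+ *	if (metadata_set_dirty_sec_changed(cache, line, start, stop,
+ *			&line_was_dirty) && !line_was_dirty) {
+ *		... first dirty sector on a previously clean line -
+ *		    account one more dirty cache line ...
+ *	}
+ */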
+
+/*******************************************************************************
+ * Valid
+ ******************************************************************************/
+
+static inline bool metadata_test_valid_any(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, false);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_valid(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, true);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline void metadata_set_valid(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.set_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_clear_valid(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline bool metadata_test_and_clear_valid(
+ struct ocf_cache *cache, ocf_cache_line_t line)
+{
+ bool test = false;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ test = cache->metadata.iface.test_and_clear_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, true);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return test;
+}
+
+static inline bool metadata_test_and_set_valid(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ bool test = false;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ test = cache->metadata.iface.test_and_set_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, true);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return test;
+}
+
+/*******************************************************************************
+ * Valid - Sector Implementation
+ ******************************************************************************/
+
+static inline bool metadata_test_valid_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ bool test;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_valid(cache, line,
+ start, stop, true);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_valid_any_out_sec(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop)
+{
+ bool test = false;
+
+ OCF_METADATA_BITS_LOCK_RD();
+ test = cache->metadata.iface.test_out_valid(cache, line,
+ start, stop);
+ OCF_METADATA_BITS_UNLOCK_RD();
+
+ return test;
+}
+
+static inline bool metadata_test_valid_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ return metadata_test_valid_sec(cache, line, pos, pos);
+}
+
+/*
+ * Marks the given cache line's sectors as valid
+ *
+ * @return true if the cache line was invalid (no valid sectors) before this
+ * operation and became valid
+ * @return false if any of the cache line's sectors was already valid
+ */
+static inline bool metadata_set_valid_sec_changed(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop)
+{
+ bool was_any_valid;
+
+ OCF_METADATA_BITS_LOCK_WR();
+ was_any_valid = cache->metadata.iface.set_valid(cache, line,
+ start, stop);
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return !was_any_valid;
+}
+
+static inline void metadata_clear_valid_sec(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_valid(cache, line, start, stop);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_clear_valid_sec_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.clear_valid(cache, line, pos, pos);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+static inline void metadata_set_valid_sec_one(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t pos)
+{
+ OCF_METADATA_BITS_LOCK_WR();
+ cache->metadata.iface.set_valid(cache, line, pos, pos);
+ OCF_METADATA_BITS_UNLOCK_WR();
+}
+
+/*
+ * Marks the given cache line's sectors as invalid
+ *
+ * @return true if any of the cache line's sectors was valid and the cache
+ * line became invalid (all sectors invalid) after the operation
+ * @return false otherwise
+ */
+static inline bool metadata_clear_valid_sec_changed(
+ struct ocf_cache *cache, ocf_cache_line_t line,
+ uint8_t start, uint8_t stop, bool *is_valid)
+{
+ bool was_any_valid;
+
+ OCF_METADATA_BITS_LOCK_WR();
+
+ was_any_valid = cache->metadata.iface.test_valid(cache, line,
+ cache->metadata.settings.sector_start,
+ cache->metadata.settings.sector_end, false);
+
+ *is_valid = cache->metadata.iface.clear_valid(cache, line,
+ start, stop);
+
+ OCF_METADATA_BITS_UNLOCK_WR();
+
+ return was_any_valid && !*is_valid;
+}
+
+#endif /* __METADATA_STATUS_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_structs.h b/src/spdk/ocf/src/metadata/metadata_structs.h
new file mode 100644
index 000000000..71eb02a01
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_structs.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_STRUCTS_H__
+#define __METADATA_STRUCTS_H__
+
+#include "metadata_common.h"
+#include "../eviction/eviction.h"
+#include "../cleaning/cleaning.h"
+#include "../ocf_request.h"
+
+
+/**
+ * @file metadata_structs.h
+ * @brief Metadata private structures
+ */
+
+/**
+ * @brief Metadata shutdown status
+ */
+enum ocf_metadata_shutdown_status {
+ ocf_metadata_clean_shutdown = 1, /*!< Graceful OCF shutdown */
+ ocf_metadata_dirty_shutdown = 0, /*!< Dirty OCF shutdown */
+ ocf_metadata_detached = 2, /*!< Cache device detached */
+};
+
+/*
+ * Metadata cache line location on pages interface
+ */
+struct ocf_metadata_layout_iface {
+ /**
+ * This function maps a collision index to the appropriate cache line
+ * (logical to physical cache line mapping).
+ *
+ * It is necessary because we want to generate a sequential workload
+ * towards the cache device.
+ * Our collision list, for example, looks like this:
+ * 0 3 6 9
+ * 1 4 7 10
+ * 2 5 8
+ * All collision indexes in a given column reside on the same page
+ * of the cache device. We don't want to send a request to the same
+ * page multiple times. To avoid that we iterate collision indexes
+ * by row, but then we can't use the collision index directly as the
+ * cache line, because that would generate a non-sequential workload
+ * (we would write pages: 0 -> 3 -> 6 ...). To map the collision index
+ * correctly we use this function.
+ *
+ * After applying this function, the collision indexes in the above
+ * array correspond to the cache lines below:
+ * 0 1 2 3
+ * 4 5 6 7
+ * 8 9 10
+ *
+ * @param cache - cache instance
+ * @param idx - index in collision list
+ * @return mapped cache line
+ */
+ ocf_cache_line_t (*lg2phy)(struct ocf_cache *cache,
+ ocf_cache_line_t coll_idx);
+
+ /**
+ * @brief Map a physical cache line on the cache device to a logical one
+ * @note This function is the inverse of lg2phy
+ *
+ * @param cache Cache instance
+ * @param phy Physical cache line of cache device
+ * @return Logical cache line
+ */
+ ocf_cache_line_t (*phy2lg)(struct ocf_cache *cache,
+ ocf_cache_line_t phy);
+};
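+
+/*
+ * Worked example for the lg2phy()/phy2lg() mapping described above: iterating
+ * collision indexes row by row (0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8) visits each
+ * metadata page at most once per row, and lg2phy() turns that order into the
+ * physical cache lines 0, 1, 2, 3, 4, ... - i.e. a strictly sequential
+ * workload on the cache device - while phy2lg() performs the reverse lookup
+ * (e.g. lg2phy(3) == 1 and phy2lg(1) == 3 in the layout above).
+ */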
+
+/**
+ * @brief Query cores completion callback
+ *
+ * @param priv - Caller private data
+ * @param error - Operation error status
+ * @param num_cores - Number of cores in metadata
+ */
+typedef void (*ocf_metadata_query_cores_end_t)(void *priv, int error,
+ unsigned int num_cores);
+
+/**
+ * OCF Metadata interface
+ */
+struct ocf_metadata_iface {
+ /**
+ * @brief Initialize metadata
+ *
+ * @param cache - Cache instance
+ * @param cache_line_size - Cache line size
+ * @return 0 - Operation success otherwise failure
+ */
+ int (*init)(struct ocf_cache *cache,
+ ocf_cache_line_size_t cache_line_size);
+
+ /**
+ * @brief Initialize variable size metadata sections
+ *
+ * @param cache - Cache instance
+ * @param device_size - Cache size in bytes
+ * @param cache_line_size - Cache line size
+ * @param layout Metadata layout
+ * @return 0 - Operation success otherwise failure
+ */
+ int (*init_variable_size)(struct ocf_cache *cache, uint64_t device_size,
+ ocf_cache_line_size_t cache_line_size,
+ ocf_metadata_layout_t layout);
+
+ /**
+ * @brief Query metadata for added cores
+ *
+ * @param[in] owner - OCF context
+ * @param[in] volume - volume to probe
+ * @param[in,out] uuid - array of uuids
+ * @param[in] count - size of uuid array
+ * @param[in] cmpl - completion callback
+ * @param[in] priv - completion private data
+ */
+ void (*query_cores)(ocf_ctx_t owner, ocf_volume_t volume,
+ struct ocf_volume_uuid *uuid, uint32_t count,
+ ocf_metadata_query_cores_end_t cmpl, void *priv);
+
+
+ /**
+ * @brief Metadata cache line location on pages interface
+ */
+ const struct ocf_metadata_layout_iface *layout_iface;
+
+ /**
+ * @brief Initialize hash table
+ *
+ * @param cache - Cache instance
+ */
+ void (*init_hash_table)(struct ocf_cache *cache);
+
+ /**
+ * @brief Initialize collision table
+ *
+ * @param cache - Cache instance
+ */
+ void (*init_collision)(struct ocf_cache *cache);
+
+ /**
+ * @brief De-Initialize metadata
+ *
+ * @param cache - Cache instance
+ */
+ void (*deinit)(struct ocf_cache *cache);
+
+ /**
+ * @brief De-Initialize variable size metadata segments
+ *
+ * @param cache - Cache instance
+ */
+ void (*deinit_variable_size)(struct ocf_cache *cache);
+
+ /**
+ * @brief Get memory footprint
+ *
+ * @param cache - Cache instance
+ * @return Memory footprint in bytes
+ */
+ size_t (*size_of)(struct ocf_cache *cache);
+
+ /**
+ * @brief Get the number of pages required for metadata
+ *
+ * @param cache - Cache instance
+ * @return Number of pages required to store metadata on the cache device
+ */
+ ocf_cache_line_t (*pages)(struct ocf_cache *cache);
+
+ /**
+ * @brief Get amount of cache lines
+ *
+ * @param cache - Cache instance
+ * @return Amount of cache lines (cache device lines - metadata space)
+ */
+ ocf_cache_line_t (*cachelines)(struct ocf_cache *cache);
+
+ /**
+ * @brief Load metadata from cache device
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
+ */
+ void (*load_all)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ /**
+ * @brief Load metadata from recovery procedure
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
+ */
+ void (*load_recovery)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ /**
+ * @brief Flush metadata to the cache device
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
+ */
+ void (*flush_all)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ /**
+ * @brief Mark specified cache line to be flushed
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line which is to be flushed
+ */
+ void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
+ uint32_t map_idx, int to_state, uint8_t start,
+ uint8_t stop);
+
+ /**
+ * @brief Flush marked cache lines asynchronously
+ *
+ * @param cache - Cache instance
+ * @param queue - I/O queue to which metadata flush should be submitted
+ * @param remaining - request remaining
+ * @param complete - flushing request callback
+ * @param context - context that will be passed into callback
+ */
+ void (*flush_do_asynch)(struct ocf_cache *cache,
+ struct ocf_request *req, ocf_req_end_t complete);
+
+
+ /* TODO Provide documentation below */
+
+ enum ocf_metadata_shutdown_status (*get_shutdown_status)(
+ struct ocf_cache *cache);
+
+ void (*set_shutdown_status)(ocf_cache_t cache,
+ enum ocf_metadata_shutdown_status shutdown_status,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ void (*load_superblock)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ void (*flush_superblock)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
+
+ uint64_t (*get_reserved_lba)(struct ocf_cache *cache);
+
+ /**
+ * @brief Get eviction policy
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line for which eviction policy is requested
+ * @param[out] eviction_policy - Eviction policy
+ */
+ void (*get_eviction_policy)(struct ocf_cache *cache,
+ ocf_cache_line_t line,
+ union eviction_policy_meta *eviction_policy);
+
+ /**
+ * @brief Set eviction policy
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line for which eviction policy is set
+ * @param[in] eviction_policy - Eviction policy values which will be
+ * stored in metadata service
+ */
+ void (*set_eviction_policy)(struct ocf_cache *cache,
+ ocf_cache_line_t line,
+ union eviction_policy_meta *eviction_policy);
+
+ /**
+ * @brief Get cleaning policy
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line for which cleaning policy is requested
+ * @param[out] cleaning_policy - Cleaning policy
+ */
+ void (*get_cleaning_policy)(struct ocf_cache *cache,
+ ocf_cache_line_t line,
+ struct cleaning_policy_meta *cleaning_policy);
+
+ /**
+ * @brief Set cleaning policy
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] line - cache line for which cleaning policy is set
+ * @param[in] cleaning_policy - Cleaning policy values which will be
+ * stored in metadata service
+ */
+ void (*set_cleaning_policy)(struct ocf_cache *cache,
+ ocf_cache_line_t line,
+ struct cleaning_policy_meta *cleaning_policy);
+
+ /**
+ * @brief Get hash table for specified index
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] index - Hash table index
+ * @return Cache line value under specified hash table index
+ */
+ ocf_cache_line_t (*get_hash)(struct ocf_cache *cache,
+ ocf_cache_line_t index);
+
+ /**
+ * @brief Set hash table value for specified index
+ *
+ * @param[in] cache - Cache instance
+ * @param[in] index - Hash table index
+ * @param[in] line - Cache line value to be set under specified hash
+ * table index
+ */
+ void (*set_hash)(struct ocf_cache *cache,
+ ocf_cache_line_t index, ocf_cache_line_t line);
+
+ /**
+ * @brief Get hash table entries
+ *
+ * @param[in] cache - Cache instance
+ * @return Hash table entries
+ */
+ ocf_cache_line_t (*entries_hash)(struct ocf_cache *cache);
+
+ /* TODO Provide documentation below */
+ void (*set_core_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t core_id,
+ uint64_t core_sector);
+
+ void (*get_core_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t *core_id,
+ uint64_t *core_sector);
+
+ ocf_core_id_t (*get_core_id)(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+ void (*get_core_and_part_id)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_core_id_t *core_id,
+ ocf_part_id_t *part_id);
+
+ struct ocf_metadata_uuid *(*get_core_uuid)(
+ struct ocf_cache *cache, ocf_core_id_t core_id);
+
+ void (*set_collision_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t next,
+ ocf_cache_line_t prev);
+
+ void (*get_collision_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t *next,
+ ocf_cache_line_t *prev);
+
+ void (*set_collision_next)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t next);
+
+ void (*set_collision_prev)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t prev);
+
+ void (*start_collision_shared_access)(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+ void (*end_collision_shared_access)(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+ void (*get_partition_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t *part_id,
+ ocf_cache_line_t *next_line,
+ ocf_cache_line_t *prev_line);
+
+ void (*set_partition_next)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t next_line);
+
+ void (*set_partition_prev)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t prev_line);
+
+ void (*set_partition_info)(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id,
+ ocf_cache_line_t next_line, ocf_cache_line_t prev_line);
+
+ const struct ocf_metadata_status*
+ (*rd_status_access)(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+ struct ocf_metadata_status*
+ (*wr_status_access)(struct ocf_cache *cache,
+ ocf_cache_line_t line);
+
+ bool (*test_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+
+ bool (*test_out_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*clear_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*set_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*test_and_set_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+
+ bool (*test_and_clear_dirty)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+
+
+ bool (*test_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+
+ bool (*test_out_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*clear_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*set_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop);
+
+ bool (*test_and_set_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+
+ bool (*test_and_clear_valid)(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
+};
+
+struct ocf_cache_line_settings {
+ ocf_cache_line_size_t size;
+ uint64_t sector_count;
+ uint64_t sector_start;
+ uint64_t sector_end;
+};
+
+struct ocf_metadata_lock
+{
+ env_rwsem global; /*!< global metadata lock (GML) */
+ env_rwlock status; /*!< Fast lock for status bits */
+ env_spinlock eviction; /*!< Fast lock for eviction policy */
+ env_rwsem *hash; /*!< Hash bucket locks */
+ env_rwsem *collision_pages; /*!< Collision table page locks */
+ env_spinlock partition[OCF_IO_CLASS_MAX]; /* partition lock */
+ uint32_t num_hash_entries; /*!< Hash bucket count */
+ uint32_t num_collision_pages; /*!< Collision table page count */
+ ocf_cache_t cache; /*!< Parent cache object */
+};
+
+/**
+ * @brief Metadata control structure
+ */
+struct ocf_metadata {
+ const struct ocf_metadata_iface iface;
+ /*!< Metadata service interface */
+
+ void *iface_priv;
+ /*!< Private data of metadata service interface */
+
+ const struct ocf_cache_line_settings settings;
+ /*!< Cache line configuration */
+
+ bool is_volatile;
+ /*!< true if metadata used in volatile mode (RAM only) */
+
+ struct ocf_metadata_lock lock;
+};
+
+#endif /* __METADATA_STRUCTS_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_superblock.h b/src/spdk/ocf/src/metadata/metadata_superblock.h
new file mode 100644
index 000000000..5b89df50c
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_superblock.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_SUPERBLOCK_H__
+#define __METADATA_SUPERBLOCK_H__
+
+#include <ocf/ocf_def.h>
+
+#define CACHE_MAGIC_NUMBER 0x187E1CA6
+
+/**
+ * @brief OCF cache metadata configuration superblock
+ */
+struct ocf_superblock_config {
+ /** WARNING: Metadata probe disregards metadata version when
+ * checking if the cache is dirty - position of next two fields
+ * shouldn't change!! */
+ uint8_t clean_shutdown;
+ uint8_t dirty_flushed;
+ uint32_t magic_number;
+
+ uint32_t metadata_version;
+
+ /* Currently set cache mode */
+ ocf_cache_mode_t cache_mode;
+
+ char name[OCF_CACHE_NAME_SIZE];
+
+ ocf_cache_line_t cachelines;
+ uint32_t valid_parts_no;
+
+ ocf_cache_line_size_t line_size;
+ ocf_metadata_layout_t metadata_layout;
+ uint32_t core_count;
+
+ unsigned long valid_core_bitmap[(OCF_CORE_MAX /
+ (sizeof(unsigned long) * 8)) + 1];
+
+ ocf_cleaning_t cleaning_policy_type;
+ struct cleaning_policy_config cleaning[CLEANING_POLICY_TYPE_MAX];
+
+ ocf_promotion_t promotion_policy_type;
+ struct promotion_policy_config promotion[PROMOTION_POLICY_TYPE_MAX];
+
+ ocf_eviction_t eviction_policy_type;
+
+ /* Current core sequence number */
+ ocf_core_id_t curr_core_seq_no;
+
+ /*
+ * Checksum for each metadata region.
+ * This field has to be the last one!
+ */
+ uint32_t checksum[metadata_segment_max];
+};
+
+/**
+ * @brief OCF cache metadata runtime superblock
+ */
+struct ocf_superblock_runtime {
+ uint32_t cleaning_thread_access;
+};
+
+static inline void ocf_metadata_set_shutdown_status(ocf_cache_t cache,
+ enum ocf_metadata_shutdown_status shutdown_status,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cache->metadata.iface.set_shutdown_status(cache, shutdown_status,
+ cmpl, priv);
+}
+
+static inline void ocf_metadata_load_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cache->metadata.iface.load_superblock(cache, cmpl, priv);
+}
+
+static inline void ocf_metadata_flush_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ cache->metadata.iface.flush_superblock(cache, cmpl, priv);
+}
+
+#endif /* __METADATA_SUPERBLOCK_H__ */
diff --git a/src/spdk/ocf/src/metadata/metadata_updater.c b/src/spdk/ocf/src/metadata/metadata_updater.c
new file mode 100644
index 000000000..bb720e303
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_updater.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "metadata.h"
+#include "metadata_io.h"
+#include "metadata_updater_priv.h"
+#include "../ocf_priv.h"
+#include "../engine/engine_common.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "../utils/utils_io.h"
+
+int ocf_metadata_updater_init(ocf_cache_t cache)
+{
+ ocf_metadata_updater_t mu = &cache->metadata_updater;
+ struct ocf_metadata_io_syncher *syncher = &mu->syncher;
+
+ INIT_LIST_HEAD(&syncher->in_progress_head);
+ INIT_LIST_HEAD(&syncher->pending_head);
+ env_mutex_init(&syncher->lock);
+
+ return ctx_metadata_updater_init(cache->owner, mu);
+}
+
+void ocf_metadata_updater_kick(ocf_cache_t cache)
+{
+ ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater);
+}
+
+void ocf_metadata_updater_stop(ocf_cache_t cache)
+{
+ ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater);
+ env_mutex_destroy(&cache->metadata_updater.syncher.lock);
+}
+
+void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv)
+{
+ OCF_CHECK_NULL(mu);
+ mu->priv = priv;
+}
+
+void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu)
+{
+ OCF_CHECK_NULL(mu);
+ return mu->priv;
+}
+
+ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu)
+{
+ OCF_CHECK_NULL(mu);
+ return container_of(mu, struct ocf_cache, metadata_updater);
+}
+
+static int _metadata_updater_iterate_in_progress(ocf_cache_t cache,
+ struct list_head *finished, struct metadata_io_request *new_req)
+{
+ struct ocf_metadata_io_syncher *syncher =
+ &cache->metadata_updater.syncher;
+ struct metadata_io_request *curr, *temp;
+
+ list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) {
+ if (env_atomic_read(&curr->finished)) {
+ list_move_tail(&curr->list, finished);
+ continue;
+ }
+ if (new_req) {
+ /* If request specified, check if overlap occurs. */
+ if (ocf_io_overlaps(new_req->page, new_req->count,
+ curr->page, curr->count)) {
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
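+
+/*
+ * Example of the overlap check above, assuming ocf_io_overlaps() reports
+ * intersection of the two page ranges [page, page + count): an in-progress
+ * request covering pages 10..13 (page = 10, count = 4) forces a new request
+ * with page = 12, count = 2 onto the pending list, while a new request with
+ * page = 14, count = 2 does not overlap and may be started immediately.
+ */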
+
+static void metadata_updater_process_finished(struct list_head *finished)
+{
+ struct metadata_io_request *curr, *temp;
+
+ list_for_each_entry_safe(curr, temp, finished, list) {
+ list_del(&curr->list);
+ metadata_io_req_complete(curr);
+ }
+}
+
+void metadata_updater_submit(struct metadata_io_request *m_req)
+{
+ ocf_cache_t cache = m_req->cache;
+ struct ocf_metadata_io_syncher *syncher =
+ &cache->metadata_updater.syncher;
+ struct list_head finished;
+ int ret;
+
+ INIT_LIST_HEAD(&finished);
+
+ env_mutex_lock(&syncher->lock);
+
+ ret = _metadata_updater_iterate_in_progress(cache, &finished, m_req);
+
+ /* Either add it to in-progress list or pending list for deferred
+ * execution.
+ */
+ if (ret == 0)
+ list_add_tail(&m_req->list, &syncher->in_progress_head);
+ else
+ list_add_tail(&m_req->list, &syncher->pending_head);
+
+ env_mutex_unlock(&syncher->lock);
+
+ if (ret == 0)
+ ocf_engine_push_req_front(&m_req->req, true);
+
+ metadata_updater_process_finished(&finished);
+}
+
+uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
+{
+ struct metadata_io_request *curr, *temp;
+ struct ocf_metadata_io_syncher *syncher;
+ struct list_head finished;
+ ocf_cache_t cache;
+ int ret;
+
+ OCF_CHECK_NULL(mu);
+
+ INIT_LIST_HEAD(&finished);
+
+ cache = ocf_metadata_updater_get_cache(mu);
+ syncher = &cache->metadata_updater.syncher;
+
+ env_mutex_lock(&syncher->lock);
+ if (list_empty(&syncher->pending_head)) {
+ /*
+ * If pending list is empty, we iterate over in progress
+ * list to free memory used by finished requests.
+ */
+ _metadata_updater_iterate_in_progress(cache, &finished, NULL);
+ env_mutex_unlock(&syncher->lock);
+ metadata_updater_process_finished(&finished);
+ env_cond_resched();
+ return 0;
+ }
+ list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) {
+ ret = _metadata_updater_iterate_in_progress(cache, &finished, curr);
+ if (ret == 0) {
+ /* Move to in-progress list and kick the workers */
+ list_move_tail(&curr->list, &syncher->in_progress_head);
+ }
+ env_mutex_unlock(&syncher->lock);
+ metadata_updater_process_finished(&finished);
+ if (ret == 0)
+ ocf_engine_push_req_front(&curr->req, true);
+ env_cond_resched();
+ env_mutex_lock(&syncher->lock);
+ }
+ env_mutex_unlock(&syncher->lock);
+
+ return 0;
+}
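+
+/*
+ * The context glue (see ctx_metadata_updater_init/kick above) is expected to
+ * call ocf_metadata_updater_run() from its updater thread or poller whenever
+ * the kick callback signals new work; a minimal, hypothetical worker loop
+ * could look like:
+ *
+ *	while (!stop_requested) {
+ *		wait_for_kick();
+ *		ocf_metadata_updater_run(mu);
+ *	}
+ */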
diff --git a/src/spdk/ocf/src/metadata/metadata_updater_priv.h b/src/spdk/ocf/src/metadata/metadata_updater_priv.h
new file mode 100644
index 000000000..0f2ee3058
--- /dev/null
+++ b/src/spdk/ocf/src/metadata/metadata_updater_priv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_UPDATER_PRIV_H__
+#define __METADATA_UPDATER_PRIV_H__
+
+#include "../ocf_def_priv.h"
+#include "metadata_io.h"
+
+struct ocf_metadata_updater {
+ /* Metadata flush synchronizer context */
+ struct ocf_metadata_io_syncher {
+ struct list_head in_progress_head;
+ struct list_head pending_head;
+ env_mutex lock;
+ } syncher;
+
+ void *priv;
+};
+
+
+void metadata_updater_submit(struct metadata_io_request *m_req);
+
+int ocf_metadata_updater_init(struct ocf_cache *cache);
+
+void ocf_metadata_updater_kick(struct ocf_cache *cache);
+
+void ocf_metadata_updater_stop(struct ocf_cache *cache);
+
+#endif /* __METADATA_UPDATER_PRIV_H__ */
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_cache.c b/src/spdk/ocf/src/mngt/ocf_mngt_cache.c
new file mode 100644
index 000000000..709e5569d
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_cache.c
@@ -0,0 +1,2557 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "ocf_mngt_core_priv.h"
+#include "../ocf_priv.h"
+#include "../ocf_core_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_pipeline.h"
+#include "../utils/utils_refcnt.h"
+#include "../utils/utils_async_lock.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../eviction/ops.h"
+#include "../ocf_ctx_priv.h"
+#include "../ocf_freelist.h"
+#include "../cleaning/cleaning.h"
+#include "../promotion/ops.h"
+
+#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)
+
+#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
+ "previous cache state (Warning: data corruption may happen)" \
+ "\nOr initialize your cache using --force option. " \
+ "Warning: All dirty data will be lost!\n"
+
+#define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \
+ "Restart with --load or --force option\n"
+
+/**
+ * @brief Helper structure used to start a cache
+ */
+struct ocf_cache_mngt_init_params {
+ bool metadata_volatile;
+
+ ocf_ctx_t ctx;
+ /*!< OCF context */
+
+ ocf_cache_t cache;
+ /*!< cache that is being initialized */
+
+ uint8_t locked;
+ /*!< Keep cache locked */
+
+ /**
+ * @brief initialization state (in case of error it is used to know
+ * which assets have to be deallocated on premature exit from the function)
+ */
+ struct {
+ bool cache_alloc : 1;
+ /*!< cache is allocated and added to list */
+
+ bool metadata_inited : 1;
+ /*!< Metadata is inited to valid state */
+
+ bool added_to_list : 1;
+ /*!< Cache is added to context list */
+
+ bool cache_locked : 1;
+ /*!< Cache has been locked */
+ } flags;
+
+ struct ocf_metadata_init_params {
+ ocf_cache_line_size_t line_size;
+ /*!< Metadata cache line size */
+
+ ocf_metadata_layout_t layout;
+ /*!< Metadata layout (striping/sequential) */
+
+ ocf_cache_mode_t cache_mode;
+ /*!< cache mode */
+
+ ocf_promotion_t promotion_policy;
+ } metadata;
+};
+
+typedef void (*_ocf_mngt_cache_attach_end_t)(ocf_cache_t, void *priv1,
+ void *priv2, int error);
+
+struct ocf_cache_attach_context {
+ ocf_cache_t cache;
+ /*!< cache that is being initialized */
+
+ struct ocf_mngt_cache_device_config cfg;
+
+ uint64_t volume_size;
+ /*!< size of the device in cache lines */
+
+ enum ocf_mngt_cache_init_mode init_mode;
+ /*!< cache init mode */
+
+ /**
+ * @brief initialization state (in case of error it is used to know
+ * which assets have to be deallocated on premature exit from the function)
+ */
+ struct {
+ bool device_alloc : 1;
+ /*!< data structure allocated */
+
+ bool volume_inited : 1;
+ /*!< uuid for cache device is allocated */
+
+ bool attached_metadata_inited : 1;
+ /*!< attached metadata sections initialized */
+
+ bool device_opened : 1;
+ /*!< underlying device volume is open */
+
+ bool cleaner_started : 1;
+ /*!< Cleaner has been started */
+
+ bool promotion_initialized : 1;
+ /*!< Promotion policy has been started */
+
+ bool cores_opened : 1;
+ /*!< underlying cores are opened (happens only during
+ * load or recovery)
+ */
+
+ bool freelist_inited : 1;
+
+ bool concurrency_inited : 1;
+ } flags;
+
+ struct {
+ ocf_cache_line_size_t line_size;
+ /*!< Metadata cache line size */
+
+ ocf_metadata_layout_t layout;
+ /*!< Metadata layout (striping/sequential) */
+
+ ocf_cache_mode_t cache_mode;
+ /*!< cache mode */
+
+ enum ocf_metadata_shutdown_status shutdown_status;
+ /*!< dirty or clean */
+
+ uint8_t dirty_flushed;
+ /*!< is dirty data fully flushed */
+
+ int status;
+ /*!< metadata retrieval status (a nonzero value is a sign of an error
+ * during recovery/load, but is not an issue in case of a clean init)
+ */
+ } metadata;
+
+ struct {
+ void *rw_buffer;
+ void *cmp_buffer;
+ unsigned long reserved_lba_addr;
+ ocf_pipeline_t pipeline;
+ } test;
+
+ _ocf_mngt_cache_attach_end_t cmpl;
+ void *priv1;
+ void *priv2;
+
+ ocf_pipeline_t pipeline;
+};
+
+static void __init_partitions(ocf_cache_t cache)
+{
+ ocf_part_id_t i_part;
+
+ /* Init default Partition */
+ ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT,
+ "unclassified", 0, PARTITION_SIZE_MAX,
+ OCF_IO_CLASS_PRIO_LOWEST, true));
+
+ /* Add the remaining partitions to the cache and mark them as dummy */
+ for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
+ ocf_refcnt_freeze(&cache->refcnt.cleaning[i_part]);
+
+ if (i_part == PARTITION_DEFAULT)
+ continue;
+
+ /* Init non-default partition as inactive */
+ ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part,
+ "Inactive", 0, PARTITION_SIZE_MAX,
+ OCF_IO_CLASS_PRIO_LOWEST, false));
+ }
+}
+
+static void __init_partitions_attached(ocf_cache_t cache)
+{
+ ocf_part_id_t part_id;
+
+ for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
+ cache->user_parts[part_id].runtime->head =
+ cache->device->collision_table_entries;
+ cache->user_parts[part_id].runtime->curr_size = 0;
+
+ ocf_eviction_initialize(cache, part_id);
+ }
+}
+
+static void __init_freelist(ocf_cache_t cache)
+{
+ uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
+ ocf_get_cache_occupancy(cache);
+
+ ocf_freelist_populate(cache->freelist, free_clines);
+}
+
+static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
+{
+ ocf_cleaning_t cleaning_policy = ocf_cleaning_default;
+ int i;
+ ocf_error_t result = 0;
+
+ OCF_ASSERT_PLUGGED(cache);
+
+ for (i = 0; i < ocf_cleaning_max; i++) {
+ if (cleaning_policy_ops[i].setup)
+ cleaning_policy_ops[i].setup(cache);
+ }
+
+ cache->conf_meta->cleaning_policy_type = ocf_cleaning_default;
+ if (cleaning_policy_ops[cleaning_policy].initialize)
+ result = cleaning_policy_ops[cleaning_policy].initialize(cache, 1);
+
+ return result;
+}
+
+static void __deinit_cleaning_policy(ocf_cache_t cache)
+{
+ ocf_cleaning_t cleaning_policy;
+
+ cleaning_policy = cache->conf_meta->cleaning_policy_type;
+ if (cleaning_policy_ops[cleaning_policy].deinitialize)
+ cleaning_policy_ops[cleaning_policy].deinitialize(cache);
+}
+
+static void __init_eviction_policy(ocf_cache_t cache,
+ ocf_eviction_t eviction)
+{
+ ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max);
+
+ cache->conf_meta->eviction_policy_type = eviction;
+}
+
+static void __setup_promotion_policy(ocf_cache_t cache)
+{
+ int i;
+
+ OCF_CHECK_NULL(cache);
+
+ for (i = 0; i < ocf_promotion_max; i++) {
+ if (ocf_promotion_policies[i].setup)
+ ocf_promotion_policies[i].setup(cache);
+ }
+}
+
+static void __deinit_promotion_policy(ocf_cache_t cache)
+{
+ ocf_promotion_deinit(cache->promotion_policy);
+ cache->promotion_policy = NULL;
+}
+
+static void __init_cores(ocf_cache_t cache)
+{
+ /* No core devices yet */
+ cache->conf_meta->core_count = 0;
+ ENV_BUG_ON(env_memset(cache->conf_meta->valid_core_bitmap,
+ sizeof(cache->conf_meta->valid_core_bitmap), 0));
+}
+
+static void __init_metadata_version(ocf_cache_t cache)
+{
+ cache->conf_meta->metadata_version = METADATA_VERSION();
+}
+
+static void __reset_stats(ocf_cache_t cache)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ ocf_part_id_t i;
+
+ for_each_core_all(cache, core, core_id) {
+ env_atomic_set(&core->runtime_meta->cached_clines, 0);
+ env_atomic_set(&core->runtime_meta->dirty_clines, 0);
+ env_atomic64_set(&core->runtime_meta->dirty_since, 0);
+
+ for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
+ env_atomic_set(&core->runtime_meta->
+ part_counters[i].cached_clines, 0);
+ env_atomic_set(&core->runtime_meta->
+ part_counters[i].dirty_clines, 0);
+ }
+ }
+}
+
+static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
+ ocf_eviction_t eviction_policy)
+{
+ ocf_error_t result;
+
+ /* Lock to ensure consistency */
+
+ ocf_metadata_init_hash_table(cache);
+ ocf_metadata_init_collision(cache);
+ __init_partitions_attached(cache);
+ __init_freelist(cache);
+
+ result = __init_cleaning_policy(cache);
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Cannot initialize cleaning policy\n");
+ return result;
+ }
+
+ __init_eviction_policy(cache, eviction_policy);
+ __setup_promotion_policy(cache);
+
+ return 0;
+}
+
+static void init_attached_data_structures_recovery(ocf_cache_t cache)
+{
+ ocf_metadata_init_hash_table(cache);
+ ocf_metadata_init_collision(cache);
+ __init_partitions_attached(cache);
+ __reset_stats(cache);
+ __init_metadata_version(cache);
+}
+
+/****************************************************************
+ * Function for removing all uninitialized core objects *
+ * from the cache instance. *
+ * Used in case of cache initialization errors. *
+ ****************************************************************/
+static void _ocf_mngt_close_all_uninitialized_cores(
+ ocf_cache_t cache)
+{
+ ocf_volume_t volume;
+ int j, i;
+
+ for (j = cache->conf_meta->core_count, i = 0; j > 0; ++i) {
+ if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
+ continue;
+
+ volume = &(cache->core[i].volume);
+ ocf_volume_close(volume);
+
+ --j;
+
+ env_free(cache->core[i].counters);
+ cache->core[i].counters = NULL;
+
+ env_bit_clear(i, cache->conf_meta->valid_core_bitmap);
+ }
+
+ cache->conf_meta->core_count = 0;
+}
+
+/**
+ * @brief routine loading metadata from cache device
+ * - attempts to open all the underlying cores
+ */
+static int _ocf_mngt_init_instance_add_cores(
+ struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ int ret = -1;
+ uint64_t hd_lines = 0;
+
+ OCF_ASSERT_PLUGGED(cache);
+
+ /* Core count will be re-calculated on the basis of the 'valid' flag */
+ cache->conf_meta->core_count = 0;
+
+ /* Check in metadata which cores were saved in cache metadata */
+ for_each_core_metadata(cache, core, core_id) {
+ ocf_volume_t tvolume = NULL;
+
+ if (!core->volume.type)
+ goto err;
+
+ tvolume = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache),
+ &core->volume.uuid, core->volume.type);
+ if (tvolume) {
+ /*
+ * Attach bottom device to core structure
+ * in cache
+ */
+ ocf_volume_move(&core->volume, tvolume);
+ ocf_mngt_core_pool_remove(cache->owner, tvolume);
+
+ core->opened = true;
+ ocf_cache_log(cache, log_info,
+ "Attached core %u from pool\n",
+ core_id);
+ } else if (context->cfg.open_cores) {
+ ret = ocf_volume_open(&core->volume, NULL);
+ if (ret == -OCF_ERR_NOT_OPEN_EXC) {
+ ocf_cache_log(cache, log_warn,
+ "Cannot open core %u. "
+ "Cache is busy", core_id);
+ } else if (ret) {
+ ocf_cache_log(cache, log_warn,
+ "Cannot open core %u", core_id);
+ } else {
+ core->opened = true;
+ }
+ }
+
+ env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
+ core->added = true;
+ cache->conf_meta->core_count++;
+ core->volume.cache = cache;
+
+ if (ocf_mngt_core_init_front_volume(core))
+ goto err;
+
+ core->counters =
+ env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
+ if (!core->counters)
+ goto err;
+
+ if (!core->opened) {
+ env_bit_set(ocf_cache_state_incomplete,
+ &cache->cache_state);
+ cache->ocf_core_inactive_count++;
+ ocf_cache_log(cache, log_warn,
+ "Cannot find core %u in pool"
+ ", core added as inactive\n", core_id);
+ continue;
+ }
+
+ hd_lines = ocf_bytes_2_lines(cache,
+ ocf_volume_get_length(&core->volume));
+
+ if (hd_lines) {
+ ocf_cache_log(cache, log_info,
+ "Disk lines = %" ENV_PRIu64 "\n", hd_lines);
+ }
+ }
+
+ context->flags.cores_opened = true;
+ return 0;
+
+err:
+ _ocf_mngt_close_all_uninitialized_cores(cache);
+
+ return -OCF_ERR_START_CACHE_FAIL;
+}
+
+void _ocf_mngt_init_instance_load_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_cleaning_t cleaning_policy;
+ ocf_error_t result;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Cannot read cache metadata\n");
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
+ }
+
+ __init_freelist(cache);
+
+ cleaning_policy = cache->conf_meta->cleaning_policy_type;
+ if (!cleaning_policy_ops[cleaning_policy].initialize)
+ goto out;
+
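+ /*
+ * After a clean shutdown the cleaning policy state read from disk is
+ * assumed to be valid, so initialize without rebuilding it (flag 0);
+ * after a dirty shutdown it has to be rebuilt from scratch (flag 1).
+ */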
+ if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
+ result = cleaning_policy_ops[cleaning_policy].initialize(cache, 0);
+ else
+ result = cleaning_policy_ops[cleaning_policy].initialize(cache, 1);
+
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Cannot initialize cleaning policy\n");
+ OCF_PL_FINISH_RET(context->pipeline, result);
+ }
+
+out:
+ ocf_pipeline_next(context->pipeline);
+}
+
+/**
+ * handle load variant
+ */
+static void _ocf_mngt_init_instance_clean_load(
+ struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+
+ ocf_metadata_load_all(cache,
+ _ocf_mngt_init_instance_load_complete, context);
+}
+
+/**
+ * handle recovery variant
+ */
+static void _ocf_mngt_init_instance_recovery(
+ struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+
+ init_attached_data_structures_recovery(cache);
+
+ ocf_cache_log(cache, log_warn,
+ "ERROR: Cache device did not shut down properly!\n");
+
+ ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n");
+
+ ocf_metadata_load_recovery(cache,
+ _ocf_mngt_init_instance_load_complete, context);
+}
+
+static void _ocf_mngt_init_instance_load(
+ struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+ int ret;
+
+ OCF_ASSERT_PLUGGED(cache);
+
+ ret = _ocf_mngt_init_instance_add_cores(context);
+ if (ret)
+ OCF_PL_FINISH_RET(context->pipeline, ret);
+
+ if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
+ _ocf_mngt_init_instance_clean_load(context);
+ else
+ _ocf_mngt_init_instance_recovery(context);
+}
+
+/**
+ * @brief allocate memory for new cache, add it to cache queue, set initial
+ * values and running state
+ */
+static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
+{
+ ocf_cache_t cache = env_vzalloc(sizeof(*cache));
+ int result;
+
+ if (!cache)
+ return -OCF_ERR_NO_MEM;
+
+ if (ocf_mngt_cache_lock_init(cache)) {
+ result = -OCF_ERR_NO_MEM;
+ goto alloc_err;
+ }
+
+ /* Lock cache during setup - this trylock should always succeed */
+ ENV_BUG_ON(ocf_mngt_cache_trylock(cache));
+
+ if (env_mutex_init(&cache->flush_mutex)) {
+ result = -OCF_ERR_NO_MEM;
+ goto lock_err;
+ }
+
+ ENV_BUG_ON(!ocf_refcnt_inc(&cache->refcnt.cache));
+
+ /* start with frozen metadata ref counter to indicate detached device */
+ ocf_refcnt_freeze(&cache->refcnt.metadata);
+
+ env_atomic_set(&(cache->last_access_ms),
+ env_ticks_to_msecs(env_get_tick_count()));
+
+ env_bit_set(ocf_cache_state_initializing, &cache->cache_state);
+
+ params->cache = cache;
+ params->flags.cache_alloc = true;
+
+ return 0;
+
+lock_err:
+ ocf_mngt_cache_lock_deinit(cache);
+alloc_err:
+ env_vfree(cache);
+
+ return result;
+}
+
+static void _ocf_mngt_attach_cache_device(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_volume_type_t type;
+ int ret;
+
+ cache->device = env_vzalloc(sizeof(*cache->device));
+ if (!cache->device)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
+
+ context->flags.device_alloc = true;
+
+ cache->device->init_mode = context->init_mode;
+
+ /* Prepare UUID of cache volume */
+ type = ocf_ctx_get_volume_type(cache->owner, context->cfg.volume_type);
+ if (!type) {
+ OCF_PL_FINISH_RET(context->pipeline,
+ -OCF_ERR_INVAL_VOLUME_TYPE);
+ }
+
+ ret = ocf_volume_init(&cache->device->volume, type,
+ &context->cfg.uuid, true);
+ if (ret)
+ OCF_PL_FINISH_RET(context->pipeline, ret);
+
+ cache->device->volume.cache = cache;
+ context->flags.volume_inited = true;
+
+ /*
+ * Open the cache device. It has to be done first because the metadata
+ * service needs to know the size of the cache device.
+ */
+ ret = ocf_volume_open(&cache->device->volume,
+ context->cfg.volume_params);
+ if (ret) {
+ ocf_cache_log(cache, log_err, "ERROR: Cache not available\n");
+ OCF_PL_FINISH_RET(context->pipeline, ret);
+ }
+ context->flags.device_opened = true;
+
+ context->volume_size = ocf_volume_get_length(&cache->device->volume);
+
+ /* Check minimum size of cache device */
+ if (context->volume_size < OCF_CACHE_SIZE_MIN) {
+ ocf_cache_log(cache, log_err, "ERROR: Cache cache size must "
+ "be at least %llu [MiB]\n", OCF_CACHE_SIZE_MIN / MiB);
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_INVAL_CACHE_DEV);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+/**
+ * @brief prepare cache for init. This is the first step towards initializing
+ * the cache
+ */
+static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param,
+ struct ocf_mngt_cache_config *cfg)
+{
+ ocf_cache_t cache;
+ int ret = 0;
+
+ /* Check if cache with specified name exists */
+ ret = ocf_mngt_cache_get_by_name(param->ctx, cfg->name,
+ OCF_CACHE_NAME_SIZE, &cache);
+ if (!ret) {
+ ocf_mngt_cache_put(cache);
+ /* Cache already exist */
+ ret = -OCF_ERR_CACHE_EXIST;
+ goto out;
+ }
+
+ ocf_log(param->ctx, log_info, "Inserting cache %s\n", cfg->name);
+
+ ret = _ocf_mngt_init_new_cache(param);
+ if (ret)
+ goto out;
+
+ cache = param->cache;
+
+ cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
+ cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;
+
+ param->flags.cache_locked = true;
+
+ cache->pt_unaligned_io = cfg->pt_unaligned_io;
+ cache->use_submit_io_fast = cfg->use_submit_io_fast;
+
+ cache->eviction_policy_init = cfg->eviction_policy;
+ cache->metadata.is_volatile = cfg->metadata_volatile;
+
+out:
+ return ret;
+}
+
+static void _ocf_mngt_test_volume_initial_write_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->test.pipeline, error);
+}
+
+static void _ocf_mngt_test_volume_initial_write(
+ ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /*
+ * Write buffer filled with "1"
+ */
+
+ ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));
+
+ ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
+ OCF_WRITE, context->test.rw_buffer,
+ _ocf_mngt_test_volume_initial_write_complete, context);
+}
+
+static void _ocf_mngt_test_volume_first_read_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int ret, diff;
+
+ if (error)
+ OCF_PL_FINISH_RET(context->test.pipeline, error);
+
+ ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
+ context->test.cmp_buffer, PAGE_SIZE, &diff);
+ if (ret)
+ OCF_PL_FINISH_RET(context->test.pipeline, ret);
+
+ if (diff) {
+ /* we read back different data than what we had just
+ written - this is a fatal error */
+ OCF_PL_FINISH_RET(context->test.pipeline, -OCF_ERR_IO);
+ }
+
+ if (!ocf_volume_is_atomic(&cache->device->volume)) {
+ /* If not atomic, stop testing here */
+ OCF_PL_FINISH_RET(context->test.pipeline, 0);
+ }
+
+ ocf_pipeline_next(context->test.pipeline);
+}
+
+static void _ocf_mngt_test_volume_first_read(
+ ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /*
+ * First read
+ */
+
+ ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 0));
+ ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 1));
+
+ ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
+ OCF_READ, context->test.rw_buffer,
+ _ocf_mngt_test_volume_first_read_complete, context);
+}
+
+static void _ocf_mngt_test_volume_discard_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->test.pipeline, error);
+}
+
+static void _ocf_mngt_test_volume_discard(
+ ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /*
+ * Submit discard request
+ */
+
+ ocf_submit_volume_discard(&cache->device->volume,
+ context->test.reserved_lba_addr, PAGE_SIZE,
+ _ocf_mngt_test_volume_discard_complete, context);
+}
+
+static void _ocf_mngt_test_volume_second_read_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int ret, diff;
+
+ if (error)
+ OCF_PL_FINISH_RET(context->test.pipeline, error);
+
+ ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
+ context->test.cmp_buffer, PAGE_SIZE, &diff);
+ if (ret)
+ OCF_PL_FINISH_RET(context->test.pipeline, ret);
+
+ if (diff) {
+ /* discard does not cause target addresses to return 0 on
+ subsequent read */
+ cache->device->volume.features.discard_zeroes = 0;
+ }
+
+ ocf_pipeline_next(context->test.pipeline);
+}
+
+static void _ocf_mngt_test_volume_second_read(
+ ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /*
+ * Second read
+ */
+
+ ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));
+ ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 0));
+
+ ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
+ OCF_READ, context->test.rw_buffer,
+ _ocf_mngt_test_volume_second_read_complete, context);
+}
+
+static void _ocf_mngt_test_volume_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+
+ env_free(context->test.rw_buffer);
+ env_free(context->test.cmp_buffer);
+
+ ocf_pipeline_destroy(context->test.pipeline);
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
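+/*
+ * Volume test pipeline: write a page of ones to a reserved LBA, read it back
+ * to verify the device returns what was just written, then (for atomic
+ * volumes only) discard that page and read it again to check whether
+ * discarded addresses read back as zeroes. The outcome is recorded in the
+ * volume's discard_zeroes feature flag.
+ */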
+struct ocf_pipeline_properties _ocf_mngt_test_volume_pipeline_properties = {
+ .priv_size = 0,
+ .finish = _ocf_mngt_test_volume_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_test_volume_initial_write),
+ OCF_PL_STEP(_ocf_mngt_test_volume_first_read),
+ OCF_PL_STEP(_ocf_mngt_test_volume_discard),
+ OCF_PL_STEP(_ocf_mngt_test_volume_second_read),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+static void _ocf_mngt_test_volume(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_pipeline_t test_pipeline;
+ int result;
+
+ cache->device->volume.features.discard_zeroes = 1;
+
+ if (!context->cfg.perform_test)
+ OCF_PL_NEXT_RET(pipeline);
+
+ context->test.reserved_lba_addr = ocf_metadata_get_reserved_lba(cache);
+
+ context->test.rw_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ if (!context->test.rw_buffer)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
+
+ context->test.cmp_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ if (!context->test.cmp_buffer)
+ goto err_buffer;
+
+ result = ocf_pipeline_create(&test_pipeline, cache,
+ &_ocf_mngt_test_volume_pipeline_properties);
+ if (result)
+ goto err_pipeline;
+
+ ocf_pipeline_set_priv(test_pipeline, context);
+
+ context->test.pipeline = test_pipeline;
+
+ OCF_PL_NEXT_RET(test_pipeline);
+
+err_pipeline:
+ env_free(context->test.rw_buffer);
+err_buffer:
+ env_free(context->test.cmp_buffer);
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
+}
+
+/**
+ * Prepare metadata according to mode (for load/recovery read from disk)
+ */
+static void _ocf_mngt_attach_load_properties_end(void *priv, int error,
+ struct ocf_metadata_load_properties *properties)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ context->metadata.status = error;
+
+ if (error) {
+ /*
+ * If the --load option wasn't used and old metadata doesn't exist on
+ * the device, dismiss the error.
+ */
+ if (error == -OCF_ERR_NO_METADATA &&
+ cache->device->init_mode != ocf_init_mode_load)
+ OCF_PL_NEXT_RET(context->pipeline);
+ else
+ OCF_PL_FINISH_RET(context->pipeline, error);
+ } else if (cache->device->init_mode != ocf_init_mode_load) {
+ /*
+ * To prevent silently overriding old metadata, return an error if it
+ * was detected but the --load flag wasn't used.
+ */
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_METADATA_FOUND);
+ }
+
+ /*
+ * Check if name loaded from disk is the same as present one.
+ */
+ if (env_strncmp(cache->conf_meta->name, OCF_CACHE_NAME_SIZE,
+ properties->cache_name, OCF_CACHE_NAME_SIZE)) {
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CACHE_NAME_MISMATCH);
+ }
+
+ context->metadata.shutdown_status = properties->shutdown_status;
+ context->metadata.dirty_flushed = properties->dirty_flushed;
+
+ if (cache->device->init_mode == ocf_init_mode_load) {
+ context->metadata.line_size = properties->line_size;
+ cache->conf_meta->metadata_layout = properties->layout;
+ cache->conf_meta->cache_mode = properties->cache_mode;
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_attach_load_properties(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ OCF_ASSERT_PLUGGED(cache);
+
+ context->metadata.shutdown_status = ocf_metadata_clean_shutdown;
+ context->metadata.dirty_flushed = DIRTY_FLUSHED;
+ context->metadata.line_size = context->cfg.cache_line_size;
+
+ if (context->cfg.force)
+ OCF_PL_NEXT_RET(context->pipeline);
+
+ if (cache->device->init_mode == ocf_init_mode_metadata_volatile)
+ OCF_PL_NEXT_RET(context->pipeline);
+
+ ocf_metadata_load_properties(&cache->device->volume,
+ _ocf_mngt_attach_load_properties_end, context);
+}
+
+static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int ret;
+
+ if (context->init_mode == ocf_init_mode_load &&
+ context->metadata.status) {
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
+ }
+
+ context->metadata.line_size = context->metadata.line_size ?:
+ cache->metadata.settings.size;
+
+ /*
+ * Initialize variable size metadata segments
+ */
+ if (ocf_metadata_init_variable_size(cache, context->volume_size,
+ context->metadata.line_size,
+ cache->conf_meta->metadata_layout)) {
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
+ }
+ context->flags.attached_metadata_inited = true;
+
+ cache->freelist = ocf_freelist_init(cache);
+ if (!cache->freelist)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
+ context->flags.freelist_inited = true;
+
+ ret = ocf_concurrency_init(cache);
+ if (ret)
+ OCF_PL_FINISH_RET(context->pipeline, ret);
+
+ context->flags.concurrency_inited = 1;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+/**
+ * @brief initialize cache anew (not loading or recovering)
+ */
+static void _ocf_mngt_init_instance_init(struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+ ocf_error_t result;
+
+ if (!context->metadata.status && !context->cfg.force &&
+ context->metadata.shutdown_status !=
+ ocf_metadata_detached) {
+
+ if (context->metadata.shutdown_status !=
+ ocf_metadata_clean_shutdown) {
+ ocf_cache_log(cache, log_err, DIRTY_SHUTDOWN_ERROR_MSG);
+ OCF_PL_FINISH_RET(context->pipeline,
+ -OCF_ERR_DIRTY_SHUTDOWN);
+ }
+
+ if (context->metadata.dirty_flushed == DIRTY_NOT_FLUSHED) {
+ ocf_cache_log(cache, log_err,
+ DIRTY_NOT_FLUSHED_ERROR_MSG);
+ OCF_PL_FINISH_RET(context->pipeline,
+ -OCF_ERR_DIRTY_EXISTS);
+
+ }
+ }
+
+ result = init_attached_data_structures(cache, cache->eviction_policy_init);
+ if (result)
+ OCF_PL_FINISH_RET(context->pipeline, result);
+
+ /* In the initial cache state there is no dirty data, so all dirty data
+ is considered to be flushed
+ */
+ cache->conf_meta->dirty_flushed = true;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
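+/*
+ * Heuristic estimate of RAM needed for in-memory cache metadata: ~100 MiB of
+ * constant overhead (superblock + per-core metadata) plus 68 bytes per cache
+ * line plus 2 bytes per 4 KiB of cache line size, with a 10% safety margin
+ * on top. Illustrative example (assumed sizes, not a guarantee): a 1 TiB
+ * cache volume with 4 KiB cache lines gives 268435456 lines * 70 B + 100 MiB,
+ * plus 10%, i.e. roughly 19.4 GiB.
+ */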
+uint64_t _ocf_mngt_calculate_ram_needed(ocf_cache_t cache,
+ ocf_volume_t cache_volume)
+{
+ ocf_cache_line_size_t line_size = ocf_line_size(cache);
+ uint64_t volume_size = ocf_volume_get_length(cache_volume);
+ uint64_t const_data_size;
+ uint64_t cache_line_no;
+ uint64_t data_per_line;
+ uint64_t min_free_ram;
+
+ /* Superblock + per core metadata */
+ const_data_size = 100 * MiB;
+
+ /* Cache metadata */
+ cache_line_no = volume_size / line_size;
+ data_per_line = (68 + (2 * (line_size / KiB / 4)));
+
+ min_free_ram = const_data_size + cache_line_no * data_per_line;
+
+ /* 110% of calculated value */
+ min_free_ram = (11 * min_free_ram) / 10;
+
+ return min_free_ram;
+}
+
+int ocf_mngt_get_ram_needed(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg, uint64_t *ram_needed)
+{
+ ocf_volume_t volume;
+ ocf_volume_type_t type;
+ int result;
+
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(cfg);
+ OCF_CHECK_NULL(ram_needed);
+
+ type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
+ if (!type)
+ return -OCF_ERR_INVAL_VOLUME_TYPE;
+
+ result = ocf_volume_create(&volume, type,
+ &cfg->uuid);
+ if (result)
+ return result;
+
+ result = ocf_volume_open(volume, cfg->volume_params);
+ if (result) {
+ ocf_volume_destroy(volume);
+ return result;
+ }
+
+ *ram_needed = _ocf_mngt_calculate_ram_needed(cache, volume);
+
+ ocf_volume_close(volume);
+ ocf_volume_destroy(volume);
+
+ return 0;
+}
+
+/**
+ * @brief For error handling, do partial cleanup of data structures upon
+ * premature function exit.
+ *
+ * @param ctx OCF context
+ * @param params - startup params containing initialization status flags.
+ *
+ */
+static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
+ struct ocf_cache_mngt_init_params *params)
+{
+ ocf_cache_t cache = params->cache;
+
+ if (!params->flags.cache_alloc)
+ return;
+
+ if (params->flags.metadata_inited)
+ ocf_metadata_deinit(cache);
+
+ if (!params->flags.added_to_list)
+ return;
+
+ env_rmutex_lock(&ctx->lock);
+
+ list_del(&cache->list);
+ env_vfree(cache);
+
+ env_rmutex_unlock(&ctx->lock);
+}
+
+static void _ocf_mngt_attach_handle_error(
+ struct ocf_cache_attach_context *context)
+{
+ ocf_cache_t cache = context->cache;
+
+ if (context->flags.cleaner_started)
+ ocf_stop_cleaner(cache);
+
+ if (context->flags.promotion_initialized)
+ __deinit_promotion_policy(cache);
+
+ if (context->flags.cores_opened)
+ _ocf_mngt_close_all_uninitialized_cores(cache);
+
+ if (context->flags.attached_metadata_inited)
+ ocf_metadata_deinit_variable_size(cache);
+
+ if (context->flags.device_opened)
+ ocf_volume_close(&cache->device->volume);
+
+ if (context->flags.concurrency_inited)
+ ocf_concurrency_deinit(cache);
+
+ if (context->flags.freelist_inited)
+ ocf_freelist_deinit(cache->freelist);
+
+ if (context->flags.volume_inited)
+ ocf_volume_deinit(&cache->device->volume);
+
+ if (context->flags.device_alloc)
+ env_vfree(cache->device);
+
+ ocf_pipeline_destroy(cache->stop_pipeline);
+}
+
+static void _ocf_mngt_cache_init(ocf_cache_t cache,
+ struct ocf_cache_mngt_init_params *params)
+{
+ /*
+ * Super block elements initialization
+ */
+ cache->conf_meta->cache_mode = params->metadata.cache_mode;
+ cache->conf_meta->metadata_layout = params->metadata.layout;
+ cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy;
+
+ INIT_LIST_HEAD(&cache->io_queues);
+
+ /* Init Partitions */
+ ocf_part_init(cache);
+
+ __init_cores(cache);
+ __init_metadata_version(cache);
+ __init_partitions(cache);
+}
+
+static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
+ struct ocf_mngt_cache_config *cfg)
+{
+ struct ocf_cache_mngt_init_params params;
+ ocf_cache_t tmp_cache;
+ int result;
+
+ ENV_BUG_ON(env_memset(&params, sizeof(params), 0));
+
+ params.ctx = ctx;
+ params.metadata.cache_mode = cfg->cache_mode;
+ params.metadata.layout = cfg->metadata_layout;
+ params.metadata.line_size = cfg->cache_line_size;
+ params.metadata_volatile = cfg->metadata_volatile;
+ params.metadata.promotion_policy = cfg->promotion_policy;
+ params.locked = cfg->locked;
+
+ result = env_rmutex_lock_interruptible(&ctx->lock);
+ if (result)
+ goto _cache_mngt_init_instance_ERROR;
+
+ /* Prepare cache */
+ result = _ocf_mngt_init_prepare_cache(&params, cfg);
+ if (result) {
+ env_rmutex_unlock(&ctx->lock);
+ goto _cache_mngt_init_instance_ERROR;
+ }
+
+ tmp_cache = params.cache;
+ tmp_cache->owner = ctx;
+
+ /*
+ * Initialize metadata selected segments of metadata in memory
+ */
+ result = ocf_metadata_init(tmp_cache, params.metadata.line_size);
+ if (result) {
+ env_rmutex_unlock(&ctx->lock);
+ result = -OCF_ERR_NO_MEM;
+ goto _cache_mngt_init_instance_ERROR;
+ }
+ params.flags.metadata_inited = true;
+
+ result = ocf_cache_set_name(tmp_cache, cfg->name, OCF_CACHE_NAME_SIZE);
+ if (result) {
+ env_rmutex_unlock(&ctx->lock);
+ goto _cache_mngt_init_instance_ERROR;
+ }
+
+ list_add_tail(&tmp_cache->list, &ctx->caches);
+ params.flags.added_to_list = true;
+ env_rmutex_unlock(&ctx->lock);
+
+ result = ocf_metadata_io_init(tmp_cache);
+ if (result)
+ goto _cache_mngt_init_instance_ERROR;
+
+ ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");
+
+ _ocf_mngt_cache_init(tmp_cache, &params);
+
+ ocf_ctx_get(ctx);
+
+ if (!params.locked) {
+ /* User did not request to lock cache instance after creation -
+ unlock it here since we have acquired the lock to
+ perform management operations. */
+ ocf_mngt_cache_unlock(tmp_cache);
+ params.flags.cache_locked = false;
+ }
+
+ *cache = tmp_cache;
+
+ return 0;
+
+_cache_mngt_init_instance_ERROR:
+ _ocf_mngt_init_handle_error(ctx, &params);
+ *cache = NULL;
+ return result;
+}
+
+static void _ocf_mngt_cache_set_valid(ocf_cache_t cache)
+{
+ /*
+ * Clear initialization state and set the valid bit so we know
+ * it's in use.
+ */
+ env_bit_clear(ocf_cache_state_initializing, &cache->cache_state);
+ env_bit_set(ocf_cache_state_running, &cache->cache_state);
+}
+
+static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache)
+{
+ env_atomic_set(&cache->fallback_pt_error_counter, 0);
+}
+
+static void _ocf_mngt_attach_check_ram(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ uint64_t min_free_ram;
+ uint64_t free_ram;
+
+ min_free_ram = _ocf_mngt_calculate_ram_needed(cache,
+ &cache->device->volume);
+
+ free_ram = env_get_free_memory();
+
+ if (free_ram < min_free_ram) {
+ ocf_cache_log(cache, log_err, "Not enough free RAM for cache "
+ "metadata to start cache\n");
+ ocf_cache_log(cache, log_err,
+ "Available RAM: %" ENV_PRIu64 " B\n", free_ram);
+ ocf_cache_log(cache, log_err, "Needed RAM: %" ENV_PRIu64 " B\n",
+ min_free_ram);
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_NO_FREE_RAM);
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+
+static void _ocf_mngt_attach_load_superblock_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (cache->conf_meta->cachelines !=
+ ocf_metadata_get_cachelines_count(cache)) {
+ ocf_cache_log(cache, log_err,
+ "ERROR: Cache device size mismatch!\n");
+ OCF_PL_FINISH_RET(context->pipeline,
+ -OCF_ERR_START_CACHE_FAIL);
+ }
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "ERROR: Cannot load cache state\n");
+ OCF_PL_FINISH_RET(context->pipeline,
+ -OCF_ERR_START_CACHE_FAIL);
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_attach_load_superblock(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (cache->device->init_mode != ocf_init_mode_load)
+ OCF_PL_NEXT_RET(context->pipeline);
+
+ ocf_cache_log(cache, log_info, "Loading cache state...\n");
+ ocf_metadata_load_superblock(cache,
+ _ocf_mngt_attach_load_superblock_complete, context);
+}
+
+static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int result;
+
+ result = ocf_start_cleaner(cache);
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Error while starting cleaner\n");
+ OCF_PL_FINISH_RET(context->pipeline, result);
+ }
+ context->flags.cleaner_started = true;
+
+ result = ocf_promotion_init(cache, cache->conf_meta->promotion_policy_type);
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Cannot initialize promotion policy\n");
+ OCF_PL_FINISH_RET(context->pipeline, result);
+ }
+ context->flags.promotion_initialized = true;
+
+ switch (cache->device->init_mode) {
+ case ocf_init_mode_init:
+ case ocf_init_mode_metadata_volatile:
+ _ocf_mngt_init_instance_init(context);
+ return;
+ case ocf_init_mode_load:
+ _ocf_mngt_init_instance_load(context);
+ return;
+ default:
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_INVAL);
+ }
+}
+
+static void _ocf_mngt_attach_flush_metadata_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "ERROR: Cannot save cache state\n");
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_attach_flush_metadata(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_metadata_flush_all(cache,
+ _ocf_mngt_attach_flush_metadata_complete, context);
+}
+
+static void _ocf_mngt_attach_discard_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ bool discard = cache->device->volume.features.discard_zeroes;
+
+ if (error) {
+ ocf_cache_log(cache, log_warn, "%s failed\n",
+ discard ? "Discarding whole cache device" :
+ "Overwriting cache with zeroes");
+
+ if (ocf_volume_is_atomic(&cache->device->volume)) {
+ ocf_cache_log(cache, log_err, "This step is required"
+ " for atomic mode!\n");
+ OCF_PL_FINISH_RET(context->pipeline, error);
+ }
+
+ ocf_cache_log(cache, log_warn, "This may impact cache"
+ " performance!\n");
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ uint64_t addr = cache->device->metadata_offset;
+ uint64_t length = ocf_volume_get_length(&cache->device->volume) - addr;
+ bool discard = cache->device->volume.features.discard_zeroes;
+
+ if (cache->device->init_mode == ocf_init_mode_load)
+ OCF_PL_NEXT_RET(context->pipeline);
+
+ if (!context->cfg.discard_on_start)
+ OCF_PL_NEXT_RET(context->pipeline);
+
+ if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
+ /* discard doesn't zero data - need to explicitly write zeros */
+ ocf_submit_write_zeros(&cache->device->volume, addr, length,
+ _ocf_mngt_attach_discard_complete, context);
+ } else {
+ /* Discard volume after metadata */
+ ocf_submit_volume_discard(&cache->device->volume, addr, length,
+ _ocf_mngt_attach_discard_complete, context);
+ }
+}
+
+static void _ocf_mngt_attach_flush_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
+static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ bool discard = cache->device->volume.features.discard_zeroes;
+
+ if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
+ ocf_submit_volume_flush(&cache->device->volume,
+ _ocf_mngt_attach_flush_complete, context);
+ } else {
+ ocf_pipeline_next(context->pipeline);
+ }
+}
+
+static void _ocf_mngt_attach_shutdown_status_complete(void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "Cannot flush shutdown status\n");
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_attach_shutdown_status(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /* clear clean shutdown status */
+ ocf_metadata_set_shutdown_status(cache, ocf_metadata_dirty_shutdown,
+ _ocf_mngt_attach_shutdown_status_complete, context);
+}
+
+static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_attach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_cleaner_refcnt_unfreeze(cache);
+ ocf_refcnt_unfreeze(&cache->refcnt.metadata);
+
+ ocf_cache_log(cache, log_debug, "Cache attached\n");
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_cache_attach_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_cache_attach_context *context = priv;
+
+ if (error)
+ _ocf_mngt_attach_handle_error(context);
+
+ context->cmpl(context->cache, context->priv1, context->priv2, error);
+
+ env_vfree(context->cfg.uuid.data);
+ ocf_pipeline_destroy(context->pipeline);
+}
+
+struct ocf_pipeline_properties _ocf_mngt_cache_attach_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_cache_attach_context),
+ .finish = _ocf_mngt_cache_attach_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_attach_cache_device),
+ OCF_PL_STEP(_ocf_mngt_attach_check_ram),
+ OCF_PL_STEP(_ocf_mngt_attach_load_properties),
+ OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
+ OCF_PL_STEP(_ocf_mngt_test_volume),
+ OCF_PL_STEP(_ocf_mngt_attach_load_superblock),
+ OCF_PL_STEP(_ocf_mngt_attach_init_instance),
+ OCF_PL_STEP(_ocf_mngt_attach_flush_metadata),
+ OCF_PL_STEP(_ocf_mngt_attach_discard),
+ OCF_PL_STEP(_ocf_mngt_attach_flush),
+ OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
+ OCF_PL_STEP(_ocf_mngt_attach_post_init),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+typedef void (*_ocf_mngt_cache_unplug_end_t)(void *context, int error);
+
+struct _ocf_mngt_cache_unplug_context {
+ _ocf_mngt_cache_unplug_end_t cmpl;
+ void *priv;
+ ocf_cache_t cache;
+};
+
+struct ocf_mngt_cache_stop_context {
+ /* unplug context - this is private structure of _ocf_mngt_cache_unplug,
+ * it is member of stop context only to reserve memory in advance for
+ * _ocf_mngt_cache_unplug, eliminating the possibility of ENOMEM error
+ * at the point where we are effectively unable to handle it */
+ struct _ocf_mngt_cache_unplug_context unplug_context;
+
+ ocf_mngt_cache_stop_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ ocf_ctx_t ctx;
+ char cache_name[OCF_CACHE_NAME_SIZE];
+ int cache_write_error;
+};
+
+static void ocf_mngt_cache_stop_wait_metadata_io_finish(void *priv)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_stop_wait_metadata_io(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_refcnt_freeze(&cache->refcnt.metadata);
+ ocf_refcnt_register_zero_cb(&cache->refcnt.metadata,
+ ocf_mngt_cache_stop_wait_metadata_io_finish, context);
+}
+
+static void _ocf_mngt_cache_stop_remove_cores(ocf_cache_t cache, bool attached)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ int no = cache->conf_meta->core_count;
+
+ /* All exported objects removed, cleaning up the rest. */
+ for_each_core_all(cache, core, core_id) {
+ if (!env_bit_test(core_id, cache->conf_meta->valid_core_bitmap))
+ continue;
+
+ cache_mngt_core_remove_from_cache(core);
+ if (attached)
+ cache_mngt_core_remove_from_cleaning_pol(core);
+ cache_mngt_core_close(core);
+ if (--no == 0)
+ break;
+ }
+ ENV_BUG_ON(cache->conf_meta->core_count != 0);
+}
+
+static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ _ocf_mngt_cache_stop_remove_cores(cache, true);
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_stop_unplug_complete(void *priv, int error)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+
+ if (error) {
+ ENV_BUG_ON(error != -OCF_ERR_WRITE_CACHE);
+ context->cache_write_error = error;
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
+ struct _ocf_mngt_cache_unplug_context *context,
+ _ocf_mngt_cache_unplug_end_t cmpl, void *priv);
+
+static void ocf_mngt_cache_stop_unplug(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ _ocf_mngt_cache_unplug(cache, true, &context->unplug_context,
+ ocf_mngt_cache_stop_unplug_complete, context);
+}
+
+static void _ocf_mngt_cache_put_io_queues(ocf_cache_t cache)
+{
+ ocf_queue_t queue, tmp_queue;
+
+ list_for_each_entry_safe(queue, tmp_queue, &cache->io_queues, list)
+ ocf_queue_put(queue);
+}
+
+static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ _ocf_mngt_cache_put_io_queues(cache);
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_remove(ocf_ctx_t ctx, ocf_cache_t cache)
+{
+ /* Mark device uninitialized */
+ ocf_refcnt_freeze(&cache->refcnt.cache);
+
+ /* Deinitialize locks */
+ ocf_mngt_cache_lock_deinit(cache);
+ env_mutex_destroy(&cache->flush_mutex);
+
+ /* Remove cache from the list */
+ env_rmutex_lock(&ctx->lock);
+ list_del(&cache->list);
+ env_rmutex_unlock(&ctx->lock);
+}
+
+static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_stop_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_ctx_t ctx = context->ctx;
+ int pipeline_error;
+ ocf_mngt_cache_stop_end_t pipeline_cmpl;
+ void *completion_priv;
+
+ if (!error) {
+ ocf_mngt_cache_remove(context->ctx, cache);
+ } else {
+ /* undo metadata counter freeze */
+ ocf_refcnt_unfreeze(&cache->refcnt.metadata);
+
+ env_bit_clear(ocf_cache_state_stopping, &cache->cache_state);
+ env_bit_set(ocf_cache_state_running, &cache->cache_state);
+ }
+
+ if (!error) {
+ if (!context->cache_write_error) {
+ ocf_log(ctx, log_info,
+ "Cache %s successfully stopped\n",
+ context->cache_name);
+ } else {
+ ocf_log(ctx, log_warn, "Stopped cache %s with errors\n",
+ context->cache_name);
+ }
+ } else {
+ ocf_log(ctx, log_err, "Stopping cache %s failed\n",
+ context->cache_name);
+ }
+
+ /*
+ * FIXME: Destroying pipeline before completing management operation is a
+ * temporary workaround for insufficient object lifetime management in
+ * pyocf. Context must not be referenced after destroying the pipeline,
+ * as it is typically freed upon pipeline destroy.
+ */
+ pipeline_error = error ?: context->cache_write_error;
+ pipeline_cmpl = context->cmpl;
+ completion_priv = context->priv;
+
+ ocf_pipeline_destroy(context->pipeline);
+
+ pipeline_cmpl(cache, completion_priv, pipeline_error);
+
+ if (!error) {
+ /* Finally release cache instance */
+ ocf_mngt_cache_put(cache);
+ }
+}
+
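+/*
+ * Cache stop sequence: drain in-flight metadata I/O, remove all cores from
+ * the cache, unplug the cache device (flushing metadata and marking a clean
+ * shutdown), then release the I/O queues.
+ */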
+struct ocf_pipeline_properties ocf_mngt_cache_stop_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_stop_context),
+ .finish = ocf_mngt_cache_stop_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_stop_wait_metadata_io),
+ OCF_PL_STEP(ocf_mngt_cache_stop_remove_cores),
+ OCF_PL_STEP(ocf_mngt_cache_stop_unplug),
+ OCF_PL_STEP(ocf_mngt_cache_stop_put_io_queues),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+
+static void _ocf_mngt_cache_attach(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg, bool load,
+ _ocf_mngt_cache_attach_end_t cmpl, void *priv1, void *priv2)
+{
+ struct ocf_cache_attach_context *context;
+ ocf_pipeline_t pipeline;
+ void *data;
+ int result;
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &_ocf_mngt_cache_attach_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(cache, priv1, priv2, -OCF_ERR_NO_MEM);
+
+ result = ocf_pipeline_create(&cache->stop_pipeline, cache,
+ &ocf_mngt_cache_stop_pipeline_properties);
+ if (result) {
+ ocf_pipeline_destroy(pipeline);
+ OCF_CMPL_RET(cache, priv1, priv2, -OCF_ERR_NO_MEM);
+ }
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv1 = priv1;
+ context->priv2 = priv2;
+ context->pipeline = pipeline;
+
+ context->cache = cache;
+ context->cfg = *cfg;
+
+ data = env_vmalloc(cfg->uuid.size);
+ if (!data) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_pipeline;
+ }
+
+ result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
+ cfg->uuid.size);
+ if (result)
+ goto err_uuid;
+
+ context->cfg.uuid.data = data;
+
+ if (cache->metadata.is_volatile) {
+ context->init_mode = ocf_init_mode_metadata_volatile;
+ } else {
+ context->init_mode = load ?
+ ocf_init_mode_load : ocf_init_mode_init;
+ }
+
+ _ocf_mngt_init_attached_nonpersistent(cache);
+
+ OCF_PL_NEXT_RET(pipeline);
+
+err_uuid:
+ env_vfree(data);
+err_pipeline:
+ ocf_pipeline_destroy(pipeline);
+ ocf_pipeline_destroy(cache->stop_pipeline);
+ OCF_CMPL_RET(cache, priv1, priv2, result);
+}
+
+static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
+{
+ if (!strnlen(cfg->name, OCF_CACHE_NAME_SIZE))
+ return -OCF_ERR_INVAL;
+
+ if (!ocf_cache_mode_is_valid(cfg->cache_mode))
+ return -OCF_ERR_INVALID_CACHE_MODE;
+
+ if (cfg->eviction_policy >= ocf_eviction_max ||
+ cfg->eviction_policy < 0) {
+ return -OCF_ERR_INVAL;
+ }
+
+ if (cfg->promotion_policy >= ocf_promotion_max ||
+ cfg->promotion_policy < 0) {
+ return -OCF_ERR_INVAL;
+ }
+
+ if (!ocf_cache_line_size_is_valid(cfg->cache_line_size))
+ return -OCF_ERR_INVALID_CACHE_LINE_SIZE;
+
+ if (cfg->metadata_layout >= ocf_metadata_layout_max ||
+ cfg->metadata_layout < 0) {
+ return -OCF_ERR_INVAL;
+ }
+
+ if (cfg->backfill.queue_unblock_size > cfg->backfill.max_queue_size)
+ return -OCF_ERR_INVAL;
+
+ return 0;
+}
+
+static int _ocf_mngt_cache_validate_device_cfg(
+ struct ocf_mngt_cache_device_config *device_cfg)
+{
+ if (!device_cfg->uuid.data)
+ return -OCF_ERR_INVAL;
+
+ if (device_cfg->uuid.size > OCF_VOLUME_UUID_MAX_SIZE)
+ return -OCF_ERR_INVAL;
+
+ if (device_cfg->cache_line_size != ocf_cache_line_size_none &&
+ !ocf_cache_line_size_is_valid(device_cfg->cache_line_size))
+ return -OCF_ERR_INVALID_CACHE_LINE_SIZE;
+
+ return 0;
+}
+
+static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = {
+ [ocf_cache_mode_wt] = "wt",
+ [ocf_cache_mode_wb] = "wb",
+ [ocf_cache_mode_wa] = "wa",
+ [ocf_cache_mode_pt] = "pt",
+ [ocf_cache_mode_wi] = "wi",
+ [ocf_cache_mode_wo] = "wo",
+};
+
+static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode)
+{
+ if (!ocf_cache_mode_is_valid(cache_mode))
+ return NULL;
+
+ return _ocf_cache_mode_names[cache_mode];
+}
+
+int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
+ struct ocf_mngt_cache_config *cfg)
+{
+ int result;
+
+ if (!ctx || !cache || !cfg)
+ return -OCF_ERR_INVAL;
+
+ result = _ocf_mngt_cache_validate_cfg(cfg);
+ if (result)
+ return result;
+
+ result = _ocf_mngt_cache_start(ctx, cache, cfg);
+ if (!result) {
+ _ocf_mngt_cache_set_valid(*cache);
+
+ ocf_cache_log(*cache, log_info, "Successfully added\n");
+ ocf_cache_log(*cache, log_info, "Cache mode : %s\n",
+ _ocf_cache_mode_get_name(ocf_cache_get_mode(*cache)));
+ } else
+ ocf_log(ctx, log_err, "%s: Inserting cache failed\n", cfg->name);
+
+ return result;
+}
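+
+/*
+ * Illustrative startup sequence (sketch only, not part of OCF; ctx, cfg,
+ * mngt_queue, device_cfg, attach_cmpl and priv are assumed to be set up by
+ * the caller):
+ *
+ *   ocf_cache_t cache;
+ *   int ret = ocf_mngt_cache_start(ctx, &cache, &cfg);
+ *   if (!ret)
+ *       ret = ocf_mngt_cache_set_mngt_queue(cache, mngt_queue);
+ *   if (!ret)
+ *       ocf_mngt_cache_attach(cache, &device_cfg, attach_cmpl, priv);
+ */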
+
+int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue)
+{
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(queue);
+
+ if (cache->mngt_queue)
+ return -OCF_ERR_INVAL;
+
+ ocf_queue_get(queue);
+ cache->mngt_queue = queue;
+
+ return 0;
+}
+
+static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache, void *priv1,
+ void *priv2, int error)
+{
+ ocf_mngt_cache_attach_end_t cmpl = priv1;
+
+ if (!error) {
+ ocf_cache_log(cache, log_info, "Successfully attached\n");
+ } else {
+ ocf_cache_log(cache, log_err, "Attaching cache device "
+ "failed\n");
+ }
+
+ OCF_CMPL_RET(cache, priv2, error);
+}
+
+void ocf_mngt_cache_attach(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg,
+ ocf_mngt_cache_attach_end_t cmpl, void *priv)
+{
+ int result;
+
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(cfg);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+
+ result = _ocf_mngt_cache_validate_device_cfg(cfg);
+ if (result)
+ OCF_CMPL_RET(cache, priv, result);
+
+ _ocf_mngt_cache_attach(cache, cfg, false,
+ _ocf_mngt_cache_attach_complete, cmpl, priv);
+}
+
+static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
+{
+ struct _ocf_mngt_cache_unplug_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_volume_close(&cache->device->volume);
+
+ ocf_metadata_deinit_variable_size(cache);
+ ocf_concurrency_deinit(cache);
+ ocf_freelist_deinit(cache->freelist);
+
+ ocf_volume_deinit(&cache->device->volume);
+
+ env_vfree(cache->device);
+ cache->device = NULL;
+
+ /* TODO: this should be removed from detach after 'attached' stats
+ are better separated in statistics */
+ _ocf_mngt_init_attached_nonpersistent(cache);
+
+ context->cmpl(context->priv, error ? -OCF_ERR_WRITE_CACHE : 0);
+}
+
+/**
+ * @brief Unplug caching device from cache instance. Variable size metadata
+ * containers are deinitialized as well as other cacheline related
+ * structures. Cache volume is closed.
+ *
+ * @param cache OCF cache instance
+ * @param stop - true if unplugging during stop - in this case we mark
+ * clean shutdown in metadata and flush all containers.
+ * - false if the device is to be detached from cache - loading
+ * metadata from this device will not be possible.
+ * @param context - context for this call, must be zeroed
+ * @param cmpl Completion callback
+ * @param priv Completion context
+ */
+static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
+ struct _ocf_mngt_cache_unplug_context *context,
+ _ocf_mngt_cache_unplug_end_t cmpl, void *priv)
+{
+ ENV_BUG_ON(stop && cache->conf_meta->core_count != 0);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->cache = cache;
+
+ ocf_stop_cleaner(cache);
+
+ __deinit_cleaning_policy(cache);
+ __deinit_promotion_policy(cache);
+
+ if (ocf_mngt_cache_is_dirty(cache)) {
+ ENV_BUG_ON(!stop);
+
+ cache->conf_meta->dirty_flushed = DIRTY_NOT_FLUSHED;
+
+ ocf_cache_log(cache, log_warn, "Cache is still dirty. "
+ "DO NOT USE your core devices until flushing "
+ "dirty data!\n");
+ } else {
+ cache->conf_meta->dirty_flushed = DIRTY_FLUSHED;
+ }
+
+ if (!stop) {
+ /* Just set correct shutdown status */
+ ocf_metadata_set_shutdown_status(cache, ocf_metadata_detached,
+ _ocf_mngt_cache_unplug_complete, context);
+ } else {
+ /* Flush metadata */
+ ocf_metadata_flush_all(cache,
+ _ocf_mngt_cache_unplug_complete, context);
+ }
+}
+
+static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
+{
+ ocf_core_log(core, log_info, "Successfully added\n");
+
+ return 0;
+}
+
+static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
+{
+ ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
+ ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type;
+ ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
+ ocf_promotion_t promotion_type = cache->conf_meta->promotion_policy_type;
+
+ ocf_cache_log(cache, log_info, "Successfully loaded\n");
+ ocf_cache_log(cache, log_info, "Cache mode : %s\n",
+ _ocf_cache_mode_get_name(cache_mode));
+ ocf_cache_log(cache, log_info, "Eviction policy : %s\n",
+ evict_policy_ops[eviction_type].name);
+ ocf_cache_log(cache, log_info, "Cleaning policy : %s\n",
+ cleaning_policy_ops[cleaning_type].name);
+ ocf_cache_log(cache, log_info, "Promotion policy : %s\n",
+ ocf_promotion_policies[promotion_type].name);
+ ocf_core_visit(cache, _ocf_mngt_cache_load_core_log,
+ cache, false);
+}
+
+static void _ocf_mngt_cache_load_complete(ocf_cache_t cache, void *priv1,
+ void *priv2, int error)
+{
+ ocf_mngt_cache_load_end_t cmpl = priv1;
+
+ if (error)
+ OCF_CMPL_RET(cache, priv2, error);
+
+ _ocf_mngt_cache_set_valid(cache);
+ _ocf_mngt_cache_load_log(cache);
+
+ OCF_CMPL_RET(cache, priv2, 0);
+}
+
+void ocf_mngt_cache_load(ocf_cache_t cache,
+ struct ocf_mngt_cache_device_config *cfg,
+ ocf_mngt_cache_load_end_t cmpl, void *priv)
+{
+ int result;
+
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(cfg);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+
+ /* Load is not allowed in volatile metadata mode */
+ if (cache->metadata.is_volatile)
+ OCF_CMPL_RET(cache, priv, -EINVAL);
+
+ result = _ocf_mngt_cache_validate_device_cfg(cfg);
+ if (result)
+ OCF_CMPL_RET(cache, priv, result);
+
+ _ocf_mngt_cache_attach(cache, cfg, true,
+ _ocf_mngt_cache_load_complete, cmpl, priv);
+}
+
+static void ocf_mngt_cache_stop_detached(ocf_cache_t cache,
+ ocf_mngt_cache_stop_end_t cmpl, void *priv)
+{
+ _ocf_mngt_cache_stop_remove_cores(cache, false);
+ _ocf_mngt_cache_put_io_queues(cache);
+ ocf_mngt_cache_remove(cache->owner, cache);
+ ocf_cache_log(cache, log_info, "Cache %s successfully stopped\n",
+ ocf_cache_get_name(cache));
+ cmpl(cache, priv, 0);
+ ocf_mngt_cache_put(cache);
+}
+
+void ocf_mngt_cache_stop(ocf_cache_t cache,
+ ocf_mngt_cache_stop_end_t cmpl, void *priv)
+{
+ struct ocf_mngt_cache_stop_context *context;
+ ocf_pipeline_t pipeline;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!ocf_cache_is_device_attached(cache)) {
+ ocf_mngt_cache_stop_detached(cache, cmpl, priv);
+ return;
+ }
+
+ ENV_BUG_ON(!cache->mngt_queue);
+
+ pipeline = cache->stop_pipeline;
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+ context->ctx = cache->owner;
+
+ ENV_BUG_ON(env_strncpy(context->cache_name, sizeof(context->cache_name),
+ ocf_cache_get_name(cache), sizeof(context->cache_name)));
+
+ ocf_cache_log(cache, log_info, "Stopping cache\n");
+
+ env_bit_set(ocf_cache_state_stopping, &cache->cache_state);
+ env_bit_clear(ocf_cache_state_running, &cache->cache_state);
+
+ ocf_pipeline_next(pipeline);
+}
+
+struct ocf_mngt_cache_save_context {
+ ocf_mngt_cache_save_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+};
+
+static void ocf_mngt_cache_save_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_save_context *context = priv;
+
+ context->cmpl(context->cache, context->priv, error);
+
+ ocf_pipeline_destroy(context->pipeline);
+}
+
+struct ocf_pipeline_properties ocf_mngt_cache_save_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_save_context),
+ .finish = ocf_mngt_cache_save_finish,
+ .steps = {
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+static void ocf_mngt_cache_save_flush_sb_complete(void *priv, int error)
+{
+ struct ocf_mngt_cache_save_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Failed to flush superblock! Changes "
+ "in cache config are not persistent!\n");
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+void ocf_mngt_cache_save(ocf_cache_t cache,
+ ocf_mngt_cache_save_end_t cmpl, void *priv)
+{
+ struct ocf_mngt_cache_save_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_mngt_cache_save_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(cache, priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_metadata_flush_superblock(cache,
+ ocf_mngt_cache_save_flush_sb_complete, context);
+}
+
+static void _cache_mngt_update_initial_dirty_clines(ocf_cache_t cache)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ for_each_core(cache, core, core_id) {
+ env_atomic_set(&core->runtime_meta->initial_dirty_clines,
+ env_atomic_read(&core->runtime_meta->
+ dirty_clines));
+ }
+
+}
+
+static int _cache_mngt_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
+{
+ ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;
+
+ /* Check if IO interface type is valid */
+ if (!ocf_cache_mode_is_valid(mode))
+ return -OCF_ERR_INVAL;
+
+ if (mode == mode_old) {
+ ocf_cache_log(cache, log_info, "Cache mode '%s' is already set\n",
+ ocf_get_io_iface_name(mode));
+ return 0;
+ }
+
+ cache->conf_meta->cache_mode = mode;
+
+ if (ocf_mngt_cache_mode_has_lazy_write(mode_old) &&
+ !ocf_mngt_cache_mode_has_lazy_write(mode)) {
+ _cache_mngt_update_initial_dirty_clines(cache);
+ }
+
+ ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
+ "successful\n", ocf_get_io_iface_name(mode_old),
+ ocf_get_io_iface_name(mode));
+
+ return 0;
+}
+
+int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
+{
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!ocf_cache_mode_is_valid(mode)) {
+ ocf_cache_log(cache, log_err, "Cache mode %u is invalid\n",
+ mode);
+ return -OCF_ERR_INVAL;
+ }
+
+ result = _cache_mngt_set_cache_mode(cache, mode);
+
+ if (result) {
+ const char *name = ocf_get_io_iface_name(mode);
+
+ ocf_cache_log(cache, log_err, "Setting cache mode '%s' "
+ "failed\n", name);
+ }
+
+ return result;
+}
+
+int ocf_mngt_cache_promotion_set_policy(ocf_cache_t cache, ocf_promotion_t type)
+{
+ int result;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ result = ocf_promotion_set_policy(cache->promotion_policy, type);
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return result;
+}
+
+ocf_promotion_t ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache)
+{
+ ocf_promotion_t result;
+
+ ocf_metadata_start_shared_access(&cache->metadata.lock);
+
+ result = cache->conf_meta->promotion_policy_type;
+
+ ocf_metadata_end_shared_access(&cache->metadata.lock);
+
+ return result;
+}
+
+int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t *param_value)
+{
+ int result;
+
+ ocf_metadata_start_shared_access(&cache->metadata.lock);
+
+ result = ocf_promotion_get_param(cache, type, param_id, param_value);
+
+ ocf_metadata_end_shared_access(&cache->metadata.lock);
+
+ return result;
+}
+
+int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t param_value)
+{
+ int result;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ result = ocf_promotion_set_param(cache, type, param_id, param_value);
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return result;
+}
+
+int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ if (ocf_fallback_pt_is_on(cache)) {
+ ocf_cache_log(cache, log_info,
+ "Fallback Pass Through inactive\n");
+ }
+
+ env_atomic_set(&cache->fallback_pt_error_counter, 0);
+
+ return 0;
+}
+
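+/*
+ * Fallback Pass Through activates once the I/O error counter reaches the
+ * configured threshold, so lowering or raising the threshold may immediately
+ * change its state - hence the before/after comparison below.
+ */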
+int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
+ uint32_t new_threshold)
+{
+ bool old_fallback_pt_state, new_fallback_pt_state;
+
+ OCF_CHECK_NULL(cache);
+
+ if (new_threshold > OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD)
+ return -OCF_ERR_INVAL;
+
+ old_fallback_pt_state = ocf_fallback_pt_is_on(cache);
+
+ cache->fallback_pt_error_threshold = new_threshold;
+
+ new_fallback_pt_state = ocf_fallback_pt_is_on(cache);
+
+ if (old_fallback_pt_state != new_fallback_pt_state) {
+ if (new_fallback_pt_state) {
+ ocf_cache_log(cache, log_info, "Error threshold reached. "
+ "Fallback Pass Through activated\n");
+ } else {
+ ocf_cache_log(cache, log_info, "Fallback Pass Through "
+ "inactive\n");
+ }
+ }
+
+ return 0;
+}
+
+int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
+ uint32_t *threshold)
+{
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(threshold);
+
+ *threshold = cache->fallback_pt_error_threshold;
+
+ return 0;
+}
+
+struct ocf_mngt_cache_detach_context {
+ /* unplug context - this is private structure of _ocf_mngt_cache_unplug,
+ * it is member of detach context only to reserve memory in advance for
+ * _ocf_mngt_cache_unplug, eliminating the possibility of ENOMEM error
+ * at the point where we are effectively unable to handle it */
+ struct _ocf_mngt_cache_unplug_context unplug_context;
+
+ ocf_mngt_cache_detach_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ int cache_write_error;
+ struct ocf_cleaner_wait_context cleaner_wait;
+};
+
+static void ocf_mngt_cache_detach_flush_cmpl(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
+static void ocf_mngt_cache_detach_flush(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_mngt_cache_flush(cache, ocf_mngt_cache_detach_flush_cmpl, context);
+}
+
+static void ocf_mngt_cache_detach_stop_cache_io_finish(void *priv)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_detach_stop_cache_io(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_refcnt_freeze(&cache->refcnt.metadata);
+ ocf_refcnt_register_zero_cb(&cache->refcnt.metadata,
+ ocf_mngt_cache_detach_stop_cache_io_finish, context);
+}
+
+static void ocf_mngt_cache_detach_stop_cleaner_io_finish(void *priv)
+{
+ ocf_pipeline_t pipeline = priv;
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_detach_stop_cleaner_io(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_cleaner_refcnt_freeze(cache);
+ ocf_cleaner_refcnt_register_zero_cb(cache, &context->cleaner_wait,
+ ocf_mngt_cache_detach_stop_cleaner_io_finish,
+ pipeline);
+}
+
+static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+ int no = cache->conf_meta->core_count;
+
+ /* remove cacheline metadata and cleaning policy meta for all cores */
+ for_each_core_metadata(cache, core, core_id) {
+ cache_mngt_core_deinit_attached_meta(core);
+ cache_mngt_core_remove_from_cleaning_pol(core);
+ if (--no == 0)
+ break;
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_detach_unplug_complete(void *priv, int error)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+
+ if (error) {
+ ENV_BUG_ON(error != -OCF_ERR_WRITE_CACHE);
+ context->cache_write_error = error;
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_detach_unplug(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ /* Do the actual detach - deinit cacheline metadata,
+ * stop cleaner thread and close cache bottom device */
+ _ocf_mngt_cache_unplug(cache, false, &context->unplug_context,
+ ocf_mngt_cache_detach_unplug_complete, context);
+}
+
+static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_detach_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ ocf_refcnt_unfreeze(&cache->refcnt.dirty);
+
+ if (!error) {
+ if (!context->cache_write_error) {
+ ocf_cache_log(cache, log_info,
+ "Device successfully detached\n");
+ } else {
+ ocf_cache_log(cache, log_warn,
+ "Device detached with errors\n");
+ }
+ } else {
+ ocf_cache_log(cache, log_err,
+ "Detaching device failed\n");
+ }
+
+ context->cmpl(cache, context->priv,
+ error ?: context->cache_write_error);
+
+ ocf_pipeline_destroy(context->pipeline);
+ ocf_pipeline_destroy(cache->stop_pipeline);
+}
+
+struct ocf_pipeline_properties ocf_mngt_cache_detach_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_detach_context),
+ .finish = ocf_mngt_cache_detach_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_detach_flush),
+ OCF_PL_STEP(ocf_mngt_cache_detach_stop_cache_io),
+ OCF_PL_STEP(ocf_mngt_cache_detach_stop_cleaner_io),
+ OCF_PL_STEP(ocf_mngt_cache_detach_update_metadata),
+ OCF_PL_STEP(ocf_mngt_cache_detach_unplug),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_cache_detach(ocf_cache_t cache,
+ ocf_mngt_cache_detach_end_t cmpl, void *priv)
+{
+ struct ocf_mngt_cache_detach_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+
+ if (!ocf_cache_is_device_attached(cache))
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_mngt_cache_detach_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ /* prevent dirty io */
+ ocf_refcnt_freeze(&cache->refcnt.dirty);
+
+ ocf_pipeline_next(pipeline);
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_common.c b/src/spdk/ocf/src/mngt/ocf_mngt_common.c
new file mode 100644
index 000000000..7e0022c87
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_common.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "ocf_mngt_core_priv.h"
+#include "../ocf_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../ocf_request.h"
+#include "../eviction/ops.h"
+#include "../ocf_logger_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../engine/engine_common.h"
+
+/* Close if opened */
+int cache_mngt_core_close(ocf_core_t core)
+{
+ if (!core->opened)
+ return -OCF_ERR_CORE_IN_INACTIVE_STATE;
+
+ ocf_volume_close(&core->front_volume);
+ ocf_volume_deinit(&core->front_volume);
+
+ ocf_volume_close(&core->volume);
+ ocf_volume_deinit(&core->volume);
+ core->opened = false;
+
+ return 0;
+}
+
+/* Remove core from cleaning policy */
+void cache_mngt_core_remove_from_cleaning_pol(ocf_core_t core)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+ ocf_cleaning_t clean_pol_type;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ clean_pol_type = cache->conf_meta->cleaning_policy_type;
+ if (cache->core[core_id].opened) {
+ if (cleaning_policy_ops[clean_pol_type].remove_core) {
+ cleaning_policy_ops[clean_pol_type].
+ remove_core(cache, core_id);
+ }
+ }
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+}
+
+/* Deinitialize core metadata in attached metadata */
+void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
+{
+ int retry = 1;
+ uint64_t core_size = 0;
+ ocf_cleaning_t clean_pol_type;
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+
+ core_size = ocf_volume_get_length(&core->volume);
+ if (!core_size)
+ core_size = ~0ULL;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ clean_pol_type = cache->conf_meta->cleaning_policy_type;
+ while (retry) {
+ retry = 0;
+ if (cleaning_policy_ops[clean_pol_type].purge_range) {
+ retry = cleaning_policy_ops[clean_pol_type].purge_range(cache,
+ core_id, 0, core_size);
+ }
+
+ if (!retry) {
+ /* Remove from collision_table and Partition. Put in FREELIST */
+ retry = ocf_metadata_sparse_range(cache, core_id, 0,
+ core_size);
+ }
+
+ if (retry) {
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ env_msleep(100);
+ ocf_metadata_start_exclusive_access(
+ &cache->metadata.lock);
+ }
+ }
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+}
+
+/* Mark core as removed in metadata */
+void cache_mngt_core_remove_from_meta(ocf_core_t core)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ /* In metadata, mark that this core was removed from the cache */
+ core->conf_meta->valid = false;
+
+ /* Clear UUID of core */
+ ocf_mngt_core_clear_uuid_metadata(core);
+ core->conf_meta->seq_no = OCF_SEQ_NO_INVALID;
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+}
+
+/* Deinit in-memory structures related to this core */
+void cache_mngt_core_remove_from_cache(ocf_core_t core)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+
+ env_free(core->counters);
+ core->counters = NULL;
+ core->added = false;
+ env_bit_clear(core_id, cache->conf_meta->valid_core_bitmap);
+
+ if (!core->opened && --cache->ocf_core_inactive_count == 0)
+ env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
+
+ cache->conf_meta->core_count--;
+}
+
+void ocf_mngt_cache_put(ocf_cache_t cache)
+{
+ ocf_ctx_t ctx;
+
+ OCF_CHECK_NULL(cache);
+
+ if (ocf_refcnt_dec(&cache->refcnt.cache) == 0) {
+ ctx = cache->owner;
+ ocf_metadata_deinit(cache);
+ env_vfree(cache);
+ ocf_ctx_put(ctx);
+ }
+}
+
+int ocf_mngt_cache_get_by_name(ocf_ctx_t ctx, const char *name, size_t name_len,
+ ocf_cache_t *cache)
+{
+ struct ocf_cache *instance = NULL;
+ struct ocf_cache *iter = NULL;
+
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(cache);
+
+ /* Lock caches list */
+ env_rmutex_lock(&ctx->lock);
+
+ list_for_each_entry(iter, &ctx->caches, list) {
+ if (!env_strncmp(ocf_cache_get_name(iter), OCF_CACHE_NAME_SIZE,
+ name, name_len)) {
+ instance = iter;
+ break;
+ }
+ }
+
+ if (instance) {
+ /* if cache is either fully initialized or during recovery */
+ if (!ocf_refcnt_inc(&instance->refcnt.cache)) {
+ /* Cache not initialized yet */
+ instance = NULL;
+ }
+ }
+
+ env_rmutex_unlock(&ctx->lock);
+
+ if (!instance)
+ return -OCF_ERR_CACHE_NOT_EXIST;
+
+ *cache = instance;
+
+ return 0;
+}
+
+typedef void (*ocf_lock_fn_t)(ocf_async_lock_waiter_t waiter);
+
+typedef int (*ocf_trylock_fn_t)(ocf_async_lock_t lock);
+
+typedef void (*ocf_unlock_fn_t)(ocf_async_lock_t lock);
+
+struct ocf_mngt_cache_lock_context {
+ ocf_cache_t cache;
+ ocf_unlock_fn_t unlock_fn;
+ ocf_mngt_cache_lock_end_t cmpl;
+ void *priv;
+};
+
+static void _ocf_mngt_cache_lock_complete(
+ ocf_async_lock_waiter_t waiter, int error)
+{
+ struct ocf_mngt_cache_lock_context *context;
+ ocf_cache_t cache;
+
+ context = ocf_async_lock_waiter_get_priv(waiter);
+ cache = context->cache;
+
+ if (error) {
+ ocf_mngt_cache_put(cache);
+ goto out;
+ }
+
+ if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
+ /* Cache already stopping, do not allow any operation */
+ context->unlock_fn(ocf_async_lock_waiter_get_lock(waiter));
+ ocf_mngt_cache_put(cache);
+ error = -OCF_ERR_CACHE_NOT_EXIST;
+ }
+
+out:
+ context->cmpl(context->cache, context->priv, error);
+}
+
+static void _ocf_mngt_cache_lock(ocf_cache_t cache,
+ ocf_mngt_cache_lock_end_t cmpl, void *priv,
+ ocf_lock_fn_t lock_fn, ocf_unlock_fn_t unlock_fn)
+{
+ ocf_async_lock_waiter_t waiter;
+ struct ocf_mngt_cache_lock_context *context;
+
+ if (ocf_mngt_cache_get(cache))
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_CACHE_NOT_EXIST);
+
+ waiter = ocf_async_lock_new_waiter(&cache->lock,
+ _ocf_mngt_cache_lock_complete);
+ if (!waiter) {
+ ocf_mngt_cache_put(cache);
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
+ }
+
+ context = ocf_async_lock_waiter_get_priv(waiter);
+ context->cache = cache;
+ context->unlock_fn = unlock_fn;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ lock_fn(waiter);
+}
+
+static int _ocf_mngt_cache_trylock(ocf_cache_t cache,
+ ocf_trylock_fn_t trylock_fn, ocf_unlock_fn_t unlock_fn)
+{
+ int result;
+
+ if (ocf_mngt_cache_get(cache))
+ return -OCF_ERR_CACHE_NOT_EXIST;
+
+ result = trylock_fn(&cache->lock);
+ if (result)
+ return result;
+
+ if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
+ /* Cache already stopping, do not allow any operation */
+ unlock_fn(&cache->lock);
+ return -OCF_ERR_CACHE_NOT_EXIST;
+ }
+
+ return 0;
+}
+
+static void _ocf_mngt_cache_unlock(ocf_cache_t cache,
+ ocf_unlock_fn_t unlock_fn)
+{
+ unlock_fn(&cache->lock);
+ ocf_mngt_cache_put(cache);
+}
+
+int ocf_mngt_cache_lock_init(ocf_cache_t cache)
+{
+ return ocf_async_lock_init(&cache->lock,
+ sizeof(struct ocf_mngt_cache_lock_context));
+}
+
+void ocf_mngt_cache_lock_deinit(ocf_cache_t cache)
+{
+ ocf_async_lock_deinit(&cache->lock);
+}
+
+void ocf_mngt_cache_lock(ocf_cache_t cache,
+ ocf_mngt_cache_lock_end_t cmpl, void *priv)
+{
+ OCF_CHECK_NULL(cache);
+
+ _ocf_mngt_cache_lock(cache, cmpl, priv,
+ ocf_async_lock, ocf_async_unlock);
+}
+
+int ocf_mngt_cache_trylock(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ return _ocf_mngt_cache_trylock(cache,
+ ocf_async_trylock, ocf_async_unlock);
+}
+
+void ocf_mngt_cache_unlock(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ _ocf_mngt_cache_unlock(cache, ocf_async_unlock);
+}
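+
+/*
+ * Minimal caller-side sketch of the asynchronous lock API above, using the
+ * completion signature invoked by _ocf_mngt_cache_lock_complete()
+ * (cache, priv, error); the example_* names are illustrative only:
+ *
+ *     static void example_lock_cmpl(ocf_cache_t cache, void *priv, int error)
+ *     {
+ *         if (error)
+ *             return;
+ *
+ *         ... perform a management operation under the lock ...
+ *
+ *         ocf_mngt_cache_unlock(cache);
+ *     }
+ *
+ *     ocf_mngt_cache_lock(cache, example_lock_cmpl, NULL);
+ */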
+
+void ocf_mngt_cache_read_lock(ocf_cache_t cache,
+ ocf_mngt_cache_lock_end_t cmpl, void *priv)
+{
+ OCF_CHECK_NULL(cache);
+
+ _ocf_mngt_cache_lock(cache, cmpl, priv,
+ ocf_async_read_lock, ocf_async_read_unlock);
+}
+
+int ocf_mngt_cache_read_trylock(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ return _ocf_mngt_cache_trylock(cache,
+ ocf_async_read_trylock, ocf_async_read_unlock);
+}
+
+void ocf_mngt_cache_read_unlock(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ _ocf_mngt_cache_unlock(cache, ocf_async_read_unlock);
+}
+
+bool ocf_mngt_cache_is_locked(ocf_cache_t cache)
+{
+ return ocf_async_is_locked(&cache->lock);
+}
+
+/* if cache is either fully initialized or during recovery */
+static bool _ocf_mngt_cache_try_get(ocf_cache_t cache)
+{
+ return !!ocf_refcnt_inc(&cache->refcnt.cache);
+}
+
+int ocf_mngt_cache_get(ocf_cache_t cache)
+{
+ if (!_ocf_mngt_cache_try_get(cache))
+ return -OCF_ERR_CACHE_NOT_AVAIL;
+
+ return 0;
+}
+
+static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
+ uint32_t *size)
+{
+ int result = 0;
+ uint32_t count = 0, i = 0;
+ ocf_cache_t iter;
+
+ *list = NULL;
+ *size = 0;
+
+ env_rmutex_lock(&ocf_ctx->lock);
+
+ list_for_each_entry(iter, &ocf_ctx->caches, list) {
+ count++;
+ }
+
+ if (!count)
+ goto END;
+
+ *list = env_vmalloc(sizeof((*list)[0]) * count);
+ if (*list == NULL) {
+ result = -ENOMEM;
+ goto END;
+ }
+
+ list_for_each_entry(iter, &ocf_ctx->caches, list) {
+ if (_ocf_mngt_cache_try_get(iter))
+ (*list)[i++] = iter;
+ }
+
+ if (i) {
+ /* Update size of cache list */
+ *size = i;
+ } else {
+ env_vfree(*list);
+ *list = NULL;
+ }
+
+END:
+ env_rmutex_unlock(&ocf_ctx->lock);
+ return result;
+}
+
+int ocf_mngt_cache_visit(ocf_ctx_t ocf_ctx, ocf_mngt_cache_visitor_t visitor,
+ void *cntx)
+{
+ ocf_cache_t *list;
+ uint32_t size, i;
+ int result;
+
+ OCF_CHECK_NULL(ocf_ctx);
+ OCF_CHECK_NULL(visitor);
+
+ result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size);
+ if (result)
+ return result;
+
+ if (size == 0)
+ return 0;
+
+ /* Iterate over caches */
+ for (i = 0; i < size; i++) {
+ ocf_cache_t this = list[i];
+
+ result = visitor(this, cntx);
+
+ if (result)
+ break;
+ }
+
+ /* Put caches */
+ for (i = 0; i < size; i++)
+ ocf_mngt_cache_put(list[i]);
+
+ env_vfree(list);
+
+ return result;
+}
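+
+/*
+ * Example visitor sketch for ocf_mngt_cache_visit() above; the counting
+ * logic and the example_* name are illustrative only:
+ *
+ *     static int example_count_caches(ocf_cache_t cache, void *cntx)
+ *     {
+ *         unsigned int *count = cntx;
+ *
+ *         (*count)++;
+ *         return 0;
+ *     }
+ *
+ * Returning non-zero from the visitor stops the iteration early.
+ */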
+
+int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx,
+ ocf_mngt_cache_visitor_t visitor, void *cntx)
+{
+ ocf_cache_t *list;
+ uint32_t size, i;
+ int result;
+
+ OCF_CHECK_NULL(ocf_ctx);
+ OCF_CHECK_NULL(visitor);
+
+ result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size);
+ if (result)
+ return result;
+
+ if (size == 0)
+ return 0;
+
+ /* Iterate over caches */
+ for (i = size; i; i--) {
+ ocf_cache_t this = list[i - 1];
+
+ result = visitor(this, cntx);
+
+ if (result)
+ break;
+ }
+
+ /* Put caches */
+ for (i = 0; i < size; i++)
+ ocf_mngt_cache_put(list[i]);
+
+ env_vfree(list);
+
+ return result;
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_common.h b/src/spdk/ocf/src/mngt/ocf_mngt_common.h
new file mode 100644
index 000000000..38283d0c9
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+
+#ifndef __OCF_MNGT_COMMON_H__
+#define __OCF_MNGT_COMMON_H__
+
+int cache_mngt_core_close(ocf_core_t core);
+
+void cache_mngt_core_remove_from_meta(ocf_core_t core);
+
+void cache_mngt_core_remove_from_cache(ocf_core_t core);
+
+void cache_mngt_core_deinit_attached_meta(ocf_core_t core);
+
+void cache_mngt_core_remove_from_cleaning_pol(ocf_core_t core);
+
+int _ocf_cleaning_thread(void *priv);
+
+int cache_mngt_thread_io_requests(void *data);
+
+int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
+ ocf_part_id_t part_id, const char *name, uint32_t min_size,
+ uint32_t max_size, uint8_t priority, bool valid);
+
+int ocf_mngt_cache_lock_init(ocf_cache_t cache);
+void ocf_mngt_cache_lock_deinit(ocf_cache_t cache);
+
+bool ocf_mngt_cache_is_locked(ocf_cache_t cache);
+
+#endif /* __OCF_MNGT_COMMON_H__ */
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_core.c b/src/spdk/ocf/src/mngt/ocf_mngt_core.c
new file mode 100644
index 000000000..594c3431c
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_core.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "ocf_mngt_core_priv.h"
+#include "../ocf_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_pipeline.h"
+#include "../ocf_stats_priv.h"
+#include "../ocf_def_priv.h"
+
+static ocf_seq_no_t _ocf_mngt_get_core_seq_no(ocf_cache_t cache)
+{
+ if (cache->conf_meta->curr_core_seq_no == OCF_SEQ_NO_MAX)
+ return OCF_SEQ_NO_INVALID;
+
+ return ++cache->conf_meta->curr_core_seq_no;
+}
+
+static int ocf_mngt_core_set_name(ocf_core_t core, const char *name)
+{
+ return env_strncpy(core->conf_meta->name, OCF_CORE_NAME_SIZE,
+ name, OCF_CORE_NAME_SIZE);
+}
+
+static int _ocf_uuid_set(const struct ocf_volume_uuid *uuid,
+ struct ocf_metadata_uuid *muuid)
+{
+ int result;
+
+ if (!uuid->data)
+ return -EINVAL;
+
+ if (uuid->size > sizeof(muuid->data))
+ return -ENOBUFS;
+
+ result = env_memcpy(muuid->data, sizeof(muuid->data),
+ uuid->data, uuid->size);
+ if (result)
+ return result;
+
+ result = env_memset(muuid->data + uuid->size,
+ sizeof(muuid->data) - uuid->size, 0);
+ if (result)
+ return result;
+
+ muuid->size = uuid->size;
+
+ return 0;
+}
+
+static int ocf_mngt_core_set_uuid_metadata(ocf_core_t core,
+ const struct ocf_volume_uuid *uuid,
+ struct ocf_volume_uuid *new_uuid)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ struct ocf_metadata_uuid *muuid = ocf_metadata_get_core_uuid(cache,
+ ocf_core_get_id(core));
+
+ if (_ocf_uuid_set(uuid, muuid))
+ return -ENOBUFS;
+
+ if (new_uuid) {
+ new_uuid->data = muuid->data;
+ new_uuid->size = muuid->size;
+ }
+
+ return 0;
+}
+
+void ocf_mngt_core_clear_uuid_metadata(ocf_core_t core)
+{
+ struct ocf_volume_uuid uuid = { .size = 0, };
+
+ ocf_mngt_core_set_uuid_metadata(core, &uuid, NULL);
+}
+
+struct ocf_cache_add_core_context {
+ ocf_mngt_cache_add_core_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ struct ocf_mngt_core_config cfg;
+ ocf_cache_t cache;
+ ocf_core_t core;
+
+ struct {
+ bool uuid_set : 1;
+ bool volume_inited : 1;
+ bool volume_opened : 1;
+ bool clean_pol_added : 1;
+ bool counters_allocated : 1;
+ } flags;
+};
+
+static void _ocf_mngt_cache_add_core_handle_error(
+ struct ocf_cache_add_core_context *context)
+{
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+ ocf_core_id_t core_id;
+ ocf_volume_t volume;
+ ocf_cleaning_t clean_type;
+
+ if (!core)
+ return;
+
+ core_id = ocf_core_get_id(core);
+ volume = &core->volume;
+ clean_type = cache->conf_meta->cleaning_policy_type;
+
+ if (context->flags.counters_allocated) {
+ env_bit_clear(core_id,
+ cache->conf_meta->valid_core_bitmap);
+ core->conf_meta->valid = false;
+ core->added = false;
+ core->opened = false;
+
+ env_free(core->counters);
+ core->counters = NULL;
+ }
+
+ if (context->flags.clean_pol_added) {
+ if (cleaning_policy_ops[clean_type].remove_core)
+ cleaning_policy_ops[clean_type].remove_core(cache,
+ core_id);
+ }
+
+ if (context->flags.volume_opened)
+ ocf_volume_close(volume);
+
+ if (context->flags.volume_inited)
+ ocf_volume_deinit(volume);
+
+ if (context->flags.uuid_set)
+ ocf_mngt_core_clear_uuid_metadata(core);
+}
+
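+/* Returns the index of the lowest clear bit, e.g. _ffz(0x7) == 3;
+ * returns the bit width of the word when every bit is set. */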
+static unsigned long _ffz(unsigned long word)
+{
+ int i;
+
+ for (i = 0; i < sizeof(word)*8 && (word & 1); i++)
+ word >>= 1;
+
+ return i;
+}
+
+static unsigned long _ocf_mngt_find_first_free_core(const unsigned long *bitmap)
+{
+ unsigned long i;
+ unsigned long ret = OCF_CORE_MAX;
+
+ /* check core 0 availability */
+ bool zero_core_free = !(*bitmap & 0x1UL);
+
+ /* check if any core id is free except 0 */
+ for (i = 0; i * sizeof(unsigned long) * 8 < OCF_CORE_MAX; i++) {
+ unsigned long long ignore_mask = (i == 0) ? 1UL : 0UL;
+ if (~(bitmap[i] | ignore_mask)) {
+ ret = OCF_MIN(OCF_CORE_MAX, i * sizeof(unsigned long) *
+ 8 + _ffz(bitmap[i] | ignore_mask));
+ break;
+ }
+ }
+
+ /* return 0 only if no other core is free */
+ if (ret == OCF_CORE_MAX && zero_core_free)
+ return 0;
+
+ return ret;
+}
+
+static int ocf_mngt_find_free_core(ocf_cache_t cache, ocf_core_t *core)
+{
+ ocf_core_id_t core_id;
+ ocf_core_t tmp_core;
+
+ core_id = _ocf_mngt_find_first_free_core(
+ cache->conf_meta->valid_core_bitmap);
+
+ tmp_core = ocf_cache_get_core(cache, core_id);
+ if (!tmp_core)
+ return -OCF_ERR_TOO_MANY_CORES;
+
+ *core = tmp_core;
+
+ return 0;
+}
+
+int ocf_mngt_core_init_front_volume(ocf_core_t core)
+{
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_volume_type_t type;
+ struct ocf_volume_uuid uuid = {
+ .data = core,
+ .size = sizeof(core),
+ };
+ int ret;
+
+ type = ocf_ctx_get_volume_type(cache->owner, 0);
+ if (!type)
+ return -OCF_ERR_INVAL;
+
+ ret = ocf_volume_init(&core->front_volume, type, &uuid, false);
+ if (ret)
+ return ret;
+
+ ret = ocf_volume_open(&core->front_volume, NULL);
+ if (ret)
+ ocf_volume_deinit(&core->front_volume);
+
+ return ret;
+}
+
+static void ocf_mngt_cache_try_add_core_prepare(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ struct ocf_mngt_core_config *cfg = &context->cfg;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core;
+ ocf_volume_t volume;
+ ocf_volume_type_t type;
+ ocf_ctx_t ctx = cache->owner;
+ int result;
+
+ result = ocf_core_get_by_name(cache, cfg->name,
+ OCF_CORE_NAME_SIZE, &core);
+ if (result)
+ goto err;
+
+ if (core->opened) {
+ result = -OCF_ERR_INVAL;
+ goto err;
+ }
+
+ volume = ocf_core_get_volume(core);
+ type = ocf_volume_get_type(volume);
+
+ if (ocf_ctx_get_volume_type_id(ctx, type) != cfg->volume_type) {
+ result = -OCF_ERR_INVAL_VOLUME_TYPE;
+ goto err;
+ }
+
+ if (env_strncmp(volume->uuid.data, volume->uuid.size, cfg->uuid.data,
+ cfg->uuid.size)) {
+ result = -OCF_ERR_INVAL;
+ goto err;
+ }
+
+ context->core = core;
+
+ OCF_PL_NEXT_RET(pipeline);
+
+err:
+ ocf_cache_log(cache, log_err, "Core with given uuid not found "
+ "in cache metadata\n");
+ OCF_PL_FINISH_RET(pipeline, result);
+}
+
+static void ocf_mngt_cache_try_add_core_insert(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+ ocf_volume_t volume;
+ int result;
+
+ ocf_core_log(core, log_debug, "Inserting core\n");
+
+ volume = ocf_core_get_volume(core);
+
+ result = ocf_volume_open(volume, NULL);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ if (!ocf_volume_get_length(volume)) {
+ result = -OCF_ERR_CORE_NOT_AVAIL;
+ goto error_after_open;
+ }
+
+ core->opened = true;
+
+ if (!(--cache->ocf_core_inactive_count))
+ env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
+
+ OCF_PL_NEXT_RET(pipeline);
+
+error_after_open:
+ ocf_volume_close(volume);
+ OCF_PL_FINISH_RET(pipeline, result);
+}
+
+static void ocf_mngt_cache_add_core_prepare(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ struct ocf_mngt_core_config *cfg = &context->cfg;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core;
+ int result;
+
+ result = ocf_core_get_by_name(cache, cfg->name,
+ OCF_CACHE_NAME_SIZE, &core);
+ if (!result)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CORE_EXIST);
+
+ result = ocf_mngt_find_free_core(cache, &core);
+ if (result)
+ OCF_PL_FINISH_RET(context->pipeline, result);
+
+ context->core = core;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_cache_add_core_flush_sb_complete(void *priv, int error)
+{
+ struct ocf_cache_add_core_context *context = priv;
+
+ if (error)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
+
+ /* Increase the count of added cores */
+ context->cache->conf_meta->core_count++;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ struct ocf_mngt_core_config *cfg = &context->cfg;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+ ocf_core_id_t core_id;
+ struct ocf_volume_uuid new_uuid;
+ ocf_volume_t volume;
+ ocf_volume_type_t type;
+ ocf_seq_no_t core_sequence_no;
+ ocf_cleaning_t clean_type;
+ uint64_t length;
+ int result = 0;
+
+ ocf_cache_log(cache, log_debug, "Inserting core %s\n", cfg->name);
+
+ volume = ocf_core_get_volume(core);
+ volume->cache = cache;
+ core_id = ocf_core_get_id(core);
+
+ result = ocf_mngt_core_set_name(core, cfg->name);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ /* Set uuid */
+ result = ocf_mngt_core_set_uuid_metadata(core, &cfg->uuid, &new_uuid);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ context->flags.uuid_set = true;
+
+ type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
+ if (!type)
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL_VOLUME_TYPE);
+
+ result = ocf_volume_init(volume, type, &new_uuid, false);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ context->flags.volume_inited = true;
+
+ if (cfg->user_metadata.data && cfg->user_metadata.size > 0) {
+ result = ocf_mngt_core_set_user_metadata(core,
+ cfg->user_metadata.data,
+ cfg->user_metadata.size);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+ }
+
+ result = ocf_volume_open(volume, NULL);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ context->flags.volume_opened = true;
+
+ length = ocf_volume_get_length(volume);
+ if (!length)
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_CORE_NOT_AVAIL);
+
+ core->conf_meta->length = length;
+
+ clean_type = cache->conf_meta->cleaning_policy_type;
+ if (ocf_cache_is_device_attached(cache) &&
+ cleaning_policy_ops[clean_type].add_core) {
+ result = cleaning_policy_ops[clean_type].add_core(cache,
+ core_id);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, result);
+
+ context->flags.clean_pol_added = true;
+ }
+
+ /* When adding new core to cache, allocate stat counters */
+ core->counters =
+ env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
+ if (!core->counters)
+ OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
+
+ context->flags.counters_allocated = true;
+
+ /* When adding new core to cache, reset all core/cache statistics */
+ ocf_core_stats_initialize(core);
+ env_atomic_set(&core->runtime_meta->cached_clines, 0);
+ env_atomic_set(&core->runtime_meta->dirty_clines, 0);
+ env_atomic64_set(&core->runtime_meta->dirty_since, 0);
+
+ /* In metadata, mark that this core was added to the cache */
+ env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
+ core->conf_meta->valid = true;
+ core->added = true;
+ core->opened = true;
+
+ /* Set sequential cutoff parameters for the core */
+ core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
+ core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
+
+ /* Add core sequence number for atomic metadata matching */
+ core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
+ if (core_sequence_no == OCF_SEQ_NO_INVALID)
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_TOO_MANY_CORES);
+
+ core->conf_meta->seq_no = core_sequence_no;
+
+ /* Update super-block with core device addition */
+ ocf_metadata_flush_superblock(cache,
+ _ocf_mngt_cache_add_core_flush_sb_complete, context);
+}
+
+static void ocf_mngt_cache_add_core_init_front_volume(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ int result;
+
+ result = ocf_mngt_core_init_front_volume(context->core);
+ if (result)
+ OCF_PL_FINISH_RET(context->pipeline, result);
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_mngt_cache_add_core_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_cache_add_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+
+ if (error) {
+ _ocf_mngt_cache_add_core_handle_error(context);
+
+ if (error == -OCF_ERR_CORE_NOT_AVAIL) {
+ ocf_cache_log(cache, log_err, "Core %s is zero size\n",
+ context->cfg.name);
+ }
+ ocf_cache_log(cache, log_err, "Adding core %s failed\n",
+ context->cfg.name);
+ goto out;
+ }
+
+ ocf_core_log(core, log_info, "Successfully added\n");
+
+out:
+ context->cmpl(cache, core, context->priv, error);
+ env_vfree(context->cfg.uuid.data);
+ ocf_pipeline_destroy(context->pipeline);
+}
+
+struct ocf_pipeline_properties ocf_mngt_cache_try_add_core_pipeline_props = {
+ .priv_size = sizeof(struct ocf_cache_add_core_context),
+ .finish = ocf_mngt_cache_add_core_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_try_add_core_prepare),
+ OCF_PL_STEP(ocf_mngt_cache_try_add_core_insert),
+ OCF_PL_STEP(ocf_mngt_cache_add_core_init_front_volume),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+struct ocf_pipeline_properties ocf_mngt_cache_add_core_pipeline_props = {
+ .priv_size = sizeof(struct ocf_cache_add_core_context),
+ .finish = ocf_mngt_cache_add_core_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_add_core_prepare),
+ OCF_PL_STEP(ocf_mngt_cache_add_core_insert),
+ OCF_PL_STEP(ocf_mngt_cache_add_core_init_front_volume),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_cache_add_core(ocf_cache_t cache,
+ struct ocf_mngt_core_config *cfg,
+ ocf_mngt_cache_add_core_end_t cmpl, void *priv)
+{
+ struct ocf_cache_add_core_context *context;
+ ocf_pipeline_t pipeline;
+ void *data;
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_INVAL);
+
+ if (!env_strnlen(cfg->name, OCF_CORE_NAME_SIZE))
+ OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_INVAL);
+
+ result = ocf_pipeline_create(&pipeline, cache, cfg->try_add ?
+ &ocf_mngt_cache_try_add_core_pipeline_props :
+ &ocf_mngt_cache_add_core_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+ context->cfg = *cfg;
+
+ data = env_vmalloc(cfg->uuid.size);
+ if (!data) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_pipeline;
+ }
+
+ result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
+ cfg->uuid.size);
+ if (result)
+ goto err_uuid;
+
+ context->cfg.uuid.data = data;
+
+ OCF_PL_NEXT_RET(pipeline);
+
+err_uuid:
+ env_vfree(data);
+err_pipeline:
+ ocf_pipeline_destroy(context->pipeline);
+ OCF_CMPL_RET(cache, NULL, priv, result);
+}
+
+struct ocf_mngt_cache_remove_core_context {
+ ocf_mngt_cache_remove_core_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ ocf_core_t core;
+ const char *core_name;
+ struct ocf_cleaner_wait_context cleaner_wait;
+};
+
+static void ocf_mngt_cache_remove_core_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (!error) {
+ ocf_cache_log(cache, log_info, "Core %s successfully removed\n",
+ context->core_name);
+ } else {
+ ocf_cache_log(cache, log_err, "Removing core %s failed\n",
+ context->core_name);
+ }
+
+ ocf_cleaner_refcnt_unfreeze(cache);
+
+ context->cmpl(context->priv, error);
+
+ ocf_pipeline_destroy(context->pipeline);
+}
+
+static void ocf_mngt_cache_remove_core_flush_sb_complete(void *priv, int error)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+
+ error = error ? -OCF_ERR_WRITE_CACHE : 0;
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
+}
+
+static void _ocf_mngt_cache_remove_core(ocf_pipeline_t pipeline, void *priv,
+ ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+
+ ocf_core_log(core, log_debug, "Removing core\n");
+
+ /* Deinit everything */
+ if (ocf_cache_is_device_attached(cache)) {
+ cache_mngt_core_deinit_attached_meta(core);
+ cache_mngt_core_remove_from_cleaning_pol(core);
+ }
+ cache_mngt_core_remove_from_meta(core);
+ cache_mngt_core_remove_from_cache(core);
+ cache_mngt_core_close(core);
+
+ /* Update super-block with core device removal */
+ ocf_metadata_flush_superblock(cache,
+ ocf_mngt_cache_remove_core_flush_sb_complete, context);
+}
+
+static void ocf_mngt_cache_remove_core_wait_cleaning_complete(void *priv)
+{
+ ocf_pipeline_t pipeline = priv;
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_remove_core_wait_cleaning(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (!ocf_cache_is_device_attached(cache))
+ OCF_PL_NEXT_RET(pipeline);
+
+ ocf_cleaner_refcnt_freeze(cache);
+ ocf_cleaner_refcnt_register_zero_cb(cache, &context->cleaner_wait,
+ ocf_mngt_cache_remove_core_wait_cleaning_complete,
+ pipeline);
+}
+
+struct ocf_pipeline_properties ocf_mngt_cache_remove_core_pipeline_props = {
+ .priv_size = sizeof(struct ocf_mngt_cache_remove_core_context),
+ .finish = ocf_mngt_cache_remove_core_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_remove_core_wait_cleaning),
+ OCF_PL_STEP(_ocf_mngt_cache_remove_core),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_cache_remove_core(ocf_core_t core,
+ ocf_mngt_cache_remove_core_end_t cmpl, void *priv)
+{
+ struct ocf_mngt_cache_remove_core_context *context;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ int result;
+
+ OCF_CHECK_NULL(core);
+
+ cache = ocf_core_get_cache(core);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_mngt_cache_remove_core_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+ context->core = core;
+ context->core_name = ocf_core_get_name(core);
+
+ ocf_pipeline_next(pipeline);
+}
+
+struct ocf_mngt_cache_detach_core_context {
+ ocf_mngt_cache_detach_core_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ ocf_core_t core;
+ const char *core_name;
+ struct ocf_cleaner_wait_context cleaner_wait;
+};
+
+static void _ocf_mngt_cache_detach_core(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+ int status;
+
+ ocf_core_log(core, log_debug, "Detaching core\n");
+
+ status = cache_mngt_core_close(core);
+
+ if (status)
+ OCF_PL_FINISH_RET(pipeline, status);
+
+ cache->ocf_core_inactive_count++;
+ env_bit_set(ocf_cache_state_incomplete,
+ &cache->cache_state);
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_detach_core_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (!error) {
+ ocf_cache_log(cache, log_info, "Core %s successfully detached\n",
+ context->core_name);
+ } else {
+ ocf_cache_log(cache, log_err, "Detaching core %s failed\n",
+ context->core_name);
+ }
+
+ ocf_cleaner_refcnt_unfreeze(context->cache);
+
+ context->cmpl(context->priv, error);
+
+ ocf_pipeline_destroy(context->pipeline);
+}
+
+static void ocf_mngt_cache_detach_core_wait_cleaning_complete(void *priv)
+{
+ ocf_pipeline_t pipeline = priv;
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_mngt_cache_detach_core_wait_cleaning(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_remove_core_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (!ocf_cache_is_device_attached(cache))
+ OCF_PL_NEXT_RET(pipeline);
+
+ ocf_cleaner_refcnt_freeze(cache);
+ ocf_cleaner_refcnt_register_zero_cb(cache, &context->cleaner_wait,
+ ocf_mngt_cache_detach_core_wait_cleaning_complete,
+ pipeline);
+}
+
+struct ocf_pipeline_properties ocf_mngt_cache_detach_core_pipeline_props = {
+ .priv_size = sizeof(struct ocf_mngt_cache_detach_core_context),
+ .finish = ocf_mngt_cache_detach_core_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_mngt_cache_detach_core_wait_cleaning),
+ OCF_PL_STEP(_ocf_mngt_cache_detach_core),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_cache_detach_core(ocf_core_t core,
+ ocf_mngt_cache_detach_core_end_t cmpl, void *priv)
+{
+ struct ocf_mngt_cache_detach_core_context *context;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+ int result;
+
+ OCF_CHECK_NULL(core);
+
+ cache = ocf_core_get_cache(core);
+
+ if (!cache->mngt_queue)
+ OCF_CMPL_RET(priv, -OCF_ERR_INVAL);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_mngt_cache_detach_core_pipeline_props);
+ if (result)
+ OCF_CMPL_RET(priv, result);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+ context->core = core;
+ context->core_name = ocf_core_get_name(core);
+
+ ocf_pipeline_next(pipeline);
+}
+
+int ocf_mngt_core_set_uuid(ocf_core_t core, const struct ocf_volume_uuid *uuid)
+{
+ struct ocf_volume_uuid *current_uuid;
+ int result;
+ int diff;
+
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(uuid);
+ OCF_CHECK_NULL(uuid->data);
+
+ current_uuid = &ocf_core_get_volume(core)->uuid;
+
+ result = env_memcmp(current_uuid->data, current_uuid->size,
+ uuid->data, uuid->size, &diff);
+ if (result)
+ return result;
+
+ if (!diff) {
+ /* UUIDs are identical */
+ return 0;
+ }
+
+ result = ocf_mngt_core_set_uuid_metadata(core, uuid, NULL);
+ if (result)
+ return result;
+
+ ocf_volume_set_uuid(&core->volume, uuid);
+
+ return result;
+}
+
+int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
+{
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(data);
+
+ if (size > OCF_CORE_USER_DATA_SIZE)
+ return -EINVAL;
+
+ return env_memcpy(core->conf_meta->user_data,
+ OCF_CORE_USER_DATA_SIZE, data, size);
+}
+
+int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
+{
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(data);
+
+ if (size > sizeof(core->conf_meta->user_data))
+ return -EINVAL;
+
+ return env_memcpy(data, size, core->conf_meta->user_data,
+ OCF_CORE_USER_DATA_SIZE);
+}
+
+static int _cache_mngt_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
+{
+ uint32_t threshold = *(uint32_t*) cntx;
+ uint32_t threshold_old = core->conf_meta->seq_cutoff_threshold;
+
+ if (threshold_old == threshold) {
+ ocf_core_log(core, log_info,
+ "Sequential cutoff threshold %u bytes is "
+ "already set\n", threshold);
+ return 0;
+ }
+ core->conf_meta->seq_cutoff_threshold = threshold;
+
+ ocf_core_log(core, log_info, "Changing sequential cutoff "
+ "threshold from %u to %u bytes successful\n",
+ threshold_old, threshold);
+
+ return 0;
+}
+
+int ocf_mngt_core_set_seq_cutoff_threshold(ocf_core_t core, uint32_t thresh)
+{
+ OCF_CHECK_NULL(core);
+
+ return _cache_mngt_set_core_seq_cutoff_threshold(core, &thresh);
+}
+
+int ocf_mngt_core_set_seq_cutoff_threshold_all(ocf_cache_t cache,
+ uint32_t thresh)
+{
+ OCF_CHECK_NULL(cache);
+
+ return ocf_core_visit(cache, _cache_mngt_set_core_seq_cutoff_threshold,
+ &thresh, true);
+}
+
+int ocf_mngt_core_get_seq_cutoff_threshold(ocf_core_t core, uint32_t *thresh)
+{
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(thresh);
+
+ *thresh = ocf_core_get_seq_cutoff_threshold(core);
+
+ return 0;
+}
+
+static const char *_ocf_seq_cutoff_policy_names[ocf_seq_cutoff_policy_max] = {
+ [ocf_seq_cutoff_policy_always] = "always",
+ [ocf_seq_cutoff_policy_full] = "full",
+ [ocf_seq_cutoff_policy_never] = "never",
+};
+
+static const char *_cache_mngt_seq_cutoff_policy_get_name(
+ ocf_seq_cutoff_policy policy)
+{
+ if (policy < 0 || policy >= ocf_seq_cutoff_policy_max)
+ return NULL;
+
+ return _ocf_seq_cutoff_policy_names[policy];
+}
+
+static int _cache_mngt_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
+{
+ ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx;
+ uint32_t policy_old = core->conf_meta->seq_cutoff_policy;
+
+ if (policy_old == policy) {
+ ocf_core_log(core, log_info,
+ "Sequential cutoff policy %s is already set\n",
+ _cache_mngt_seq_cutoff_policy_get_name(policy));
+ return 0;
+ }
+
+ if (policy < 0 || policy >= ocf_seq_cutoff_policy_max) {
+ ocf_core_log(core, log_info,
+ "Wrong sequential cutoff policy!\n");
+ return -OCF_ERR_INVAL;
+ }
+
+ core->conf_meta->seq_cutoff_policy = policy;
+
+ ocf_core_log(core, log_info,
+ "Changing sequential cutoff policy from %s to %s\n",
+ _cache_mngt_seq_cutoff_policy_get_name(policy_old),
+ _cache_mngt_seq_cutoff_policy_get_name(policy));
+
+ return 0;
+}
+
+int ocf_mngt_core_set_seq_cutoff_policy(ocf_core_t core,
+ ocf_seq_cutoff_policy policy)
+{
+ OCF_CHECK_NULL(core);
+
+ return _cache_mngt_set_core_seq_cutoff_policy(core, &policy);
+}
+
+int ocf_mngt_core_set_seq_cutoff_policy_all(ocf_cache_t cache,
+ ocf_seq_cutoff_policy policy)
+{
+ OCF_CHECK_NULL(cache);
+
+ return ocf_core_visit(cache, _cache_mngt_set_core_seq_cutoff_policy,
+ &policy, true);
+}
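+
+/*
+ * Illustrative sketch combining the sequential cutoff setters above; the
+ * helper name example_set_seq_cutoff and the 1 MiB threshold are arbitrary,
+ * not part of the OCF API:
+ *
+ *     static int example_set_seq_cutoff(ocf_core_t core)
+ *     {
+ *         int ret;
+ *
+ *         ret = ocf_mngt_core_set_seq_cutoff_policy(core,
+ *                 ocf_seq_cutoff_policy_full);
+ *         if (ret)
+ *             return ret;
+ *
+ *         return ocf_mngt_core_set_seq_cutoff_threshold(core, 1024 * 1024);
+ *     }
+ */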
+
+int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core,
+ ocf_seq_cutoff_policy *policy)
+{
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(policy);
+
+ *policy = ocf_core_get_seq_cutoff_policy(core);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_core_pool.c b/src/spdk/ocf/src/mngt/ocf_mngt_core_pool.c
new file mode 100644
index 000000000..ae7e9eac6
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_core_pool.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_priv.h"
+#include "../ocf_core_priv.h"
+#include "../ocf_ctx_priv.h"
+
+void ocf_mngt_core_pool_init(ocf_ctx_t ctx)
+{
+ OCF_CHECK_NULL(ctx);
+ INIT_LIST_HEAD(&ctx->core_pool.core_pool_head);
+}
+
+int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx)
+{
+ int count;
+ OCF_CHECK_NULL(ctx);
+ env_rmutex_lock(&ctx->lock);
+ count = ctx->core_pool.core_pool_count;
+ env_rmutex_unlock(&ctx->lock);
+ return count;
+}
+
+int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type)
+{
+ ocf_volume_t volume;
+
+ int result = 0;
+
+ OCF_CHECK_NULL(ctx);
+
+ result = ocf_ctx_volume_create(ctx, &volume, uuid, type);
+ if (result)
+ return result;
+
+ result = ocf_volume_open(volume, NULL);
+ if (result) {
+ ocf_volume_deinit(volume);
+ return result;
+ }
+
+ env_rmutex_lock(&ctx->lock);
+ list_add(&volume->core_pool_item, &ctx->core_pool.core_pool_head);
+ ctx->core_pool.core_pool_count++;
+ env_rmutex_unlock(&ctx->lock);
+ return result;
+}
+
+int ocf_mngt_core_pool_visit(ocf_ctx_t ctx,
+ int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx)
+{
+ int result = 0;
+ ocf_volume_t svolume;
+
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(visitor);
+
+ env_rmutex_lock(&ctx->lock);
+ list_for_each_entry(svolume, &ctx->core_pool.core_pool_head,
+ core_pool_item) {
+ result = visitor(&svolume->uuid, visitor_ctx);
+ if (result)
+ break;
+ }
+ env_rmutex_unlock(&ctx->lock);
+ return result;
+}
+
+ocf_volume_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid,
+ ocf_volume_type_t type)
+{
+ ocf_volume_t svolume;
+
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(uuid);
+ OCF_CHECK_NULL(uuid->data);
+
+ list_for_each_entry(svolume, &ctx->core_pool.core_pool_head,
+ core_pool_item) {
+ if (svolume->type == type && !env_strncmp(svolume->uuid.data,
+ svolume->uuid.size, uuid->data, uuid->size)) {
+ return svolume;
+ }
+ }
+
+ return NULL;
+}
+
+void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(volume);
+ env_rmutex_lock(&ctx->lock);
+ ctx->core_pool.core_pool_count--;
+ list_del(&volume->core_pool_item);
+ env_rmutex_unlock(&ctx->lock);
+ ocf_volume_destroy(volume);
+}
+
+void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx)
+{
+ ocf_volume_t svolume, tvolume;
+
+ OCF_CHECK_NULL(ctx);
+
+ list_for_each_entry_safe(svolume, tvolume, &ctx->core_pool.core_pool_head,
+ core_pool_item) {
+ ocf_volume_close(svolume);
+ ocf_mngt_core_pool_remove(ctx, svolume);
+ }
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_core_pool_priv.h b/src/spdk/ocf/src/mngt/ocf_mngt_core_pool_priv.h
new file mode 100644
index 000000000..c3a184787
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_core_pool_priv.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_MNGT_CORE_POOL_PRIV_H__
+#define __OCF_MNGT_CORE_POOL_PRIV_H__
+
+#include "ocf/ocf.h"
+
+void ocf_mngt_core_pool_init(ocf_ctx_t ctx);
+
+void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx);
+
+#endif /* __OCF_MNGT_CORE_POOL_PRIV_H__ */
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_core_priv.h b/src/spdk/ocf/src/mngt/ocf_mngt_core_priv.h
new file mode 100644
index 000000000..4d0c4e1e2
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_core_priv.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_MNGT_CORE_PRIV_H__
+#define __OCF_MNGT_CORE_PRIV_H__
+
+#include "../ocf_core_priv.h"
+
+int ocf_mngt_core_init_front_volume(ocf_core_t core);
+
+void ocf_mngt_core_clear_uuid_metadata(ocf_core_t core);
+
+#endif /* __OCF_MNGT_CORE_PRIV_H__ */
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_flush.c b/src/spdk/ocf/src/mngt/ocf_mngt_flush.c
new file mode 100644
index 000000000..78e63c2fa
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_flush.c
@@ -0,0 +1,999 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_priv.h"
+#include "../metadata/metadata.h"
+#include "../cleaning/cleaning.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_pipeline.h"
+#include "../utils/utils_refcnt.h"
+#include "../ocf_request.h"
+#include "../ocf_def_priv.h"
+
+struct ocf_mngt_cache_flush_context;
+typedef void (*ocf_flush_complete_t)(struct ocf_mngt_cache_flush_context *, int);
+
+struct flush_containers_context
+{
+ /* array of container descriptors */
+ struct flush_container *fctbl;
+ /* fctbl array size */
+ uint32_t fcnum;
+ /* shared error for all concurrent container flushes */
+ env_atomic error;
+ /* number of outstanding container flushes */
+ env_atomic count;
+ /* first container flush to notice interrupt sets this to 1 */
+ env_atomic interrupt_seen;
+ /* completion to be called after all containers are flushed */
+ ocf_flush_complete_t complete;
+};
+
+/* common struct for cache/core flush/purge pipeline priv */
+struct ocf_mngt_cache_flush_context
+{
+ /* pipeline for flush / purge */
+ ocf_pipeline_t pipeline;
+ /* target cache */
+ ocf_cache_t cache;
+ /* target core */
+ ocf_core_t core;
+
+ struct {
+ bool lock : 1;
+ bool freeze : 1;
+ } flags;
+
+ /* management operation identifier */
+ enum {
+ flush_cache = 0,
+ flush_core,
+ purge_cache,
+ purge_core
+ } op;
+
+ /* ocf management entry point completion */
+ union {
+ ocf_mngt_cache_flush_end_t flush_cache;
+ ocf_mngt_core_flush_end_t flush_core;
+ ocf_mngt_cache_purge_end_t purge_cache;
+ ocf_mngt_core_purge_end_t purge_core;
+ } cmpl;
+
+ /* completion private data */
+ void *priv;
+
+ /* purge parameters */
+ struct {
+ uint64_t end_byte;
+ uint64_t core_id;
+ } purge;
+
+ /* context for flush containers logic */
+ struct flush_containers_context fcs;
+};
+
+static void _ocf_mngt_begin_flush_complete(void *priv)
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_begin_flush(ocf_pipeline_t pipeline, void *priv,
+ ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int result;
+
+ /* FIXME: need mechanism for async waiting for outstanding flushes to
+ * finish */
+ result = env_mutex_trylock(&cache->flush_mutex);
+ if (result)
+ OCF_PL_FINISH_RET(pipeline, -OCF_ERR_FLUSH_IN_PROGRESS);
+ context->flags.lock = true;
+
+ ocf_refcnt_freeze(&cache->refcnt.dirty);
+ context->flags.freeze = true;
+
+ ocf_refcnt_register_zero_cb(&cache->refcnt.dirty,
+ _ocf_mngt_begin_flush_complete, context);
+}
+
+bool ocf_mngt_core_is_dirty(ocf_core_t core)
+{
+ return !!env_atomic_read(&core->runtime_meta->dirty_clines);
+}
+
+bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
+{
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ OCF_CHECK_NULL(cache);
+
+ for_each_core(cache, core, core_id) {
+ if (ocf_mngt_core_is_dirty(core))
+ return true;
+ }
+
+ return false;
+}
+
+/************************FLUSH CORE CODE**************************************/
+/* Returns:
+ * 0 if OK and tbl & num is filled:
+ * * tbl - table with sectors&cacheline
+ * * num - number of items in this table.
+ * other value means error.
+ * NOTE:
+ * Table is not sorted.
+ */
+static int _ocf_mngt_get_sectors(ocf_cache_t cache, ocf_core_id_t core_id,
+ struct flush_data **tbl, uint32_t *num)
+{
+ ocf_core_t core = ocf_cache_get_core(cache, core_id);
+ uint64_t core_line;
+ ocf_core_id_t i_core_id;
+ struct flush_data *elem;
+ uint32_t line, dirty_found = 0, dirty_total = 0;
+ unsigned ret = 0;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ dirty_total = env_atomic_read(&core->runtime_meta->dirty_clines);
+ if (!dirty_total) {
+ *num = 0;
+ *tbl = NULL;
+ goto unlock;
+ }
+
+ *tbl = env_vmalloc(dirty_total * sizeof(**tbl));
+ if (*tbl == NULL) {
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+ for (line = 0, elem = *tbl;
+ line < cache->device->collision_table_entries;
+ line++) {
+ ocf_metadata_get_core_info(cache, line, &i_core_id,
+ &core_line);
+
+ if (i_core_id == core_id &&
+ metadata_test_valid_any(cache, line) &&
+ metadata_test_dirty(cache, line)) {
+ /* It's valid and dirty target core cacheline */
+ elem->cache_line = line;
+ elem->core_line = core_line;
+ elem->core_id = i_core_id;
+ elem++;
+ dirty_found++;
+
+ /* stop if all cachelines were found */
+ if (dirty_found == dirty_total)
+ break;
+ }
+
+ if ((line + 1) % 131072 == 0) {
+ ocf_metadata_end_exclusive_access(
+ &cache->metadata.lock);
+ env_cond_resched();
+ ocf_metadata_start_exclusive_access(
+ &cache->metadata.lock);
+ }
+ }
+
+ ocf_core_log(core, log_debug,
+ "%u dirty cache lines to clean\n", dirty_found);
+
+
+ *num = dirty_found;
+
+unlock:
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return ret;
+}
+
+static int _ocf_mngt_get_flush_containers(ocf_cache_t cache,
+ struct flush_container **fctbl, uint32_t *fcnum)
+{
+ struct flush_container *fc;
+ struct flush_container *curr;
+ uint32_t *core_revmap;
+ uint32_t num;
+ uint64_t core_line;
+ ocf_core_id_t core_id;
+ ocf_core_t core;
+ uint32_t i, j = 0, line;
+ uint32_t dirty_found = 0, dirty_total = 0;
+ int ret = 0;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ /*
+ * TODO: Create containers for each physical device, not for
+ * each core. Cores can be partitions of single device.
+ */
+ num = cache->conf_meta->core_count;
+ if (num == 0) {
+ *fcnum = 0;
+ goto unlock;
+ }
+
+ core_revmap = env_vzalloc(sizeof(*core_revmap) * OCF_CORE_MAX);
+ if (!core_revmap) {
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+ /* TODO: Alloc fcs and data tables in single allocation */
+ fc = env_vzalloc(sizeof(**fctbl) * num);
+ if (!fc) {
+ env_vfree(core_revmap);
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+ for_each_core(cache, core, core_id) {
+ fc[j].core_id = core_id;
+ core_revmap[core_id] = j;
+
+ /* Check for dirty blocks */
+ fc[j].count = env_atomic_read(
+ &core->runtime_meta->dirty_clines);
+ dirty_total += fc[j].count;
+
+ if (fc[j].count) {
+ fc[j].flush_data = env_vmalloc(fc[j].count *
+ sizeof(*fc[j].flush_data));
+ }
+
+ if (++j == cache->conf_meta->core_count)
+ break;
+ }
+
+ if (!dirty_total) {
+ env_vfree(core_revmap);
+ env_vfree(fc);
+ *fcnum = 0;
+ goto unlock;
+ }
+
+ for (line = 0; line < cache->device->collision_table_entries; line++) {
+ ocf_metadata_get_core_info(cache, line, &core_id, &core_line);
+
+ if (metadata_test_valid_any(cache, line) &&
+ metadata_test_dirty(cache, line)) {
+ curr = &fc[core_revmap[core_id]];
+
+ ENV_BUG_ON(curr->iter >= curr->count);
+
+ /* It's core_id cacheline and it's valid and it's dirty! */
+ curr->flush_data[curr->iter].cache_line = line;
+ curr->flush_data[curr->iter].core_line = core_line;
+ curr->flush_data[curr->iter].core_id = core_id;
+ curr->iter++;
+ dirty_found++;
+
+ /* stop if all cachelines were found */
+ if (dirty_found == dirty_total)
+ break;
+ }
+
+ if ((line + 1) % 131072 == 0) {
+ ocf_metadata_end_exclusive_access(
+ &cache->metadata.lock);
+ env_cond_resched();
+ ocf_metadata_start_exclusive_access(
+ &cache->metadata.lock);
+ }
+ }
+
+ if (dirty_total != dirty_found) {
+ for (i = 0; i < num; i++)
+ fc[i].count = fc[i].iter;
+ }
+
+ for (i = 0; i < num; i++)
+ fc[i].iter = 0;
+
+ env_vfree(core_revmap);
+ *fctbl = fc;
+ *fcnum = num;
+
+unlock:
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ return ret;
+}
+
+static void _ocf_mngt_free_flush_containers(struct flush_container *fctbl,
+ uint32_t num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ env_vfree(fctbl[i].flush_data);
+ env_vfree(fctbl);
+}
+
+/*
+ * OCF will try to guess disk speed etc. and adjust flushing block
+ * size accordingly, however these bounds shall be respected regardless
+ * of disk speed, cache line size configured etc.
+ */
+#define OCF_MNG_FLUSH_MIN (4*MiB / ocf_line_size(cache))
+#define OCF_MNG_FLUSH_MAX (100*MiB / ocf_line_size(cache))
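+/*
+ * For example, with a 4 KiB cache line these bounds evaluate to 1024 and
+ * 25600 cache lines per flush portion, respectively.
+ */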
+
+static void _ocf_mngt_flush_portion(struct flush_container *fc)
+{
+ ocf_cache_t cache = fc->cache;
+ uint64_t flush_portion_div;
+ uint32_t curr_count;
+
+ flush_portion_div = env_ticks_to_msecs(fc->ticks2 - fc->ticks1);
+ if (unlikely(!flush_portion_div))
+ flush_portion_div = 1;
+
+ fc->flush_portion = fc->flush_portion * 1000 / flush_portion_div;
+ fc->flush_portion &= ~0x3ffULL;
+
+ /* regardless those calculations, limit flush portion to be
+ * between OCF_MNG_FLUSH_MIN and OCF_MNG_FLUSH_MAX
+ */
+ fc->flush_portion = OCF_MIN(fc->flush_portion, OCF_MNG_FLUSH_MAX);
+ fc->flush_portion = OCF_MAX(fc->flush_portion, OCF_MNG_FLUSH_MIN);
+
+ curr_count = OCF_MIN(fc->count - fc->iter, fc->flush_portion);
+
+ ocf_cleaner_do_flush_data_async(fc->cache,
+ &fc->flush_data[fc->iter],
+ curr_count, &fc->attribs);
+
+ fc->iter += curr_count;
+}
+
+static void _ocf_mngt_flush_portion_end(void *private_data, int error)
+{
+ struct flush_container *fc = private_data;
+ struct ocf_mngt_cache_flush_context *context = fc->context;
+ struct flush_containers_context *fsc = &context->fcs;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = &cache->core[fc->core_id];
+ bool first_interrupt;
+
+ env_atomic_set(&core->flushed, fc->iter);
+
+ fc->ticks2 = env_get_tick_count();
+
+ env_atomic_cmpxchg(&fsc->error, 0, error);
+
+ if (cache->flushing_interrupted) {
+ first_interrupt = !env_atomic_cmpxchg(
+ &fsc->interrupt_seen, 0, 1);
+ if (first_interrupt) {
+ ocf_cache_log(cache, log_info,
+ "Flushing interrupted by user\n");
+ env_atomic_cmpxchg(&fsc->error, 0,
+ -OCF_ERR_FLUSHING_INTERRUPTED);
+ }
+ }
+
+ if (env_atomic_read(&fsc->error) || fc->iter == fc->count) {
+ ocf_req_put(fc->req);
+ fc->end(context);
+ return;
+ }
+
+ ocf_engine_push_req_back(fc->req, false);
+}
+
+
+static int _ofc_flush_container_step(struct ocf_request *req)
+{
+ struct flush_container *fc = req->priv;
+ ocf_cache_t cache = fc->cache;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+ _ocf_mngt_flush_portion(fc);
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_flush_portion = {
+ .read = _ofc_flush_container_step,
+ .write = _ofc_flush_container_step,
+};
+
+static void _ocf_mngt_flush_container(
+ struct ocf_mngt_cache_flush_context *context,
+ struct flush_container *fc, ocf_flush_containter_coplete_t end)
+{
+ ocf_cache_t cache = context->cache;
+ struct ocf_request *req;
+ int error = 0;
+
+ if (!fc->count)
+ goto finish;
+
+ fc->end = end;
+ fc->context = context;
+
+ req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+ if (!req) {
+ error = -OCF_ERR_NO_MEM;
+ goto finish;
+ }
+
+ req->info.internal = true;
+ req->io_if = &_io_if_flush_portion;
+ req->priv = fc;
+
+ fc->req = req;
+ fc->attribs.cache_line_lock = true;
+ fc->attribs.cmpl_context = fc;
+ fc->attribs.cmpl_fn = _ocf_mngt_flush_portion_end;
+ fc->attribs.io_queue = cache->mngt_queue;
+ fc->cache = cache;
+ fc->flush_portion = OCF_MNG_FLUSH_MIN;
+ fc->ticks1 = 0;
+ fc->ticks2 = UINT_MAX;
+
+ ocf_engine_push_req_back(fc->req, true);
+ return;
+
+finish:
+ env_atomic_cmpxchg(&context->fcs.error, 0, error);
+ end(context);
+}
+
+void _ocf_flush_container_complete(void *ctx)
+{
+ struct ocf_mngt_cache_flush_context *context = ctx;
+
+ if (env_atomic_dec_return(&context->fcs.count)) {
+ return;
+ }
+
+ _ocf_mngt_free_flush_containers(context->fcs.fctbl,
+ context->fcs.fcnum);
+
+ context->fcs.complete(context,
+ env_atomic_read(&context->fcs.error));
+}
+
+static void _ocf_mngt_flush_containers(
+ struct ocf_mngt_cache_flush_context *context,
+ struct flush_container *fctbl,
+ uint32_t fcnum, ocf_flush_complete_t complete)
+{
+ int i;
+
+ if (fcnum == 0) {
+ complete(context, 0);
+ return;
+ }
+
+ /* Sort data. Smallest sectors first (0...n). */
+ ocf_cleaner_sort_flush_containers(fctbl, fcnum);
+
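+ /*
+ * Completion is reference counted: the counter starts at 1 (a setup
+ * reference), each container adds one, and the final
+ * _ocf_flush_container_complete() call below drops the setup reference,
+ * so the user completion fires exactly once, after all containers finish.
+ */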
+ env_atomic_set(&context->fcs.error, 0);
+ env_atomic_set(&context->fcs.count, 1);
+ context->fcs.complete = complete;
+ context->fcs.fctbl = fctbl;
+ context->fcs.fcnum = fcnum;
+
+ for (i = 0; i < fcnum; i++) {
+ env_atomic_inc(&context->fcs.count);
+ _ocf_mngt_flush_container(context, &fctbl[i],
+ _ocf_flush_container_complete);
+ }
+
+ _ocf_flush_container_complete(context);
+}
+
+
+static void _ocf_mngt_flush_core(
+ struct ocf_mngt_cache_flush_context *context,
+ ocf_flush_complete_t complete)
+{
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+ struct flush_container *fc;
+ int ret;
+
+ fc = env_vzalloc(sizeof(*fc));
+ if (!fc) {
+ complete(context, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ ret = _ocf_mngt_get_sectors(cache, core_id,
+ &fc->flush_data, &fc->count);
+ if (ret) {
+ ocf_core_log(core, log_err, "Flushing operation aborted, "
+ "no memory\n");
+ env_vfree(fc);
+ complete(context, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ fc->core_id = core_id;
+ fc->iter = 0;
+
+ _ocf_mngt_flush_containers(context, fc, 1, complete);
+}
+
+static void _ocf_mngt_flush_all_cores(
+ struct ocf_mngt_cache_flush_context *context,
+ ocf_flush_complete_t complete)
+{
+ ocf_cache_t cache = context->cache;
+ struct flush_container *fctbl = NULL;
+ uint32_t fcnum = 0;
+ int ret;
+
+ if (context->op == flush_cache)
+ ocf_cache_log(cache, log_info, "Flushing cache\n");
+ else if (context->op == purge_cache)
+ ocf_cache_log(cache, log_info, "Purging cache\n");
+
+ env_atomic_set(&cache->flush_in_progress, 1);
+
+ /* Get all 'dirty' sectors for all cores */
+ ret = _ocf_mngt_get_flush_containers(cache, &fctbl, &fcnum);
+ if (ret) {
+ ocf_cache_log(cache, log_err, "Flushing operation aborted, "
+ "no memory\n");
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ complete(context, ret);
+ return;
+ }
+
+ _ocf_mngt_flush_containers(context, fctbl, fcnum, complete);
+}
+
+static void _ocf_mngt_flush_all_cores_complete(
+ struct ocf_mngt_cache_flush_context *context, int error)
+{
+ ocf_cache_t cache = context->cache;
+ uint32_t i, j;
+
+ env_atomic_set(&cache->flush_in_progress, 0);
+
+ for (i = 0, j = 0; i < OCF_CORE_MAX; i++) {
+ if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
+ continue;
+
+ env_atomic_set(&cache->core[i].flushed, 0);
+
+ if (++j == cache->conf_meta->core_count)
+ break;
+ }
+
+ if (error)
+ OCF_PL_FINISH_RET(context->pipeline, error);
+
+ if (context->op == flush_cache)
+ ocf_cache_log(cache, log_info, "Flushing cache completed\n");
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+/**
+ * Flush all dirty data stored in the cache (for all cores attached to it)
+ */
+static void _ocf_mngt_cache_flush(ocf_pipeline_t pipeline, void *priv,
+ ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+
+ context->cache->flushing_interrupted = 0;
+ _ocf_mngt_flush_all_cores(context, _ocf_mngt_flush_all_cores_complete);
+}
+
+static void _ocf_mngt_flush_finish(ocf_pipeline_t pipeline, void *priv,
+ int error)
+
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+
+ if (context->flags.freeze)
+ ocf_refcnt_unfreeze(&cache->refcnt.dirty);
+
+ if (context->flags.lock)
+ env_mutex_unlock(&cache->flush_mutex);
+
+ switch (context->op) {
+ case flush_cache:
+ context->cmpl.flush_cache(cache, context->priv, error);
+ break;
+ case flush_core:
+ context->cmpl.flush_core(core, context->priv, error);
+ break;
+ case purge_cache:
+ context->cmpl.purge_cache(cache, context->priv, error);
+ break;
+ case purge_core:
+ context->cmpl.purge_core(core, context->priv, error);
+ break;
+ default:
+ ENV_BUG();
+ }
+
+ ocf_pipeline_destroy(pipeline);
+}
+
+static struct ocf_pipeline_properties _ocf_mngt_cache_flush_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_flush_context),
+ .finish = _ocf_mngt_flush_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_begin_flush),
+ OCF_PL_STEP(_ocf_mngt_cache_flush),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
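+/*
+ * Illustrative usage sketch (my_flush_cmpl and my_ctx are hypothetical
+ * caller-side names):
+ *
+ *	static void my_flush_cmpl(ocf_cache_t cache, void *priv, int error)
+ *	{
+ *		if (error)
+ *			ocf_cache_log(cache, log_err, "Flush failed\n");
+ *	}
+ *
+ *	ocf_mngt_cache_flush(cache, my_flush_cmpl, my_ctx);
+ */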
+void ocf_mngt_cache_flush(ocf_cache_t cache,
+ ocf_mngt_cache_flush_end_t cmpl, void *priv)
+{
+ ocf_pipeline_t pipeline;
+ struct ocf_mngt_cache_flush_context *context;
+ int result = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!ocf_cache_is_device_attached(cache)) {
+ ocf_cache_log(cache, log_err, "Cannot flush cache - "
+ "cache device is detached\n");
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+ }
+
+ if (ocf_cache_is_incomplete(cache)) {
+ ocf_cache_log(cache, log_err, "Cannot flush cache - "
+ "cache is in incomplete state\n");
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_CACHE_IN_INCOMPLETE_STATE);
+ }
+
+ if (!cache->mngt_queue) {
+ ocf_cache_log(cache, log_err,
+ "Cannot flush cache - no flush queue set\n");
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+ }
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &_ocf_mngt_cache_flush_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->pipeline = pipeline;
+ context->cmpl.flush_cache = cmpl;
+ context->priv = priv;
+ context->cache = cache;
+ context->op = flush_cache;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_flush_core_complete(
+ struct ocf_mngt_cache_flush_context *context, int error)
+{
+ ocf_cache_t cache = context->cache;
+ ocf_core_t core = context->core;
+
+ env_atomic_set(&core->flushed, 0);
+
+ if (error)
+ OCF_PL_FINISH_RET(context->pipeline, error);
+
+ if (context->op == flush_core)
+ ocf_cache_log(cache, log_info, "Flushing completed\n");
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_core_flush(ocf_pipeline_t pipeline, void *priv,
+ ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (context->op == flush_core)
+ ocf_cache_log(cache, log_info, "Flushing core\n");
+ else if (context->op == purge_core)
+ ocf_cache_log(cache, log_info, "Purging core\n");
+
+ context->cache->flushing_interrupted = 0;
+ _ocf_mngt_flush_core(context, _ocf_mngt_flush_core_complete);
+}
+
+static
+struct ocf_pipeline_properties _ocf_mngt_core_flush_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_flush_context),
+ .finish = _ocf_mngt_flush_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_begin_flush),
+ OCF_PL_STEP(_ocf_mngt_core_flush),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_core_flush(ocf_core_t core,
+ ocf_mngt_core_flush_end_t cmpl, void *priv)
+{
+ ocf_pipeline_t pipeline;
+ struct ocf_mngt_cache_flush_context *context;
+ ocf_cache_t cache;
+ int result;
+
+ OCF_CHECK_NULL(core);
+
+ cache = ocf_core_get_cache(core);
+
+ if (!ocf_cache_is_device_attached(cache)) {
+ ocf_cache_log(cache, log_err, "Cannot flush core - "
+ "cache device is detached\n");
+ OCF_CMPL_RET(core, priv, -OCF_ERR_INVAL);
+ }
+
+ if (!core->opened) {
+ ocf_core_log(core, log_err, "Cannot flush - core is in "
+ "inactive state\n");
+ OCF_CMPL_RET(core, priv, -OCF_ERR_CORE_IN_INACTIVE_STATE);
+ }
+
+ if (!cache->mngt_queue) {
+ ocf_core_log(core, log_err,
+ "Cannot flush core - no flush queue set\n");
+ OCF_CMPL_RET(core, priv, -OCF_ERR_INVAL);
+ }
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &_ocf_mngt_core_flush_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(core, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->pipeline = pipeline;
+ context->cmpl.flush_core = cmpl;
+ context->priv = priv;
+ context->cache = cache;
+ context->op = flush_core;
+ context->core = core;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void _ocf_mngt_cache_invalidate(ocf_pipeline_t pipeline, void *priv,
+ ocf_pipeline_arg_t arg)
+{
+ struct ocf_mngt_cache_flush_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int result;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+ result = ocf_metadata_sparse_range(cache, context->purge.core_id, 0,
+ context->purge.end_byte);
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
+}
+
+static
+struct ocf_pipeline_properties _ocf_mngt_cache_purge_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_flush_context),
+ .finish = _ocf_mngt_flush_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_begin_flush),
+ OCF_PL_STEP(_ocf_mngt_cache_flush),
+ OCF_PL_STEP(_ocf_mngt_cache_invalidate),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_cache_purge(ocf_cache_t cache,
+ ocf_mngt_cache_purge_end_t cmpl, void *priv)
+{
+ ocf_pipeline_t pipeline;
+ int result = 0;
+ struct ocf_mngt_cache_flush_context *context;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!cache->mngt_queue) {
+ ocf_cache_log(cache, log_err,
+ "Cannot purge cache - no flush queue set\n");
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
+ }
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &_ocf_mngt_cache_purge_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->pipeline = pipeline;
+ context->cmpl.purge_cache = cmpl;
+ context->priv = priv;
+ context->cache = cache;
+ context->op = purge_cache;
+ context->purge.core_id = OCF_CORE_ID_INVALID;
+ context->purge.end_byte = ~0ULL;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static
+struct ocf_pipeline_properties _ocf_mngt_core_purge_pipeline_properties = {
+ .priv_size = sizeof(struct ocf_mngt_cache_flush_context),
+ .finish = _ocf_mngt_flush_finish,
+ .steps = {
+ OCF_PL_STEP(_ocf_mngt_begin_flush),
+ OCF_PL_STEP(_ocf_mngt_core_flush),
+ OCF_PL_STEP(_ocf_mngt_cache_invalidate),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+void ocf_mngt_core_purge(ocf_core_t core,
+ ocf_mngt_core_purge_end_t cmpl, void *priv)
+{
+ ocf_pipeline_t pipeline;
+ struct ocf_mngt_cache_flush_context *context;
+ ocf_cache_t cache;
+ ocf_core_id_t core_id;
+ int result = 0;
+ uint64_t core_size = ~0ULL;
+
+ OCF_CHECK_NULL(core);
+
+ cache = ocf_core_get_cache(core);
+ core_id = ocf_core_get_id(core);
+
+ if (!cache->mngt_queue) {
+ ocf_core_log(core, log_err,
+ "Cannot purge core - no flush queue set\n");
+ OCF_CMPL_RET(core, priv, -OCF_ERR_INVAL);
+ }
+
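+ /* Purge covers the whole core: metadata is sparsed from byte 0 up to
+ * the core length (or the entire address range if the length reads
+ * as 0). */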
+ core_size = ocf_volume_get_length(&cache->core[core_id].volume);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &_ocf_mngt_core_purge_pipeline_properties);
+ if (result)
+ OCF_CMPL_RET(core, priv, -OCF_ERR_NO_MEM);
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->pipeline = pipeline;
+ context->cmpl.purge_core = cmpl;
+ context->priv = priv;
+ context->cache = cache;
+ context->op = purge_core;
+ context->purge.core_id = core_id;
+ context->purge.end_byte = core_size ?: ~0ULL;
+ context->core = core;
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+void ocf_mngt_cache_flush_interrupt(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ ocf_cache_log(cache, log_alert, "Flushing interrupt\n");
+ cache->flushing_interrupted = 1;
+}
+
+int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type)
+{
+ ocf_cleaning_t old_type;
+ int ret = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (type < 0 || type >= ocf_cleaning_max)
+ return -OCF_ERR_INVAL;
+
+ old_type = cache->conf_meta->cleaning_policy_type;
+
+ if (type == old_type) {
+ ocf_cache_log(cache, log_info, "Cleaning policy %s is already "
+ "set\n", cleaning_policy_ops[old_type].name);
+ return 0;
+ }
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ if (cleaning_policy_ops[old_type].deinitialize)
+ cleaning_policy_ops[old_type].deinitialize(cache);
+
+ if (cleaning_policy_ops[type].initialize) {
+ if (cleaning_policy_ops[type].initialize(cache, 1)) {
+ /*
+ * If initialization of new cleaning policy failed,
+ * we set cleaning policy to nop.
+ */
+ type = ocf_cleaning_nop;
+ ret = -OCF_ERR_INVAL;
+ }
+ }
+
+ cache->conf_meta->cleaning_policy_type = type;
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ ocf_cache_log(cache, log_info, "Changing cleaning policy from "
+ "%s to %s\n", cleaning_policy_ops[old_type].name,
+ cleaning_policy_ops[type].name);
+
+ return ret;
+}
+
+int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type)
+{
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(type);
+
+ *type = cache->conf_meta->cleaning_policy_type;
+
+ return 0;
+}
+
+int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type,
+ uint32_t param_id, uint32_t param_value)
+{
+ int ret;
+
+ OCF_CHECK_NULL(cache);
+
+ if (type < 0 || type >= ocf_cleaning_max)
+ return -OCF_ERR_INVAL;
+
+ if (!cleaning_policy_ops[type].set_cleaning_param)
+ return -OCF_ERR_INVAL;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ ret = cleaning_policy_ops[type].set_cleaning_param(cache,
+ param_id, param_value);
+
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return ret;
+}
+
+int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type,
+ uint32_t param_id, uint32_t *param_value)
+{
+ int ret;
+
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(param_value);
+
+ if (type < 0 || type >= ocf_cleaning_max)
+ return -OCF_ERR_INVAL;
+
+ if (!cleaning_policy_ops[type].get_cleaning_param)
+ return -OCF_ERR_INVAL;
+
+ ret = cleaning_policy_ops[type].get_cleaning_param(cache,
+ param_id, param_value);
+
+ return ret;
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_io_class.c b/src/spdk/ocf/src/mngt/ocf_mngt_io_class.c
new file mode 100644
index 000000000..29ee45bc0
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_io_class.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_part.h"
+#include "../eviction/ops.h"
+#include "ocf_env.h"
+
+static uint64_t _ocf_mngt_count_parts_min_size(struct ocf_cache *cache)
+{
+ struct ocf_user_part *part;
+ ocf_part_id_t part_id;
+ uint64_t count = 0;
+
+ for_each_part(cache, part, part_id) {
+ if (ocf_part_is_valid(part))
+ count += part->config->min_size;
+ }
+
+ return count;
+}
+
+int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
+ ocf_part_id_t part_id, const char *name, uint32_t min_size,
+ uint32_t max_size, uint8_t priority, bool valid)
+{
+ uint32_t size;
+
+ if (!name)
+ return -OCF_ERR_INVAL;
+
+ if (part_id >= OCF_IO_CLASS_MAX)
+ return -OCF_ERR_INVAL;
+
+ if (cache->user_parts[part_id].config->flags.valid)
+ return -OCF_ERR_INVAL;
+
+ if (max_size > PARTITION_SIZE_MAX)
+ return -OCF_ERR_INVAL;
+
+ if (env_strnlen(name, OCF_IO_CLASS_NAME_MAX) >=
+ OCF_IO_CLASS_NAME_MAX) {
+ ocf_cache_log(cache, log_info,
+ "Name of the partition is too long\n");
+ return -OCF_ERR_INVAL;
+ }
+
+ size = sizeof(cache->user_parts[part_id].config->name);
+ if (env_strncpy(cache->user_parts[part_id].config->name, size, name, size))
+ return -OCF_ERR_INVAL;
+
+ cache->user_parts[part_id].config->min_size = min_size;
+ cache->user_parts[part_id].config->max_size = max_size;
+ cache->user_parts[part_id].config->priority = priority;
+ cache->user_parts[part_id].config->cache_mode = ocf_cache_mode_max;
+
+ ocf_part_set_valid(cache, part_id, valid);
+ ocf_lst_add(&cache->lst_part, part_id);
+ ocf_part_sort(cache);
+
+ cache->user_parts[part_id].config->flags.added = 1;
+
+ return 0;
+}
+
+static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
+ ocf_part_id_t part_id, uint32_t min, uint32_t max)
+{
+ struct ocf_user_part *part = &cache->user_parts[part_id];
+
+ if (min > max)
+ return -OCF_ERR_INVAL;
+
+ if (_ocf_mngt_count_parts_min_size(cache) + min
+ >= cache->device->collision_table_entries) {
+ /* Illegal configuration in which sum of all min_sizes exceeds
+ * cache size.
+ */
+ return -OCF_ERR_INVAL;
+ }
+
+ if (max > PARTITION_SIZE_MAX)
+ max = PARTITION_SIZE_MAX;
+
+ part->config->min_size = min;
+ part->config->max_size = max;
+
+ return 0;
+}
+
+static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
+ const struct ocf_mngt_io_class_config *cfg)
+{
+ int result = -1;
+ struct ocf_user_part *dest_part;
+
+ ocf_part_id_t part_id = cfg->class_id;
+ const char *name = cfg->name;
+ int16_t prio = cfg->prio;
+ ocf_cache_mode_t cache_mode = cfg->cache_mode;
+ uint32_t min = cfg->min_size;
+ uint32_t max = cfg->max_size;
+
+ OCF_CHECK_NULL(cache->device);
+
+ dest_part = &cache->user_parts[part_id];
+
+ if (!ocf_part_is_added(dest_part)) {
+ ocf_cache_log(cache, log_info, "Setting IO class, id: %u, "
+ "name: '%s' [ ERROR ]\n", part_id, dest_part->config->name);
+ return -OCF_ERR_INVAL;
+ }
+
+ if (!name[0])
+ return -OCF_ERR_INVAL;
+
+ if (part_id == PARTITION_DEFAULT) {
+ /* Special behavior for default partition */
+
+ /* Try set partition size */
+ if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
+ ocf_cache_log(cache, log_info,
+ "Setting IO class size, id: %u, name: '%s' "
+ "[ ERROR ]\n", part_id, dest_part->config->name);
+ return -OCF_ERR_INVAL;
+ }
+ ocf_part_set_prio(cache, dest_part, prio);
+ dest_part->config->cache_mode = cache_mode;
+
+ ocf_cache_log(cache, log_info,
+ "Updating unclassified IO class, id: "
+ "%u [ OK ]\n", part_id);
+
+ return 0;
+ }
+
+ /* Setting */
+ result = env_strncpy(dest_part->config->name,
+ sizeof(dest_part->config->name), name,
+ sizeof(dest_part->config->name));
+ if (result)
+ return result;
+
+ /* Try set partition size */
+ if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
+ ocf_cache_log(cache, log_info,
+ "Setting IO class size, id: %u, name: '%s' "
+ "[ ERROR ]\n", part_id, dest_part->config->name);
+ return -OCF_ERR_INVAL;
+ }
+
+ if (ocf_part_is_valid(dest_part)) {
+ /* Updating existing */
+ ocf_cache_log(cache, log_info, "Updating existing IO "
+ "class, id: %u, name: '%s' [ OK ]\n",
+ part_id, dest_part->config->name);
+ } else {
+ /* Adding new */
+ ocf_part_set_valid(cache, part_id, true);
+
+ ocf_cache_log(cache, log_info, "Adding new IO class, "
+ "id: %u, name: '%s' [ OK ]\n", part_id,
+ dest_part->config->name);
+ }
+
+ ocf_part_set_prio(cache, dest_part, prio);
+ dest_part->config->cache_mode = cache_mode;
+
+ return result;
+}
+
+static void _ocf_mngt_io_class_remove(ocf_cache_t cache,
+ const struct ocf_mngt_io_class_config *cfg)
+{
+ struct ocf_user_part *dest_part;
+ ocf_part_id_t part_id = cfg->class_id;
+
+ dest_part = &cache->user_parts[part_id];
+
+ OCF_CHECK_NULL(cache->device);
+
+ if (part_id == PARTITION_DEFAULT) {
+ ocf_cache_log(cache, log_info,
+ "Cannot remove unclassified IO class, "
+ "id: %u [ ERROR ]\n", part_id);
+ return;
+ }
+
+ if (!ocf_part_is_valid(dest_part)) {
+ /* Does not exist */
+ return;
+ }
+
+
+ ocf_part_set_valid(cache, part_id, false);
+
+ ocf_cache_log(cache, log_info,
+ "Removing IO class, id: %u [ OK ]\n", part_id);
+}
+
+static int _ocf_mngt_io_class_edit(ocf_cache_t cache,
+ const struct ocf_mngt_io_class_config *cfg)
+{
+ int result = 0;
+
+ if (cfg->name)
+ result = _ocf_mngt_io_class_configure(cache, cfg);
+ else
+ _ocf_mngt_io_class_remove(cache, cfg);
+
+ return result;
+}
+
+static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
+ const struct ocf_mngt_io_class_config *cfg)
+{
+ if (cfg->class_id >= OCF_IO_CLASS_MAX)
+ return -OCF_ERR_INVAL;
+
+ /* A NULL name means that this particular io_class should be removed */
+ if (!cfg->name)
+ return 0;
+
+ if (cfg->cache_mode < ocf_cache_mode_none ||
+ cfg->cache_mode > ocf_cache_mode_max) {
+ return -OCF_ERR_INVAL;
+ }
+
+ if (!ocf_part_is_name_valid(cfg->name)) {
+ ocf_cache_log(cache, log_info,
+ "The name of the partition is not valid\n");
+ return -OCF_ERR_INVAL;
+ }
+
+ if (!ocf_part_is_prio_valid(cfg->prio)) {
+ ocf_cache_log(cache, log_info,
+ "Invalid value of the partition priority\n");
+ return -OCF_ERR_INVAL;
+ }
+
+ return 0;
+}
+
+int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
+ const struct ocf_mngt_io_classes_config *cfg)
+{
+ struct ocf_user_part *old_config;
+ int result;
+ int i;
+
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(cfg);
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ result = _ocf_mngt_io_class_validate_cfg(cache, &cfg->config[i]);
+ if (result)
+ return result;
+ }
+
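+ /*
+ * The new io class configuration is applied transactionally: a snapshot
+ * of user_parts is taken under the exclusive metadata lock and restored
+ * if editing any single class fails.
+ */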
+ old_config = env_malloc(sizeof(cache->user_parts), ENV_MEM_NORMAL);
+ if (!old_config)
+ return -OCF_ERR_NO_MEM;
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+
+ result = env_memcpy(old_config, sizeof(cache->user_parts),
+ cache->user_parts, sizeof(cache->user_parts));
+ if (result)
+ goto out_cpy;
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ result = _ocf_mngt_io_class_edit(cache, &cfg->config[i]);
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Failed to set new io class config\n");
+ goto out_edit;
+ }
+ }
+
+ ocf_part_sort(cache);
+
+out_edit:
+ if (result) {
+ ENV_BUG_ON(env_memcpy(cache->user_parts, sizeof(cache->user_parts),
+ old_config, sizeof(cache->user_parts)));
+ }
+
+out_cpy:
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+ env_free(old_config);
+
+ return result;
+}
diff --git a/src/spdk/ocf/src/mngt/ocf_mngt_misc.c b/src/spdk/ocf/src/mngt/ocf_mngt_misc.c
new file mode 100644
index 000000000..d7a862384
--- /dev/null
+++ b/src/spdk/ocf/src/mngt/ocf_mngt_misc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../ocf_ctx_priv.h"
+
+uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx)
+{
+ struct ocf_cache *cache;
+ uint32_t count = 0;
+
+ OCF_CHECK_NULL(ctx);
+
+ env_rmutex_lock(&ctx->lock);
+
+ /* Currently there are no macros in list.h to get the list size. */
+ list_for_each_entry(cache, &ctx->caches, list)
+ count++;
+
+ env_rmutex_unlock(&ctx->lock);
+
+ return count;
+}
diff --git a/src/spdk/ocf/src/ocf_cache.c b/src/spdk/ocf/src/ocf_cache.c
new file mode 100644
index 000000000..c61a8bd30
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_cache.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+#include "engine/cache_engine.h"
+#include "utils/utils_cache_line.h"
+#include "ocf_request.h"
+#include "utils/utils_part.h"
+#include "ocf_priv.h"
+#include "ocf_cache_priv.h"
+#include "utils/utils_stats.h"
+
+ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache)
+{
+ return cache->device ? &cache->device->volume : NULL;
+}
+
+int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size)
+{
+ OCF_CHECK_NULL(cache);
+ return env_strncpy(cache->conf_meta->name, OCF_CACHE_NAME_SIZE,
+ src, src_size);
+}
+
+const char *ocf_cache_get_name(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return cache->conf_meta->name;
+}
+
+bool ocf_cache_is_incomplete(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return env_bit_test(ocf_cache_state_incomplete, &cache->cache_state);
+}
+
+bool ocf_cache_is_running(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return env_bit_test(ocf_cache_state_running, &cache->cache_state);
+}
+
+bool ocf_cache_is_device_attached(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return !ocf_refcnt_frozen(&cache->refcnt.metadata);
+}
+
+ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+
+ return cache->conf_meta->cache_mode;
+}
+
+static uint32_t _calc_dirty_for(uint64_t dirty_since)
+{
+ return dirty_since ?
+ (env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000)
+ : 0;
+}
+
+int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
+{
+ uint32_t cache_occupancy_total = 0;
+ uint32_t dirty_blocks_total = 0;
+ uint32_t initial_dirty_blocks_total = 0;
+ uint32_t flushed_total = 0;
+ uint32_t curr_dirty_cnt;
+ uint64_t dirty_since = 0;
+ uint32_t init_dirty_cnt;
+ uint64_t core_dirty_since;
+ uint32_t dirty_blocks_inactive = 0;
+ uint32_t cache_occupancy_inactive = 0;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!info)
+ return -OCF_ERR_INVAL;
+
+ ENV_BUG_ON(env_memset(info, sizeof(*info), 0));
+
+ _ocf_stats_zero(&info->inactive);
+
+ info->attached = ocf_cache_is_device_attached(cache);
+ if (info->attached) {
+ info->volume_type = ocf_ctx_get_volume_type_id(cache->owner,
+ cache->device->volume.type);
+ info->size = cache->conf_meta->cachelines;
+ }
+ info->core_count = cache->conf_meta->core_count;
+
+ info->cache_mode = ocf_cache_get_mode(cache);
+
+ /* Iterate through all possibly valid core objects, as the list of
+ * valid objects may not be contiguous
+ */
+ for_each_core(cache, core, core_id) {
+ /* If the current dirty block count exceeds the saved initial
+ * dirty block count, update the latter
+ */
+ curr_dirty_cnt = env_atomic_read(
+ &core->runtime_meta->dirty_clines);
+ init_dirty_cnt = env_atomic_read(
+ &core->runtime_meta->initial_dirty_clines);
+ if (init_dirty_cnt && (curr_dirty_cnt > init_dirty_cnt)) {
+ env_atomic_set(
+ &core->runtime_meta->initial_dirty_clines,
+ env_atomic_read(
+ &core->runtime_meta->dirty_clines));
+ }
+ cache_occupancy_total += env_atomic_read(
+ &core->runtime_meta->cached_clines);
+
+ dirty_blocks_total += env_atomic_read(
+ &core->runtime_meta->dirty_clines);
+ initial_dirty_blocks_total += env_atomic_read(
+ &core->runtime_meta->initial_dirty_clines);
+
+ if (!core->opened) {
+ cache_occupancy_inactive += env_atomic_read(
+ &core->runtime_meta->cached_clines);
+
+ dirty_blocks_inactive += env_atomic_read(
+ &core->runtime_meta->dirty_clines);
+ }
+ core_dirty_since = env_atomic64_read(
+ &core->runtime_meta->dirty_since);
+ if (core_dirty_since) {
+ dirty_since = (dirty_since ?
+ OCF_MIN(dirty_since, core_dirty_since) :
+ core_dirty_since);
+ }
+
+ flushed_total += env_atomic_read(&core->flushed);
+ }
+
+ info->dirty = dirty_blocks_total;
+ info->dirty_initial = initial_dirty_blocks_total;
+ info->occupancy = cache_occupancy_total;
+ info->dirty_for = _calc_dirty_for(dirty_since);
+ info->metadata_end_offset = ocf_cache_is_device_attached(cache) ?
+ cache->device->metadata_offset / PAGE_SIZE : 0;
+
+ info->state = cache->cache_state;
+
+ if (info->attached) {
+ _set(&info->inactive.occupancy,
+ _lines4k(cache_occupancy_inactive, ocf_line_size(cache)),
+ _lines4k(info->size, ocf_line_size(cache)));
+ _set(&info->inactive.clean,
+ _lines4k(cache_occupancy_inactive - dirty_blocks_inactive,
+ ocf_line_size(cache)),
+ _lines4k(cache_occupancy_total, ocf_line_size(cache)));
+ _set(&info->inactive.dirty,
+ _lines4k(dirty_blocks_inactive, ocf_line_size(cache)),
+ _lines4k(cache_occupancy_total, ocf_line_size(cache)));
+ }
+
+ info->flushed = (env_atomic_read(&cache->flush_in_progress)) ?
+ flushed_total : 0;
+
+ info->fallback_pt.status = ocf_fallback_pt_is_on(cache);
+ info->fallback_pt.error_counter =
+ env_atomic_read(&cache->fallback_pt_error_counter);
+
+ info->eviction_policy = cache->conf_meta->eviction_policy_type;
+ info->cleaning_policy = cache->conf_meta->cleaning_policy_type;
+ info->promotion_policy = cache->conf_meta->promotion_policy_type;
+ info->metadata_footprint = ocf_cache_is_device_attached(cache) ?
+ ocf_metadata_size_of(cache) : 0;
+ info->cache_line_size = ocf_line_size(cache);
+
+ return 0;
+}
+
+const struct ocf_volume_uuid *ocf_cache_get_uuid(ocf_cache_t cache)
+{
+ if (!ocf_cache_is_device_attached(cache))
+ return NULL;
+
+ return ocf_volume_get_uuid(ocf_cache_get_volume(cache));
+}
+
+uint8_t ocf_cache_get_type_id(ocf_cache_t cache)
+{
+ if (!ocf_cache_is_device_attached(cache))
+ return 0xff;
+
+ return ocf_ctx_get_volume_type_id(ocf_cache_get_ctx(cache),
+ ocf_volume_get_type(ocf_cache_get_volume(cache)));
+}
+
+ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return ocf_line_size(cache);
+}
+
+uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes)
+{
+ OCF_CHECK_NULL(cache);
+ return ocf_bytes_2_lines(cache, bytes);
+}
+
+uint32_t ocf_cache_get_core_count(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return cache->conf_meta->core_count;
+}
+
+ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return cache->owner;
+}
+
+void ocf_cache_set_priv(ocf_cache_t cache, void *priv)
+{
+ OCF_CHECK_NULL(cache);
+ cache->priv = priv;
+}
+
+void *ocf_cache_get_priv(ocf_cache_t cache)
+{
+ OCF_CHECK_NULL(cache);
+ return cache->priv;
+}
diff --git a/src/spdk/ocf/src/ocf_cache_priv.h b/src/spdk/ocf/src/ocf_cache_priv.h
new file mode 100644
index 000000000..17488ef1e
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_cache_priv.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_CACHE_PRIV_H__
+#define __OCF_CACHE_PRIV_H__
+
+#include "ocf/ocf.h"
+#include "ocf_env.h"
+#include "ocf_volume_priv.h"
+#include "ocf_core_priv.h"
+#include "metadata/metadata_structs.h"
+#include "metadata/metadata_partition_structs.h"
+#include "metadata/metadata_updater_priv.h"
+#include "utils/utils_list.h"
+#include "utils/utils_pipeline.h"
+#include "utils/utils_refcnt.h"
+#include "utils/utils_async_lock.h"
+#include "ocf_stats_priv.h"
+#include "cleaning/cleaning.h"
+#include "ocf_logger_priv.h"
+#include "ocf/ocf_trace.h"
+#include "promotion/promotion.h"
+#include "ocf_freelist.h"
+
+#define DIRTY_FLUSHED 1
+#define DIRTY_NOT_FLUSHED 0
+
+/**
+ * @brief Structure used for aggregating trace-related ocf_cache fields
+ */
+struct ocf_trace {
+ /* Placeholder for push_event callback */
+ ocf_trace_callback_t trace_callback;
+
+ /* Telemetry context */
+ void *trace_ctx;
+
+ env_atomic64 trace_seq_ref;
+};
+
+/**
+ * @brief Initialization mode of cache instance
+ */
+enum ocf_mngt_cache_init_mode {
+ /**
+ * @brief Set up an SSD as a new caching device
+ */
+ ocf_init_mode_init,
+
+ /**
+ * @brief Set up an SSD as a new caching device without saving cache
+ * metadata on the SSD.
+ *
+ * When using this initialization mode, loading the cache after shutdown
+ * is not possible
+ */
+ ocf_init_mode_metadata_volatile,
+
+ /**
+ * @brief Load pre-existing SSD cache state and restore all parameters
+ * to their previous configuration
+ */
+ ocf_init_mode_load,
+};
+
+/* Cache device */
+struct ocf_cache_device {
+ struct ocf_volume volume;
+
+ /* The hash table contains pointers to entries in the collision
+ * table, i.e. it actually stores collision table indexes.
+ * An invalid entry holds the value collision_table_entries.
+ */
+ unsigned int hash_table_entries;
+ unsigned int collision_table_entries;
+
+ int metadata_error;
+ /*!< This field indicates that an error during metadata IO
+ * occurred
+ */
+
+ uint64_t metadata_offset;
+
+ struct {
+ struct ocf_cache_line_concurrency *cache_line;
+ } concurrency;
+
+ enum ocf_mngt_cache_init_mode init_mode;
+
+ struct ocf_superblock_runtime *runtime_meta;
+};
+
+struct ocf_cache {
+ ocf_ctx_t owner;
+
+ struct list_head list;
+
+ /* unset running to not serve any more I/O requests */
+ unsigned long cache_state;
+
+ struct ocf_superblock_config *conf_meta;
+
+ struct ocf_cache_device *device;
+
+ struct ocf_lst lst_part;
+ struct ocf_user_part user_parts[OCF_IO_CLASS_MAX + 1];
+
+ struct ocf_metadata metadata;
+
+ ocf_freelist_t freelist;
+
+ ocf_eviction_t eviction_policy_init;
+
+ struct {
+ /* cache get/put counter */
+ struct ocf_refcnt cache;
+ /* # of requests potentially dirtying cachelines */
+ struct ocf_refcnt dirty;
+ /* # of requests accessing attached metadata, excluding
+ * management reqs */
+ struct ocf_refcnt metadata;
+ /* # of forced cleaning requests (eviction path) */
+ struct ocf_refcnt cleaning[OCF_IO_CLASS_MAX];
+ } refcnt;
+
+ uint32_t fallback_pt_error_threshold;
+ env_atomic fallback_pt_error_counter;
+
+ env_atomic pending_read_misses_list_blocked;
+ env_atomic pending_read_misses_list_count;
+
+ env_atomic last_access_ms;
+
+ env_atomic pending_eviction_clines;
+
+ struct list_head io_queues;
+ ocf_queue_t mngt_queue;
+
+ uint16_t ocf_core_inactive_count;
+ struct ocf_core core[OCF_CORE_MAX];
+
+ env_atomic flush_in_progress;
+
+ struct ocf_cleaner cleaner;
+ struct ocf_metadata_updater metadata_updater;
+ ocf_promotion_policy_t promotion_policy;
+
+ struct ocf_async_lock lock;
+
+ /*
+ * Most of the time this variable is set to 0, unless the user has
+ * requested interruption of the flushing process.
+ */
+ int flushing_interrupted;
+ env_mutex flush_mutex;
+
+ struct {
+ uint32_t max_queue_size;
+ uint32_t queue_unblock_size;
+ } backfill;
+
+ bool pt_unaligned_io;
+
+ bool use_submit_io_fast;
+
+ struct ocf_trace trace;
+
+ ocf_pipeline_t stop_pipeline;
+
+ void *priv;
+};
+
+static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
+ ocf_core_id_t core_id)
+{
+ if (core_id >= OCF_CORE_MAX)
+ return NULL;
+
+ return &cache->core[core_id];
+}
+
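+/*
+ * Note for the macros below: the for-loop condition uses the comma
+ * operator - _core is (re)assigned on every iteration, while the value of
+ * the whole expression, _id < OCF_CORE_MAX, terminates the loop.
+ */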
+#define for_each_core_all(_cache, _core, _id) \
+ for (_id = 0; _core = &_cache->core[_id], _id < OCF_CORE_MAX; _id++)
+
+#define for_each_core(_cache, _core, _id) \
+ for_each_core_all(_cache, _core, _id) \
+ if (_core->added)
+
+#define for_each_core_metadata(_cache, _core, _id) \
+ for_each_core_all(_cache, _core, _id) \
+ if (_core->conf_meta->valid)
+
+#define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \
+ ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "%s" prefix, \
+ fmt, ocf_cache_get_name(cache), ##__VA_ARGS__)
+
+#define ocf_cache_log(cache, lvl, fmt, ...) \
+ ocf_cache_log_prefix(cache, lvl, ": ", fmt, ##__VA_ARGS__)
+
+#define ocf_cache_log_rl(cache) \
+ ocf_log_rl(ocf_cache_get_ctx(cache))
+
+static inline uint64_t ocf_get_cache_occupancy(ocf_cache_t cache)
+{
+ uint64_t result = 0;
+ ocf_core_t core;
+ ocf_core_id_t core_id;
+
+ for_each_core(cache, core, core_id)
+ result += env_atomic_read(&core->runtime_meta->cached_clines);
+
+ return result;
+}
+
+int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size);
+
+#endif /* __OCF_CACHE_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_core.c b/src/spdk/ocf/src/ocf_core.c
new file mode 100644
index 000000000..080b5ca4e
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_core.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_priv.h"
+#include "ocf_core_priv.h"
+#include "ocf_io_priv.h"
+#include "metadata/metadata.h"
+#include "engine/cache_engine.h"
+#include "utils/utils_part.h"
+#include "ocf_request.h"
+#include "ocf_trace_priv.h"
+
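+/*
+ * A core is exposed to the user as an ocf_volume (the "OCF Core" type
+ * registered below); the volume's private data only stores a pointer back
+ * to the core, so the submit hooks can route I/O into the cache engine.
+ */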
+struct ocf_core_volume {
+ ocf_core_t core;
+};
+
+ocf_cache_t ocf_core_get_cache(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+ return core->volume.cache;
+}
+
+ocf_volume_t ocf_core_get_volume(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+ return &core->volume;
+}
+
+ocf_volume_t ocf_core_get_front_volume(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+ return &core->front_volume;
+}
+
+ocf_core_id_t ocf_core_get_id(ocf_core_t core)
+{
+ struct ocf_cache *cache;
+ ocf_core_id_t core_id;
+
+ OCF_CHECK_NULL(core);
+
+ cache = core->volume.cache;
+ core_id = core - cache->core;
+
+ return core_id;
+}
+
+int ocf_core_get_by_name(ocf_cache_t cache, const char *name, size_t name_len,
+ ocf_core_t *core)
+{
+ ocf_core_t i_core;
+ ocf_core_id_t i_core_id;
+
+ for_each_core(cache, i_core, i_core_id) {
+ if (!env_strncmp(ocf_core_get_name(i_core), OCF_CORE_NAME_SIZE,
+ name, name_len)) {
+ *core = i_core;
+ return 0;
+ }
+ }
+
+ return -OCF_ERR_CORE_NOT_EXIST;
+}
+
+const char *ocf_core_get_name(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+
+ return core->conf_meta->name;
+}
+
+ocf_core_state_t ocf_core_get_state(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+
+ return core->opened ?
+ ocf_core_state_active : ocf_core_state_inactive;
+}
+
+bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id)
+{
+ OCF_CHECK_NULL(cache);
+
+ if (id > OCF_CORE_ID_MAX)
+ return false;
+
+ if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
+ return false;
+
+ return true;
+}
+
+int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
+{
+ OCF_CHECK_NULL(cache);
+
+ if (!ocf_core_is_valid(cache, id))
+ return -OCF_ERR_CORE_NOT_AVAIL;
+
+ *core = &cache->core[id];
+ return 0;
+}
+
+uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
+{
+ return core->conf_meta->seq_cutoff_threshold;
+}
+
+ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
+{
+ return core->conf_meta->seq_cutoff_policy;
+}
+
+int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
+ bool only_opened)
+{
+ ocf_core_id_t id;
+ int result = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!visitor)
+ return -OCF_ERR_INVAL;
+
+ for (id = 0; id < OCF_CORE_MAX; id++) {
+ if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
+ continue;
+
+ if (only_opened && !cache->core[id].opened)
+ continue;
+
+ result = visitor(&cache->core[id], cntx);
+ if (result)
+ break;
+ }
+
+ return result;
+}
+
+/* *** HELPER FUNCTIONS *** */
+
+static uint32_t _calc_dirty_for(uint64_t dirty_since)
+{
+ return dirty_since ?
+ (env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000)
+ : 0;
+}
+
+static inline struct ocf_request *ocf_io_to_req(struct ocf_io *io)
+{
+ struct ocf_io_internal *ioi;
+
+ ioi = container_of(io, struct ocf_io_internal, io);
+ return container_of(ioi, struct ocf_request, ioi);
+}
+
+static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
+{
+ struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);
+
+ return core_volume->core;
+}
+
+static inline void dec_counter_if_req_was_dirty(struct ocf_request *req)
+{
+ if (!req->dirty)
+ return;
+
+ req->dirty = 0;
+ ocf_refcnt_dec(&req->cache->refcnt.dirty);
+}
+
+static inline int ocf_core_validate_io(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+ ocf_core_t core = ocf_volume_to_core(volume);
+
+ if (io->addr + io->bytes > ocf_volume_get_length(volume))
+ return -OCF_ERR_INVAL;
+
+ if (io->io_class >= OCF_IO_CLASS_MAX)
+ return -OCF_ERR_INVAL;
+
+ if (io->dir != OCF_READ && io->dir != OCF_WRITE)
+ return -OCF_ERR_INVAL;
+
+ if (!io->io_queue)
+ return -OCF_ERR_INVAL;
+
+ if (!io->end)
+ return -OCF_ERR_INVAL;
+
+ /* Core volume I/O must not be queued on management queue - this would
+ * break I/O accounting code, resulting in use-after-free type of errors
+ * after cache detach, core remove etc. */
+ if (io->io_queue == ocf_core_get_cache(core)->mngt_queue)
+ return -OCF_ERR_INVAL;
+
+ return 0;
+}
+
+static void ocf_req_complete(struct ocf_request *req, int error)
+{
+ /* Log trace */
+ ocf_trace_io_cmpl(req);
+
+ /* Complete IO */
+ ocf_io_end(&req->ioi.io, error);
+
+ dec_counter_if_req_was_dirty(req);
+
+ /* Invalidate OCF IO, it is not valid after completion */
+ ocf_io_put(&req->ioi.io);
+}
+
+void ocf_core_volume_submit_io(struct ocf_io *io)
+{
+ struct ocf_request *req;
+ ocf_core_t core;
+ ocf_cache_t cache;
+ int ret;
+
+ OCF_CHECK_NULL(io);
+
+ ret = ocf_core_validate_io(io);
+ if (ret < 0) {
+ ocf_io_end(io, ret);
+ return;
+ }
+
+ req = ocf_io_to_req(io);
+ core = ocf_volume_to_core(ocf_io_get_volume(io));
+ cache = ocf_core_get_cache(core);
+
+ ocf_trace_init_io(req);
+
+ if (unlikely(!env_bit_test(ocf_cache_state_running,
+ &cache->cache_state))) {
+ ocf_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
+ return;
+ }
+
+ req->part_id = ocf_part_class2id(cache, io->io_class);
+ req->core = core;
+ req->complete = ocf_req_complete;
+
+ ocf_resolve_effective_cache_mode(cache, core, req);
+
+ ocf_seq_cutoff_update(core, req);
+
+ ocf_core_update_stats(core, io);
+
+ if (io->dir == OCF_WRITE)
+ ocf_trace_io(req, ocf_event_operation_wr);
+ else if (io->dir == OCF_READ)
+ ocf_trace_io(req, ocf_event_operation_rd);
+
+ ocf_io_get(io);
+
+ ret = ocf_engine_hndl_req(req);
+ if (ret) {
+ dec_counter_if_req_was_dirty(req);
+ ocf_io_end(io, ret);
+ }
+}
+
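+/*
+ * Best-effort fast path: the I/O is handled only when
+ * ocf_engine_hndl_fast_req() can take it; in all other cases -OCF_ERR_IO
+ * is returned so the caller can fall back to the regular
+ * ocf_core_volume_submit_io() path.
+ */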
+int ocf_core_submit_io_fast(struct ocf_io *io)
+{
+ struct ocf_request *req;
+ struct ocf_event_io trace_event;
+ ocf_core_t core;
+ ocf_cache_t cache;
+ int fast;
+ int ret;
+
+ OCF_CHECK_NULL(io);
+
+ ret = ocf_core_validate_io(io);
+ if (ret < 0)
+ return ret;
+
+ req = ocf_io_to_req(io);
+ core = ocf_volume_to_core(ocf_io_get_volume(io));
+ cache = ocf_core_get_cache(core);
+
+ if (unlikely(!env_bit_test(ocf_cache_state_running,
+ &cache->cache_state))) {
+ ocf_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
+ return 0;
+ }
+
+ if (req->d2c) {
+ dec_counter_if_req_was_dirty(req);
+ return -OCF_ERR_IO;
+ }
+
+ ret = ocf_req_alloc_map(req);
+ if (ret) {
+ ocf_io_end(io, -OCF_ERR_NO_MEM);
+ return 0;
+ }
+
+ req->core = core;
+ req->complete = ocf_req_complete;
+ req->part_id = ocf_part_class2id(cache, io->io_class);
+
+ ocf_resolve_effective_cache_mode(cache, core, req);
+
+ switch (req->cache_mode) {
+ case ocf_req_cache_mode_pt:
+ return -OCF_ERR_IO;
+ case ocf_req_cache_mode_wb:
+ case ocf_req_cache_mode_wo:
+ req->cache_mode = ocf_req_cache_mode_fast;
+ break;
+ default:
+ if (cache->use_submit_io_fast)
+ break;
+ if (io->dir == OCF_WRITE)
+ return -OCF_ERR_IO;
+
+ req->cache_mode = ocf_req_cache_mode_fast;
+ }
+
+ ocf_core_update_stats(core, io);
+
+ if (cache->trace.trace_callback) {
+ if (io->dir == OCF_WRITE)
+ ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_wr);
+ else if (io->dir == OCF_READ)
+ ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_rd);
+ }
+
+ ocf_io_get(io);
+
+ fast = ocf_engine_hndl_fast_req(req);
+ if (fast != OCF_FAST_PATH_NO) {
+ ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
+ ocf_seq_cutoff_update(core, req);
+ return 0;
+ }
+
+ dec_counter_if_req_was_dirty(req);
+
+ ocf_io_put(io);
+ return -OCF_ERR_IO;
+}
+
+static void ocf_core_volume_submit_flush(struct ocf_io *io)
+{
+ struct ocf_request *req;
+ ocf_core_t core;
+ ocf_cache_t cache;
+ int ret;
+
+ OCF_CHECK_NULL(io);
+
+ ret = ocf_core_validate_io(io);
+ if (ret < 0) {
+ ocf_io_end(io, ret);
+ return;
+ }
+
+ req = ocf_io_to_req(io);
+ core = ocf_volume_to_core(ocf_io_get_volume(io));
+ cache = ocf_core_get_cache(core);
+
+ if (unlikely(!env_bit_test(ocf_cache_state_running,
+ &cache->cache_state))) {
+ ocf_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
+ return;
+ }
+
+ req->core = core;
+ req->complete = ocf_req_complete;
+
+ ocf_trace_io(req, ocf_event_operation_flush);
+ ocf_io_get(io);
+
+ ocf_engine_hndl_ops_req(req);
+}
+
+static void ocf_core_volume_submit_discard(struct ocf_io *io)
+{
+ struct ocf_request *req;
+ ocf_core_t core;
+ ocf_cache_t cache;
+ int ret;
+
+ OCF_CHECK_NULL(io);
+
+ ret = ocf_core_validate_io(io);
+ if (ret < 0) {
+ ocf_io_end(io, ret);
+ return;
+ }
+
+ req = ocf_io_to_req(io);
+ core = ocf_volume_to_core(ocf_io_get_volume(io));
+ cache = ocf_core_get_cache(core);
+
+ if (unlikely(!env_bit_test(ocf_cache_state_running,
+ &cache->cache_state))) {
+ ocf_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
+ return;
+ }
+
+ ret = ocf_req_alloc_map_discard(req);
+ if (ret) {
+ ocf_io_end(io, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ req->core = core;
+ req->complete = ocf_req_complete;
+
+ ocf_trace_io(req, ocf_event_operation_discard);
+ ocf_io_get(io);
+
+ ocf_engine_hndl_discard_req(req);
+}
+
+/* *** VOLUME OPS *** */
+
+static int ocf_core_volume_open(ocf_volume_t volume, void *volume_params)
+{
+ struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);
+ const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
+ ocf_core_t core = (ocf_core_t)uuid->data;
+
+ core_volume->core = core;
+
+ return 0;
+}
+
+static void ocf_core_volume_close(ocf_volume_t volume)
+{
+}
+
+static unsigned int ocf_core_volume_get_max_io_size(ocf_volume_t volume)
+{
+ ocf_core_t core = ocf_volume_to_core(volume);
+
+ return ocf_volume_get_max_io_size(&core->volume);
+}
+
+static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
+{
+ ocf_core_t core = ocf_volume_to_core(volume);
+
+ return ocf_volume_get_length(&core->volume);
+}
+
+
+/* *** IO OPS *** */
+
+static int ocf_core_io_set_data(struct ocf_io *io,
+ ctx_data_t *data, uint32_t offset)
+{
+ struct ocf_request *req;
+
+ OCF_CHECK_NULL(io);
+
+ if (!data || offset)
+ return -OCF_ERR_INVAL;
+
+ req = ocf_io_to_req(io);
+ req->data = data;
+
+ return 0;
+}
+
+static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
+{
+ struct ocf_request *req;
+
+ OCF_CHECK_NULL(io);
+
+ req = ocf_io_to_req(io);
+ return req->data;
+}
+
+const struct ocf_volume_properties ocf_core_volume_properties = {
+ .name = "OCF Core",
+ .io_priv_size = 0, /* Not used - custom allocator */
+ .volume_priv_size = sizeof(struct ocf_core_volume),
+ .caps = {
+ .atomic_writes = 0,
+ },
+ .ops = {
+ .submit_io = ocf_core_volume_submit_io,
+ .submit_flush = ocf_core_volume_submit_flush,
+ .submit_discard = ocf_core_volume_submit_discard,
+ .submit_metadata = NULL,
+
+ .open = ocf_core_volume_open,
+ .close = ocf_core_volume_close,
+ .get_max_io_size = ocf_core_volume_get_max_io_size,
+ .get_length = ocf_core_volume_get_byte_length,
+ },
+ .io_ops = {
+ .set_data = ocf_core_io_set_data,
+ .get_data = ocf_core_io_get_data,
+ },
+ .deinit = NULL,
+};
+
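+/*
+ * Core volume I/Os use a custom allocator: each struct ocf_io is embedded
+ * in a struct ocf_request (the ioi member), so allocator_new/allocator_del
+ * simply create and release requests.
+ */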
+static int ocf_core_io_allocator_init(ocf_io_allocator_t allocator,
+ uint32_t priv_size, const char *name)
+{
+ return 0;
+}
+
+static void ocf_core_io_allocator_deinit(ocf_io_allocator_t allocator)
+{
+}
+
+static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator,
+ ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir)
+{
+ struct ocf_request *req;
+
+ req = ocf_req_new(queue, NULL, addr, bytes, dir);
+ if (!req)
+ return NULL;
+
+ return &req->ioi;
+}
+
+static void ocf_core_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
+{
+ struct ocf_request *req;
+
+ req = container_of(obj, struct ocf_request, ioi);
+ ocf_req_put(req);
+}
+
+const struct ocf_io_allocator_type ocf_core_io_allocator_type = {
+ .ops = {
+ .allocator_init = ocf_core_io_allocator_init,
+ .allocator_deinit = ocf_core_io_allocator_deinit,
+ .allocator_new = ocf_core_io_allocator_new,
+ .allocator_del = ocf_core_io_allocator_del,
+ },
+};
+
+const struct ocf_volume_extended ocf_core_volume_extended = {
+ .allocator_type = &ocf_core_io_allocator_type,
+};
+
+int ocf_core_volume_type_init(ocf_ctx_t ctx)
+{
+ return ocf_ctx_register_volume_type_extended(ctx, 0,
+ &ocf_core_volume_properties,
+ &ocf_core_volume_extended);
+}
+
+int ocf_core_get_info(ocf_core_t core, struct ocf_core_info *info)
+{
+ ocf_cache_t cache;
+
+ OCF_CHECK_NULL(core);
+
+ cache = ocf_core_get_cache(core);
+
+ if (!info)
+ return -OCF_ERR_INVAL;
+
+ ENV_BUG_ON(env_memset(info, sizeof(*info), 0));
+
+ info->core_size_bytes = ocf_volume_get_length(&core->volume);
+ info->core_size = ocf_bytes_2_lines_round_up(cache,
+ info->core_size_bytes);
+ info->seq_cutoff_threshold = ocf_core_get_seq_cutoff_threshold(core);
+ info->seq_cutoff_policy = ocf_core_get_seq_cutoff_policy(core);
+
+ info->flushed = env_atomic_read(&core->flushed);
+ info->dirty = env_atomic_read(&core->runtime_meta->dirty_clines);
+
+ info->dirty_for = _calc_dirty_for(
+ env_atomic64_read(&core->runtime_meta->dirty_since));
+
+ return 0;
+}
+
+void ocf_core_set_priv(ocf_core_t core, void *priv)
+{
+ OCF_CHECK_NULL(core);
+ core->priv = priv;
+}
+
+void *ocf_core_get_priv(ocf_core_t core)
+{
+ OCF_CHECK_NULL(core);
+ return core->priv;
+}
diff --git a/src/spdk/ocf/src/ocf_core_priv.h b/src/spdk/ocf/src/ocf_core_priv.h
new file mode 100644
index 000000000..722cfba89
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_core_priv.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_CORE_PRIV_H__
+#define __OCF_CORE_PRIV_H__
+
+#include "ocf/ocf.h"
+#include "ocf_env.h"
+#include "ocf_ctx_priv.h"
+#include "ocf_volume_priv.h"
+
+#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \
+ ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, ".%s" prefix, \
+ fmt, ocf_core_get_name(core), ##__VA_ARGS__)
+
+#define ocf_core_log(core, lvl, fmt, ...) \
+ ocf_core_log_prefix(core, lvl, ": ", fmt, ##__VA_ARGS__)
+
+struct ocf_metadata_uuid {
+ uint32_t size;
+ uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];
+} __packed;
+
+#define OCF_CORE_USER_DATA_SIZE 64
+
+struct ocf_core_meta_config {
+ char name[OCF_CORE_NAME_SIZE];
+
+ uint8_t type;
+
+ /* This bit means that the object was saved in cache metadata */
+ uint32_t valid : 1;
+
+ /* Core sequence number used to correlate cache lines with cores
+ * when recovering from atomic device */
+ ocf_seq_no_t seq_no;
+
+ /* Sequential cutoff threshold (in bytes) */
+ uint32_t seq_cutoff_threshold;
+
+ /* Sequential cutoff policy */
+ ocf_seq_cutoff_policy seq_cutoff_policy;
+
+ /* core object size in bytes */
+ uint64_t length;
+
+ uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
+};
+
+struct ocf_core_meta_runtime {
+ /* Number of blocks from this core object that are currently cached
+ * on the caching device.
+ */
+ env_atomic cached_clines;
+ env_atomic dirty_clines;
+ env_atomic initial_dirty_clines;
+
+ env_atomic64 dirty_since;
+
+ struct {
+ /* clines within lru list (?) */
+ env_atomic cached_clines;
+ /* dirty clines assigned to this specific partition within
+ * cache device
+ */
+ env_atomic dirty_clines;
+ } part_counters[OCF_IO_CLASS_MAX];
+};
+
+
+struct ocf_core {
+ struct ocf_volume front_volume;
+ struct ocf_volume volume;
+
+ struct ocf_core_meta_config *conf_meta;
+ struct ocf_core_meta_runtime *runtime_meta;
+
+ struct {
+ uint64_t last;
+ uint64_t bytes;
+ int rw;
+ } seq_cutoff;
+
+ env_atomic flushed;
+
+ /* This bit means that object is open */
+ uint32_t opened : 1;
+ /* This bit means that core is added into cache */
+ uint32_t added : 1;
+
+ struct ocf_counters_core *counters;
+
+ void *priv;
+};
+
+bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id);
+
+ocf_core_id_t ocf_core_get_id(ocf_core_t core);
+
+int ocf_core_volume_type_init(ocf_ctx_t ctx);
+
+#endif /* __OCF_CORE_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_ctx.c b/src/spdk/ocf/src/ocf_ctx.c
new file mode 100644
index 000000000..cab484d2b
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_ctx.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_ctx_priv.h"
+#include "ocf_priv.h"
+#include "ocf_volume_priv.h"
+#include "ocf_request.h"
+#include "ocf_logger_priv.h"
+#include "ocf_core_priv.h"
+#include "mngt/ocf_mngt_core_pool_priv.h"
+
+/*
+ *
+ */
+int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
+ const struct ocf_volume_properties *properties,
+ const struct ocf_volume_extended *extended)
+{
+ int result = 0;
+
+ if (!ctx || !properties)
+ return -EINVAL;
+
+ env_rmutex_lock(&ctx->lock);
+
+ if (type_id >= OCF_VOLUME_TYPE_MAX || ctx->volume_type[type_id]) {
+ env_rmutex_unlock(&ctx->lock);
+ result = -EINVAL;
+ goto err;
+ }
+
+ ocf_volume_type_init(&ctx->volume_type[type_id], properties, extended);
+ if (!ctx->volume_type[type_id])
+ result = -EINVAL;
+
+ env_rmutex_unlock(&ctx->lock);
+
+ if (result)
+ goto err;
+
+ ocf_log(ctx, log_debug, "'%s' volume operations registered\n",
+ properties->name);
+ return 0;
+
+err:
+ ocf_log(ctx, log_err, "Failed to register volume operations '%s'\n",
+ properties->name);
+ return result;
+}
+
+int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id,
+ const struct ocf_volume_properties *properties)
+{
+ return ocf_ctx_register_volume_type_extended(ctx, type_id,
+ properties, NULL);
+}
+
+/*
+ *
+ */
+void ocf_ctx_unregister_volume_type(ocf_ctx_t ctx, uint8_t type_id)
+{
+ OCF_CHECK_NULL(ctx);
+
+ env_rmutex_lock(&ctx->lock);
+
+ if (type_id < OCF_VOLUME_TYPE_MAX && ctx->volume_type[type_id]) {
+ ocf_volume_type_deinit(ctx->volume_type[type_id]);
+ ctx->volume_type[type_id] = NULL;
+ }
+
+ env_rmutex_unlock(&ctx->lock);
+}
+
+/*
+ *
+ */
+ocf_volume_type_t ocf_ctx_get_volume_type(ocf_ctx_t ctx, uint8_t type_id)
+{
+ OCF_CHECK_NULL(ctx);
+
+ if (type_id >= OCF_VOLUME_TYPE_MAX)
+ return NULL;
+
+ return ctx->volume_type[type_id];
+}
+
+/*
+ *
+ */
+int ocf_ctx_get_volume_type_id(ocf_ctx_t ctx, ocf_volume_type_t type)
+{
+ int i;
+
+ OCF_CHECK_NULL(ctx);
+
+ for (i = 0; i < OCF_VOLUME_TYPE_MAX; ++i) {
+ if (ctx->volume_type[i] == type)
+ return i;
+ }
+
+ return -1;
+}
+
+/*
+ *
+ */
+int ocf_ctx_volume_create(ocf_ctx_t ctx, ocf_volume_t *volume,
+ struct ocf_volume_uuid *uuid, uint8_t type_id)
+{
+ OCF_CHECK_NULL(ctx);
+
+ if (type_id >= OCF_VOLUME_TYPE_MAX)
+ return -EINVAL;
+
+ return ocf_volume_create(volume, ctx->volume_type[type_id], uuid);
+}
+
+static void check_ops_provided(const struct ocf_ctx_ops *ops)
+{
+ ENV_BUG_ON(!ops->data.alloc);
+ ENV_BUG_ON(!ops->data.free);
+ ENV_BUG_ON(!ops->data.mlock);
+ ENV_BUG_ON(!ops->data.munlock);
+ ENV_BUG_ON(!ops->data.read);
+ ENV_BUG_ON(!ops->data.write);
+ ENV_BUG_ON(!ops->data.zero);
+ ENV_BUG_ON(!ops->data.seek);
+ ENV_BUG_ON(!ops->data.copy);
+ ENV_BUG_ON(!ops->data.secure_erase);
+
+ ENV_BUG_ON(!ops->cleaner.init);
+ ENV_BUG_ON(!ops->cleaner.kick);
+ ENV_BUG_ON(!ops->cleaner.stop);
+
+ ENV_BUG_ON(!ops->metadata_updater.init);
+ ENV_BUG_ON(!ops->metadata_updater.kick);
+ ENV_BUG_ON(!ops->metadata_updater.stop);
+}
+
+/*
+ *
+ */
+int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg)
+{
+ ocf_ctx_t ocf_ctx;
+ int ret;
+
+ OCF_CHECK_NULL(ctx);
+ OCF_CHECK_NULL(cfg);
+
+ check_ops_provided(&cfg->ops);
+
+ ocf_ctx = env_zalloc(sizeof(*ocf_ctx), ENV_MEM_NORMAL);
+ if (!ocf_ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ocf_ctx->caches);
+ env_atomic_set(&ocf_ctx->ref_count, 1);
+ ret = env_rmutex_init(&ocf_ctx->lock);
+ if (ret)
+ goto err_ctx;
+
+ ocf_ctx->ops = &cfg->ops;
+ ocf_ctx->cfg = cfg;
+
+ ocf_logger_init(&ocf_ctx->logger, &cfg->ops.logger, cfg->logger_priv);
+
+ ret = ocf_logger_open(&ocf_ctx->logger);
+ if (ret)
+ goto err_ctx;
+
+ ret = ocf_req_allocator_init(ocf_ctx);
+ if (ret)
+ goto err_logger;
+
+ ret = ocf_core_volume_type_init(ocf_ctx);
+ if (ret)
+ goto err_utils;
+
+ ocf_mngt_core_pool_init(ocf_ctx);
+
+ *ctx = ocf_ctx;
+
+ return 0;
+
+err_utils:
+ ocf_req_allocator_deinit(ocf_ctx);
+err_logger:
+ ocf_logger_close(&ocf_ctx->logger);
+err_ctx:
+ env_free(ocf_ctx);
+ return ret;
+}
+
+/*
+ *
+ */
+void ocf_ctx_get(ocf_ctx_t ctx)
+{
+ OCF_CHECK_NULL(ctx);
+
+ env_atomic_inc(&ctx->ref_count);
+}
+
+/*
+ *
+ */
+static void ocf_ctx_unregister_volume_types(ocf_ctx_t ctx)
+{
+ int id;
+
+ for (id = 0; id < OCF_VOLUME_TYPE_MAX; id++)
+ ocf_ctx_unregister_volume_type(ctx, id);
+}
+
+/*
+ *
+ */
+void ocf_ctx_put(ocf_ctx_t ctx)
+{
+ OCF_CHECK_NULL(ctx);
+
+ if (env_atomic_dec_return(&ctx->ref_count))
+ return;
+
+ env_rmutex_lock(&ctx->lock);
+ ENV_BUG_ON(!list_empty(&ctx->caches));
+ env_rmutex_unlock(&ctx->lock);
+
+ ocf_mngt_core_pool_deinit(ctx);
+ ocf_ctx_unregister_volume_types(ctx);
+ env_rmutex_destroy(&ctx->lock);
+
+ ocf_req_allocator_deinit(ctx);
+ ocf_logger_close(&ctx->logger);
+ env_free(ctx);
+}
diff --git a/src/spdk/ocf/src/ocf_ctx_priv.h b/src/spdk/ocf/src/ocf_ctx_priv.h
new file mode 100644
index 000000000..f7a41d23e
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_ctx_priv.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_CTX_PRIV_H__
+#define __OCF_CTX_PRIV_H__
+
+#include "ocf_env.h"
+#include "ocf/ocf_ctx.h"
+#include "ocf_logger_priv.h"
+#include "ocf_volume_priv.h"
+
+#define OCF_VOLUME_TYPE_MAX 8
+
+/**
+ * @brief OCF main control structure
+ */
+struct ocf_ctx {
+ const struct ocf_ctx_ops *ops;
+ const struct ocf_ctx_config *cfg;
+ struct ocf_logger logger;
+ struct ocf_volume_type *volume_type[OCF_VOLUME_TYPE_MAX];
+ env_atomic ref_count;
+ env_rmutex lock;
+ struct list_head caches;
+ struct {
+ struct list_head core_pool_head;
+ int core_pool_count;
+ } core_pool;
+
+ struct {
+ struct ocf_req_allocator *req;
+ } resources;
+};
+
+#define ocf_log_prefix(ctx, lvl, prefix, fmt, ...) \
+ ocf_log_raw(&ctx->logger, lvl, prefix fmt, ##__VA_ARGS__)
+
+#define ocf_log(ctx, lvl, fmt, ...) \
+ ocf_log_prefix(ctx, lvl, "", fmt, ##__VA_ARGS__)
+
+#define ocf_log_rl(ctx) \
+ ocf_log_raw_rl(&ctx->logger, __func__)
+
+#define ocf_log_stack_trace(ctx) \
+ ocf_log_stack_trace_raw(&ctx->logger)
+
+int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
+ const struct ocf_volume_properties *properties,
+ const struct ocf_volume_extended *extended);
+
+/**
+ * @name Environment data buffer operations wrappers
+ * @{
+ */
+static inline void *ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages)
+{
+ return ctx->ops->data.alloc(pages);
+}
+
+static inline void ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data)
+{
+ ctx->ops->data.free(data);
+}
+
+static inline int ctx_data_mlock(ocf_ctx_t ctx, ctx_data_t *data)
+{
+ return ctx->ops->data.mlock(data);
+}
+
+static inline void ctx_data_munlock(ocf_ctx_t ctx, ctx_data_t *data)
+{
+ ctx->ops->data.munlock(data);
+}
+
+static inline uint32_t ctx_data_rd(ocf_ctx_t ctx, void *dst,
+ ctx_data_t *src, uint32_t size)
+{
+ return ctx->ops->data.read(dst, src, size);
+}
+
+static inline uint32_t ctx_data_wr(ocf_ctx_t ctx, ctx_data_t *dst,
+ const void *src, uint32_t size)
+{
+ return ctx->ops->data.write(dst, src, size);
+}
+
+static inline void ctx_data_rd_check(ocf_ctx_t ctx, void *dst,
+ ctx_data_t *src, uint32_t size)
+{
+ uint32_t read = ctx_data_rd(ctx, dst, src, size);
+
+ ENV_BUG_ON(read != size);
+}
+
+static inline void ctx_data_wr_check(ocf_ctx_t ctx, ctx_data_t *dst,
+ const void *src, uint32_t size)
+{
+ uint32_t written = ctx_data_wr(ctx, dst, src, size);
+
+ ENV_BUG_ON(written != size);
+}
+
+static inline uint32_t ctx_data_zero(ocf_ctx_t ctx, ctx_data_t *dst,
+ uint32_t size)
+{
+ return ctx->ops->data.zero(dst, size);
+}
+
+static inline void ctx_data_zero_check(ocf_ctx_t ctx, ctx_data_t *dst,
+ uint32_t size)
+{
+ uint32_t zerored = ctx_data_zero(ctx, dst, size);
+
+ ENV_BUG_ON(zerored != size);
+}
+
+static inline uint32_t ctx_data_seek(ocf_ctx_t ctx, ctx_data_t *dst,
+ ctx_data_seek_t seek, uint32_t size)
+{
+ return ctx->ops->data.seek(dst, seek, size);
+}
+
+static inline void ctx_data_seek_check(ocf_ctx_t ctx, ctx_data_t *dst,
+ ctx_data_seek_t seek, uint32_t size)
+{
+ uint32_t bytes = ctx_data_seek(ctx, dst, seek, size);
+
+ ENV_BUG_ON(bytes != size);
+}
+
+static inline uint64_t ctx_data_cpy(ocf_ctx_t ctx, ctx_data_t *dst, ctx_data_t *src,
+ uint64_t to, uint64_t from, uint64_t bytes)
+{
+ return ctx->ops->data.copy(dst, src, to, from, bytes);
+}
+
+static inline void ctx_data_secure_erase(ocf_ctx_t ctx, ctx_data_t *dst)
+{
+ return ctx->ops->data.secure_erase(dst);
+}
+
+static inline int ctx_cleaner_init(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
+{
+ return ctx->ops->cleaner.init(cleaner);
+}
+
+static inline void ctx_cleaner_stop(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
+{
+ ctx->ops->cleaner.stop(cleaner);
+}
+
+static inline void ctx_cleaner_kick(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
+{
+ ctx->ops->cleaner.kick(cleaner);
+}
+
+static inline int ctx_metadata_updater_init(ocf_ctx_t ctx,
+ ocf_metadata_updater_t mu)
+{
+ return ctx->ops->metadata_updater.init(mu);
+}
+
+static inline void ctx_metadata_updater_kick(ocf_ctx_t ctx,
+ ocf_metadata_updater_t mu)
+{
+ ctx->ops->metadata_updater.kick(mu);
+}
+
+static inline void ctx_metadata_updater_stop(ocf_ctx_t ctx,
+ ocf_metadata_updater_t mu)
+{
+ ctx->ops->metadata_updater.stop(mu);
+}
+
+/**
+ * @}
+ */
+
+#endif /* __OCF_CTX_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_def_priv.h b/src/spdk/ocf/src/ocf_def_priv.h
new file mode 100644
index 000000000..bf399785c
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_def_priv.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_DEF_PRIV_H__
+#define __OCF_DEF_PRIV_H__
+
+#include "ocf/ocf.h"
+#include "ocf_env.h"
+
+#define BYTES_TO_SECTORS(x) ((x) >> ENV_SECTOR_SHIFT)
+#define SECTORS_TO_BYTES(x) ((x) << ENV_SECTOR_SHIFT)
+
+#define BYTES_TO_PAGES(x) ((((uint64_t)x) + (PAGE_SIZE - 1)) / PAGE_SIZE)
+#define PAGES_TO_BYTES(x) (((uint64_t)x) * PAGE_SIZE)
+
+#define OCF_DIV_ROUND_UP(x, y) \
+ ({ \
+ __typeof__ (x) __x = (x); \
+ __typeof__ (y) __y = (y); \
+ (__x + __y - 1) / __y; \
+ })
+
+#define OCF_MAX(x,y) \
+ ({ \
+ __typeof__ (x) __x = (x); \
+ __typeof__ (y) __y = (y); \
+ __x > __y ? __x : __y; \
+ })
+
+#define OCF_MIN(x,y) \
+ ({ \
+ __typeof__ (x) __x = (x); \
+ __typeof__ (y) __y = (y); \
+ __x < __y ? __x : __y; \
+ })
+
+#define METADATA_VERSION() ((OCF_VERSION_MAIN << 16) + \
+ (OCF_VERSION_MAJOR << 8) + OCF_VERSION_MINOR)
+
+/* call conditional reschedule every 'iterations' calls */
+#define OCF_COND_RESCHED(cnt, iterations) \
+ if (unlikely(++(cnt) == (iterations))) { \
+ env_cond_resched(); \
+ (cnt) = 0; \
+ }
+
+/* call conditional reschedule with default interval */
+#define OCF_COND_RESCHED_DEFAULT(cnt) OCF_COND_RESCHED(cnt, 1000000)
+
+#endif
diff --git a/src/spdk/ocf/src/ocf_freelist.c b/src/spdk/ocf/src/ocf_freelist.c
new file mode 100644
index 000000000..bf2c60f58
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_freelist.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+
+struct ocf_part {
+ ocf_cache_line_t head;
+ ocf_cache_line_t tail;
+ env_atomic64 curr_size;
+};
+
+struct ocf_freelist {
+ /* parent cache */
+ struct ocf_cache *cache;
+
+ /* partition list array */
+ struct ocf_part *part;
+
+ /* freelist lock array */
+ env_spinlock *lock;
+
+ /* number of free lists */
+ uint32_t count;
+
+ /* next slowpath victim idx */
+ env_atomic slowpath_victim_idx;
+
+ /* total number of free lines */
+ env_atomic64 total_free;
+};
+
+static void ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
+{
+ env_spinlock_lock(&freelist->lock[ctx]);
+}
+
+static int ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
+{
+ return env_spinlock_trylock(&freelist->lock[ctx]);
+}
+
+static void ocf_freelist_unlock(ocf_freelist_t freelist, uint32_t ctx)
+{
+ env_spinlock_unlock(&freelist->lock[ctx]);
+}
+
+/* Sets the given collision_index as the new _head_ of the Partition list. */
+static void _ocf_freelist_remove_cache_line(ocf_freelist_t freelist,
+ uint32_t ctx, ocf_cache_line_t cline)
+{
+ struct ocf_cache *cache = freelist->cache;
+ struct ocf_part *freelist_part = &freelist->part[ctx];
+ int is_head, is_tail;
+ ocf_part_id_t invalid_part_id = PARTITION_INVALID;
+ ocf_cache_line_t prev, next;
+ ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
+ freelist->cache);
+ uint32_t free;
+
+ ENV_BUG_ON(cline >= line_entries);
+
+ /* Get Partition info */
+ ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);
+
+ /* Find out if this node is Partition _head_ */
+ is_head = (prev == line_entries);
+ is_tail = (next == line_entries);
+
+ free = env_atomic64_read(&freelist_part->curr_size);
+
+ /* Case 1: If we are head and there is only one node. So unlink node
+ * and set that there is no node left in the list.
+ */
+ if (is_head && free == 1) {
+ ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
+ line_entries, line_entries);
+ freelist_part->head = line_entries;
+ freelist_part->tail = line_entries;
+ } else if (is_head) {
+ /* Case 2: else if this collision_index is partition list head,
+ * but many nodes, update head and return
+ */
+ ENV_BUG_ON(next >= line_entries);
+
+ freelist_part->head = next;
+ ocf_metadata_set_partition_prev(cache, next, line_entries);
+ ocf_metadata_set_partition_next(cache, cline, line_entries);
+ } else if (is_tail) {
+ /* Case 3: else if this cline is partition list tail */
+ ENV_BUG_ON(prev >= line_entries);
+
+ freelist_part->tail = prev;
+ ocf_metadata_set_partition_prev(cache, cline, line_entries);
+ ocf_metadata_set_partition_next(cache, prev, line_entries);
+ } else {
+ /* Case 4: else this collision_index is a middle node.
+ * There is no change to the head and the tail pointers.
+ */
+
+ ENV_BUG_ON(next >= line_entries || prev >= line_entries);
+
+ /* Update prev and next nodes */
+ ocf_metadata_set_partition_prev(cache, next, prev);
+ ocf_metadata_set_partition_next(cache, prev, next);
+
+ /* Update the given node */
+ ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
+ line_entries, line_entries);
+ }
+
+ env_atomic64_dec(&freelist_part->curr_size);
+ env_atomic64_dec(&freelist->total_free);
+}
+
+static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
+ ocf_cache_line_t phys)
+{
+ ocf_cache_line_t lg;
+ ocf_cache_line_t collision_table_entries =
+ ocf_metadata_collision_table_entries(cache);
+
+ if (phys == collision_table_entries)
+ return collision_table_entries;
+
+ lg = ocf_metadata_map_phy2lg(cache, phys);
+ while (metadata_test_valid_any(cache, lg)) {
+ ++phys;
+
+ if (phys == collision_table_entries)
+ break;
+
+ lg = ocf_metadata_map_phy2lg(cache, phys);
+ }
+
+ return phys;
+}
+
+/* Assign unused cachelines to freelist */
+void ocf_freelist_populate(ocf_freelist_t freelist,
+ ocf_cache_line_t num_free_clines)
+{
+ unsigned step = 0;
+ ocf_cache_t cache = freelist->cache;
+ unsigned num_freelists = freelist->count;
+ ocf_cache_line_t prev, next, idx;
+ ocf_cache_line_t phys;
+ ocf_cache_line_t collision_table_entries =
+ ocf_metadata_collision_table_entries(cache);
+ unsigned freelist_idx;
+ uint64_t freelist_size;
+
+ phys = 0;
+ for (freelist_idx = 0; freelist_idx < num_freelists; freelist_idx++)
+ {
+ /* calculate current freelist size */
+ freelist_size = num_free_clines / num_freelists;
+ if (freelist_idx < (num_free_clines % num_freelists))
+ ++freelist_size;
+
+ env_atomic64_set(&freelist->part[freelist_idx].curr_size,
+ freelist_size);
+
+ if (!freelist_size) {
+ /* init empty freelist and move to next one */
+ freelist->part[freelist_idx].head =
+ collision_table_entries;
+ freelist->part[freelist_idx].tail =
+ collision_table_entries;
+ continue;
+ }
+
+ /* find first invalid cacheline */
+ phys = next_phys_invalid(cache, phys);
+ ENV_BUG_ON(phys == collision_table_entries);
+ idx = ocf_metadata_map_phy2lg(cache, phys);
+ ++phys;
+
+ /* store freelist head */
+ freelist->part[freelist_idx].head = idx;
+
+ /* link freelist elements using partition list */
+ prev = collision_table_entries;
+ while (--freelist_size) {
+ phys = next_phys_invalid(cache, phys);
+ ENV_BUG_ON(phys == collision_table_entries);
+ next = ocf_metadata_map_phy2lg(cache, phys);
+ ++phys;
+
+ ocf_metadata_set_partition_info(cache, idx,
+ PARTITION_INVALID, next, prev);
+
+ prev = idx;
+ idx = next;
+
+ OCF_COND_RESCHED_DEFAULT(step);
+ }
+
+ /* terminate partition list */
+ ocf_metadata_set_partition_info(cache, idx, PARTITION_INVALID,
+ collision_table_entries, prev);
+
+ /* store freelist tail */
+ freelist->part[freelist_idx].tail = idx;
+ }
+
+ /* we should have reached the last invalid cache line */
+ phys = next_phys_invalid(cache, phys);
+ ENV_BUG_ON(phys != collision_table_entries);
+
+ env_atomic64_set(&freelist->total_free, num_free_clines);
+}
+
+static void ocf_freelist_add_cache_line(ocf_freelist_t freelist,
+ uint32_t ctx, ocf_cache_line_t line)
+{
+ struct ocf_cache *cache = freelist->cache;
+ struct ocf_part *freelist_part = &freelist->part[ctx];
+ ocf_cache_line_t tail;
+ ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
+ freelist->cache);
+ ocf_part_id_t invalid_part_id = PARTITION_INVALID;
+
+ ENV_BUG_ON(line >= line_entries);
+
+ if (env_atomic64_read(&freelist_part->curr_size) == 0) {
+ freelist_part->head = line;
+ freelist_part->tail = line;
+
+ ocf_metadata_set_partition_info(cache, line, invalid_part_id,
+ line_entries, line_entries);
+ } else {
+ tail = freelist_part->tail;
+
+ ENV_BUG_ON(tail >= line_entries);
+
+ ocf_metadata_set_partition_info(cache, line, invalid_part_id,
+ line_entries, tail);
+ ocf_metadata_set_partition_next(cache, tail, line);
+
+ freelist_part->tail = line;
+ }
+
+ env_atomic64_inc(&freelist_part->curr_size);
+ env_atomic64_inc(&freelist->total_free);
+}
+
+typedef enum {
+ OCF_FREELIST_ERR_NOLOCK = 1,
+ OCF_FREELIST_ERR_LIST_EMPTY,
+} ocf_freelist_get_err_t;
+
+static ocf_freelist_get_err_t ocf_freelist_get_cache_line_ctx(
+ ocf_freelist_t freelist, uint32_t ctx, bool can_wait,
+ ocf_cache_line_t *cline)
+{
+ if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0)
+ return -OCF_FREELIST_ERR_LIST_EMPTY;
+
+ if (!can_wait && ocf_freelist_trylock(freelist, ctx))
+ return -OCF_FREELIST_ERR_NOLOCK;
+
+ if (can_wait)
+ ocf_freelist_lock(freelist, ctx);
+
+ if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0) {
+ ocf_freelist_unlock(freelist, ctx);
+ return -OCF_FREELIST_ERR_LIST_EMPTY;
+ }
+
+ *cline = freelist->part[ctx].head;
+ _ocf_freelist_remove_cache_line(freelist, ctx, *cline);
+
+ ocf_freelist_unlock(freelist, ctx);
+
+ return 0;
+}
+
+static int get_next_victim_freelist(ocf_freelist_t freelist)
+{
+ int ctx, next;
+
+ do {
+ ctx = env_atomic_read(&freelist->slowpath_victim_idx);
+ next = (ctx + 1) % freelist->count;
+ } while (ctx != env_atomic_cmpxchg(&freelist->slowpath_victim_idx, ctx,
+ next));
+
+ return ctx;
+}
+
+static bool ocf_freelist_get_cache_line_slow(ocf_freelist_t freelist,
+ ocf_cache_line_t *cline)
+{
+ int i, ctx;
+ int err;
+ bool lock_err;
+
+ /* try slowpath without waiting on lock */
+ lock_err = false;
+ for (i = 0; i < freelist->count; i++) {
+ ctx = get_next_victim_freelist(freelist);
+ err = ocf_freelist_get_cache_line_ctx(freelist, ctx, false,
+ cline);
+ if (!err)
+ return true;
+ if (err == -OCF_FREELIST_ERR_NOLOCK)
+ lock_err = true;
+ }
+
+ if (!lock_err) {
+ /* Slowpath failed due to empty freelists - no point in
+ * iterating through contexts to attempt slowpath with full
+ * lock */
+ return false;
+ }
+
+ /* slow path with waiting on lock */
+ for (i = 0; i < freelist->count; i++) {
+ ctx = get_next_victim_freelist(freelist);
+ if (!ocf_freelist_get_cache_line_ctx(freelist, ctx, true,
+ cline)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool ocf_freelist_get_cache_line_fast(ocf_freelist_t freelist,
+ ocf_cache_line_t *cline)
+{
+ bool ret;
+ uint32_t ctx = env_get_execution_context();
+
+ ret = !ocf_freelist_get_cache_line_ctx(freelist, ctx, false, cline);
+
+ env_put_execution_context(ctx);
+
+ return ret;
+}
+
+bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
+ ocf_cache_line_t *cline)
+{
+ if (env_atomic64_read(&freelist->total_free) == 0)
+ return false;
+
+ if (!ocf_freelist_get_cache_line_fast(freelist, cline))
+ return ocf_freelist_get_cache_line_slow(freelist, cline);
+
+ return true;
+}
+
+void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
+ ocf_cache_line_t cline)
+{
+ uint32_t ctx = env_get_execution_context();
+
+ ocf_freelist_lock(freelist, ctx);
+ ocf_freelist_add_cache_line(freelist, ctx, cline);
+ ocf_freelist_unlock(freelist, ctx);
+ env_put_execution_context(ctx);
+}
+
+ocf_freelist_t ocf_freelist_init(struct ocf_cache *cache)
+{
+ uint32_t num;
+ int i;
+ ocf_freelist_t freelist;
+ ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
+ cache);
+
+ freelist = env_vzalloc(sizeof(*freelist));
+ if (!freelist)
+ return NULL;
+
+ num = env_get_execution_context_count();
+
+ freelist->cache = cache;
+ freelist->count = num;
+ env_atomic64_set(&freelist->total_free, 0);
+ freelist->lock = env_vzalloc(sizeof(freelist->lock[0]) * num);
+ freelist->part = env_vzalloc(sizeof(freelist->part[0]) * num);
+
+ if (!freelist->lock || !freelist->part)
+ goto free_allocs;
+
+ for (i = 0; i < num; i++) {
+ if (env_spinlock_init(&freelist->lock[i]))
+ goto spinlock_err;
+
+ freelist->part[i].head = line_entries;
+ freelist->part[i].tail = line_entries;
+ env_atomic64_set(&freelist->part[i].curr_size, 0);
+ }
+
+ return freelist;
+
+spinlock_err:
+ while (i--)
+ env_spinlock_destroy(&freelist->lock[i]);
+free_allocs:
+ env_vfree(freelist->lock);
+ env_vfree(freelist->part);
+ env_vfree(freelist);
+ return NULL;
+}
+
+void ocf_freelist_deinit(ocf_freelist_t freelist)
+{
+ int i;
+
+ for (i = 0; i < freelist->count; i++)
+ env_spinlock_destroy(&freelist->lock[i]);
+ env_vfree(freelist->lock);
+ env_vfree(freelist->part);
+ env_vfree(freelist);
+}
+
+ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist)
+{
+ return env_atomic64_read(&freelist->total_free);
+}
+
diff --git a/src/spdk/ocf/src/ocf_freelist.h b/src/spdk/ocf/src/ocf_freelist.h
new file mode 100644
index 000000000..3b67b2aba
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_freelist.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_FREELIST_H__
+#define __OCF_FREELIST_H__
+
+#include "ocf_cache_priv.h"
+
+struct ocf_freelist;
+
+typedef struct ocf_freelist *ocf_freelist_t;
+
+/* Init / deinit freelist runtime structures */
+ocf_freelist_t ocf_freelist_init(struct ocf_cache *cache);
+void ocf_freelist_deinit(ocf_freelist_t freelist);
+
+/* Assign unused cachelines to freelist */
+void ocf_freelist_populate(ocf_freelist_t freelist,
+ ocf_cache_line_t num_free_clines);
+
+/* Get cacheline from freelist */
+bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
+ ocf_cache_line_t *cline);
+
+/* Put cacheline back to freelist */
+void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
+ ocf_cache_line_t cline);
+
+/* Return total number of free cachelines */
+ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist);
+
+#endif /* __OCF_FREELIST_H__ */
diff --git a/src/spdk/ocf/src/ocf_io.c b/src/spdk/ocf/src/ocf_io.c
new file mode 100644
index 000000000..da94ed065
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_io.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_def_priv.h"
+#include "ocf_io_priv.h"
+#include "ocf_volume_priv.h"
+#include "utils/utils_io_allocator.h"
+
+/*
+ * This is io allocator dedicated for bottom devices.
+ * Out IO structure looks like this:
+ * --------------> +-------------------------+
+ * | OCF is aware | |
+ * | of this part. | struct ocf_io_meta |
+ * | | |
+ * | +-------------------------+ <----------------
+ * | | | Bottom adapter |
+ * | | struct ocf_io | is aware of |
+ * | | | this part. |
+ * --------------> +-------------------------+ |
+ * | | |
+ * | Bottom adapter specific | |
+ * | context data structure. | |
+ * | | |
+ * +-------------------------+ <----------------
+ */
+
+#define OCF_IO_TOTAL(priv_size) \
+ (sizeof(struct ocf_io_internal) + priv_size)
+
+static int ocf_io_allocator_default_init(ocf_io_allocator_t allocator,
+ uint32_t priv_size, const char *name)
+{
+ allocator->priv = env_allocator_create(OCF_IO_TOTAL(priv_size), name);
+ if (!allocator->priv)
+ return -OCF_ERR_NO_MEM;
+
+ return 0;
+}
+
+static void ocf_io_allocator_default_deinit(ocf_io_allocator_t allocator)
+{
+ env_allocator_destroy(allocator->priv);
+ allocator->priv = NULL;
+}
+
+static void *ocf_io_allocator_default_new(ocf_io_allocator_t allocator,
+ ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir)
+{
+ return env_allocator_new(allocator->priv);
+}
+
+static void ocf_io_allocator_default_del(ocf_io_allocator_t allocator, void *obj)
+{
+ env_allocator_del(allocator->priv, obj);
+}
+
+const struct ocf_io_allocator_type type_default = {
+ .ops = {
+ .allocator_init = ocf_io_allocator_default_init,
+ .allocator_deinit = ocf_io_allocator_default_deinit,
+ .allocator_new = ocf_io_allocator_default_new,
+ .allocator_del = ocf_io_allocator_default_del,
+ },
+};
+
+ocf_io_allocator_type_t ocf_io_allocator_get_type_default(void)
+{
+ return &type_default;
+}
+
+/*
+ * IO internal API
+ */
+
+static struct ocf_io_internal *ocf_io_get_internal(struct ocf_io* io)
+{
+ return container_of(io, struct ocf_io_internal, io);
+}
+
+struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags)
+{
+ struct ocf_io_internal *ioi;
+ uint32_t sector_size = SECTORS_TO_BYTES(1);
+
+ if ((addr % sector_size) || (bytes % sector_size))
+ return NULL;
+
+ if (!ocf_refcnt_inc(&volume->refcnt))
+ return NULL;
+
+ ioi = ocf_io_allocator_new(&volume->type->allocator, volume, queue,
+ addr, bytes, dir);
+ if (!ioi) {
+ ocf_refcnt_dec(&volume->refcnt);
+ return NULL;
+ }
+
+ ioi->meta.volume = volume;
+ ioi->meta.ops = &volume->type->properties->io_ops;
+ env_atomic_set(&ioi->meta.ref_count, 1);
+
+ ioi->io.io_queue = queue;
+ ioi->io.addr = addr;
+ ioi->io.bytes = bytes;
+ ioi->io.dir = dir;
+ ioi->io.io_class = io_class;
+ ioi->io.flags = flags;
+
+ return &ioi->io;
+}
+
+/*
+ * IO external API
+ */
+
+void *ocf_io_get_priv(struct ocf_io* io)
+{
+ return (void *)io + sizeof(struct ocf_io);
+}
+
+int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset)
+{
+ struct ocf_io_internal *ioi = ocf_io_get_internal(io);
+
+ return ioi->meta.ops->set_data(io, data, offset);
+}
+
+ctx_data_t *ocf_io_get_data(struct ocf_io *io)
+{
+ struct ocf_io_internal *ioi = ocf_io_get_internal(io);
+
+ return ioi->meta.ops->get_data(io);
+}
+
+void ocf_io_get(struct ocf_io *io)
+{
+ struct ocf_io_internal *ioi = ocf_io_get_internal(io);
+
+ env_atomic_inc_return(&ioi->meta.ref_count);
+}
+
+void ocf_io_put(struct ocf_io *io)
+{
+ struct ocf_io_internal *ioi = ocf_io_get_internal(io);
+
+ if (env_atomic_dec_return(&ioi->meta.ref_count))
+ return;
+
+ ocf_refcnt_dec(&ioi->meta.volume->refcnt);
+
+ ocf_io_allocator_del(&ioi->meta.volume->type->allocator, (void *)ioi);
+}
+
+ocf_volume_t ocf_io_get_volume(struct ocf_io *io)
+{
+ struct ocf_io_internal *ioi = ocf_io_get_internal(io);
+
+ return ioi->meta.volume;
+}
diff --git a/src/spdk/ocf/src/ocf_io_class.c b/src/spdk/ocf/src/ocf_io_class.c
new file mode 100644
index 000000000..ad2830687
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_io_class.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_priv.h"
+#include "metadata/metadata.h"
+#include "engine/cache_engine.h"
+#include "utils/utils_part.h"
+
+int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
+ struct ocf_io_class_info *info)
+{
+ ocf_part_id_t part_id = io_class;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!info)
+ return -OCF_ERR_INVAL;
+
+ if (io_class >= OCF_IO_CLASS_MAX)
+ return -OCF_ERR_INVAL;
+
+ if (!ocf_part_is_valid(&cache->user_parts[part_id])) {
+ /* Partition does not exist */
+ return -OCF_ERR_IO_CLASS_NOT_EXIST;
+ }
+
+ if (env_strncpy(info->name, OCF_IO_CLASS_NAME_MAX - 1,
+ cache->user_parts[part_id].config->name,
+ sizeof(cache->user_parts[part_id].config->name))) {
+ return -OCF_ERR_INVAL;
+ }
+
+ info->priority = cache->user_parts[part_id].config->priority;
+ info->curr_size = ocf_cache_is_device_attached(cache) ?
+ cache->user_parts[part_id].runtime->curr_size : 0;
+ info->min_size = cache->user_parts[part_id].config->min_size;
+ info->max_size = cache->user_parts[part_id].config->max_size;
+
+ info->eviction_policy_type = cache->conf_meta->eviction_policy_type;
+ info->cleaning_policy_type = cache->conf_meta->cleaning_policy_type;
+
+ info->cache_mode = cache->user_parts[part_id].config->cache_mode;
+
+ return 0;
+}
+
+int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
+ void *cntx)
+{
+ struct ocf_user_part *part;
+ ocf_part_id_t part_id;
+ int result = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!visitor)
+ return -OCF_ERR_INVAL;
+
+ for_each_part(cache, part, part_id) {
+ if (!ocf_part_is_valid(part))
+ continue;
+
+ result = visitor(cache, part_id, cntx);
+ if (result)
+ break;
+ }
+
+ return result;
+}
diff --git a/src/spdk/ocf/src/ocf_io_priv.h b/src/spdk/ocf/src/ocf_io_priv.h
new file mode 100644
index 000000000..175bd3bbc
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_io_priv.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_IO_PRIV_H__
+#define __OCF_IO_PRIV_H__
+
+#include "ocf/ocf.h"
+#include "utils/utils_io_allocator.h"
+
+struct ocf_io_meta {
+ ocf_volume_t volume;
+ const struct ocf_io_ops *ops;
+ env_atomic ref_count;
+ struct ocf_request *req;
+};
+
+
+struct ocf_io_internal {
+ struct ocf_io_meta meta;
+ struct ocf_io io;
+};
+
+int ocf_io_allocator_init(ocf_io_allocator_t allocator, ocf_io_allocator_type_t type,
+ uint32_t priv_size, const char *name);
+
+
+struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags);
+
+static inline void ocf_io_start(struct ocf_io *io)
+{
+ /*
+ * We want to call start() callback only once, so after calling
+ * we set it to NULL to prevent multiple calls.
+ */
+ if (io->start) {
+ io->start(io);
+ io->start = NULL;
+ }
+}
+
+static inline void ocf_io_end(struct ocf_io *io, int error)
+{
+ if (io->end)
+ io->end(io, error);
+
+}
+
+#endif /* __OCF_IO_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_logger.c b/src/spdk/ocf/src/ocf_logger.c
new file mode 100644
index 000000000..018691c7c
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_logger.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_env.h"
+#include "ocf/ocf_logger.h"
+#include "ocf_logger_priv.h"
+#include "ocf_priv.h"
+
+/*
+ *
+ */
+__attribute__((format(printf, 3, 4)))
+int ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (!logger->ops->print)
+ return -ENOTSUP;
+
+ va_start(args, fmt);
+ ret = logger->ops->print(logger, lvl, fmt, args);
+ va_end(args);
+
+ return ret;
+}
+
+int ocf_log_raw_rl(ocf_logger_t logger, const char *func_name)
+{
+ if (!logger->ops->print_rl)
+ return -ENOTSUP;
+
+ return logger->ops->print_rl(logger, func_name);
+}
+
+/*
+ *
+ */
+int ocf_log_stack_trace_raw(ocf_logger_t logger)
+{
+ if (!logger->ops->dump_stack)
+ return -ENOTSUP;
+
+ return logger->ops->dump_stack(logger);
+}
+
+void ocf_logger_init(ocf_logger_t logger,
+ const struct ocf_logger_ops *ops, void *priv)
+{
+ logger->ops = ops;
+ logger->priv = priv;
+}
+
+int ocf_logger_open(ocf_logger_t logger)
+{
+ if (!logger->ops->open)
+ return 0;
+
+ return logger->ops->open(logger);
+}
+
+void ocf_logger_close(ocf_logger_t logger)
+{
+ if (!logger->ops->close)
+ return;
+
+ logger->ops->close(logger);
+}
+
+void ocf_logger_set_priv(ocf_logger_t logger, void *priv)
+{
+ OCF_CHECK_NULL(logger);
+
+ logger->priv = priv;
+}
+
+void *ocf_logger_get_priv(ocf_logger_t logger)
+{
+ OCF_CHECK_NULL(logger);
+
+ return logger->priv;
+}
+
diff --git a/src/spdk/ocf/src/ocf_logger_priv.h b/src/spdk/ocf/src/ocf_logger_priv.h
new file mode 100644
index 000000000..ee10874b2
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_logger_priv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_LOGGER_PRIV_H__
+#define __OCF_LOGGER_PRIV_H__
+
+#include "ocf/ocf_logger.h"
+
+struct ocf_logger {
+ const struct ocf_logger_ops *ops;
+ void *priv;
+};
+
+__attribute__((format(printf, 3, 4)))
+int ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, ...);
+
+int ocf_log_raw_rl(ocf_logger_t logger, const char *func_name);
+
+int ocf_log_stack_trace_raw(ocf_logger_t logger);
+
+void ocf_logger_init(ocf_logger_t logger,
+ const struct ocf_logger_ops *ops, void *priv);
+
+int ocf_logger_open(ocf_logger_t logger);
+
+void ocf_logger_close(ocf_logger_t logger);
+
+#endif /* __OCF_LOGGER_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_metadata.c b/src/spdk/ocf/src/ocf_metadata.c
new file mode 100644
index 000000000..63c5ce7e5
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_metadata.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf_priv.h"
+#include "ocf_cache_priv.h"
+#include "utils/utils_cache_line.h"
+
+static inline ocf_cache_line_t ocf_atomic_addr2line(
+ struct ocf_cache *cache, uint64_t addr)
+{
+ addr -= cache->device->metadata_offset;
+ addr = ocf_bytes_2_lines(cache, addr);
+ return ocf_metadata_map_phy2lg(cache, addr);
+}
+
+static inline uint8_t ocf_atomic_addr2pos(struct ocf_cache *cache,
+ uint64_t addr)
+{
+ addr -= cache->device->metadata_offset;
+ addr = BYTES_TO_SECTORS(addr);
+ addr %= ocf_line_sectors(cache);
+
+ return addr;
+}
+
+int ocf_metadata_get_atomic_entry(ocf_cache_t cache,
+ uint64_t addr, struct ocf_atomic_metadata *entry)
+{
+ OCF_CHECK_NULL(cache);
+ OCF_CHECK_NULL(entry);
+
+ if (addr > ocf_volume_get_length(&cache->device->volume))
+ return -OCF_ERR_INVAL;
+
+ if (addr < cache->device->metadata_offset) {
+ /* Metadata IO of OCF */
+ ENV_BUG_ON(env_memset(entry, sizeof(*entry), 0));
+ } else {
+ ocf_cache_line_t line = ocf_atomic_addr2line(cache, addr);
+ uint8_t pos = ocf_atomic_addr2pos(cache, addr);
+ ocf_core_id_t core_id = OCF_CORE_MAX;
+ ocf_core_t core;
+ uint64_t core_line = 0;
+
+ ocf_metadata_get_core_info(cache, line, &core_id, &core_line);
+ core = ocf_cache_get_core(cache, core_id);
+
+ entry->core_seq_no = core->conf_meta->seq_no;
+ entry->core_line = core_line;
+
+ entry->valid = metadata_test_valid_one(cache, line, pos);
+ entry->dirty = metadata_test_dirty_one(cache, line, pos);
+ }
+
+ return 0;
+}
+
+int ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr)
+{
+ ocf_cache_line_t line;
+ uint8_t pos;
+ int i;
+
+ OCF_CHECK_NULL(cache);
+
+ line = ocf_atomic_addr2line(cache, addr);
+ pos = ocf_atomic_addr2pos(cache, addr);
+
+ if (!pos || addr < cache->device->metadata_offset)
+ return 0;
+
+ for (i = 0; i < pos; i++) {
+ if (metadata_test_valid_one(cache, line, i))
+ return 0;
+ }
+
+ return i;
+}
+
+int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr,
+ uint32_t bytes)
+{
+ ocf_cache_line_t line;
+ uint8_t pos;
+ int i, count = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ line = ocf_atomic_addr2line(cache, addr + bytes);
+ pos = ocf_atomic_addr2pos(cache, addr + bytes);
+
+ if (!pos || addr < cache->device->metadata_offset)
+ return 0;
+
+ for (i = pos; i < ocf_line_sectors(cache); i++) {
+ if (metadata_test_valid_one(cache, line, i))
+ return 0;
+
+ count++;
+ }
+
+ return count;
+}
diff --git a/src/spdk/ocf/src/ocf_priv.h b/src/spdk/ocf/src/ocf_priv.h
new file mode 100644
index 000000000..4502c8e88
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_priv.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __OCF_PRIV_H__
+#define __OCF_PRIV_H__
+
+#include "ocf_env.h"
+#include "ocf_def_priv.h"
+
+#define OCF_CHECK_NULL(p) ENV_BUG_ON(!(p))
+
+#define OCF_CMPL_RET(args...) ({ \
+ cmpl(args); \
+ return; \
+})
+
+#endif /* __OCF_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_queue.c b/src/spdk/ocf/src/ocf_queue.c
new file mode 100644
index 000000000..ce48f79da
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_queue.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "ocf/ocf_queue.h"
+#include "ocf_priv.h"
+#include "ocf_queue_priv.h"
+#include "ocf_cache_priv.h"
+#include "ocf_ctx_priv.h"
+#include "ocf_request.h"
+#include "mngt/ocf_mngt_common.h"
+#include "engine/cache_engine.h"
+#include "ocf_def_priv.h"
+
+int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
+ const struct ocf_queue_ops *ops)
+{
+ ocf_queue_t tmp_queue;
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ result = ocf_mngt_cache_get(cache);
+ if (result)
+ return result;
+
+ tmp_queue = env_zalloc(sizeof(*tmp_queue), ENV_MEM_NORMAL);
+ if (!tmp_queue) {
+ ocf_mngt_cache_put(cache);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ env_atomic_set(&tmp_queue->io_no, 0);
+ result = env_spinlock_init(&tmp_queue->io_list_lock);
+ if (result) {
+ ocf_mngt_cache_put(cache);
+ env_free(tmp_queue);
+ return result;
+ }
+
+ INIT_LIST_HEAD(&tmp_queue->io_list);
+ env_atomic_set(&tmp_queue->ref_count, 1);
+ tmp_queue->cache = cache;
+ tmp_queue->ops = ops;
+
+ list_add(&tmp_queue->list, &cache->io_queues);
+
+ *queue = tmp_queue;
+
+ return 0;
+}
+
+void ocf_queue_get(ocf_queue_t queue)
+{
+ OCF_CHECK_NULL(queue);
+
+ env_atomic_inc(&queue->ref_count);
+}
+
+void ocf_queue_put(ocf_queue_t queue)
+{
+ OCF_CHECK_NULL(queue);
+
+ if (env_atomic_dec_return(&queue->ref_count) == 0) {
+ list_del(&queue->list);
+ queue->ops->stop(queue);
+ ocf_mngt_cache_put(queue->cache);
+ env_spinlock_destroy(&queue->io_list_lock);
+ env_free(queue);
+ }
+}
+
+void ocf_io_handle(struct ocf_io *io, void *opaque)
+{
+ struct ocf_request *req = opaque;
+
+ OCF_CHECK_NULL(req);
+
+ if (req->rw == OCF_WRITE)
+ req->io_if->write(req);
+ else
+ req->io_if->read(req);
+}
+
+void ocf_queue_run_single(ocf_queue_t q)
+{
+ struct ocf_request *io_req = NULL;
+ ocf_cache_t cache;
+
+ OCF_CHECK_NULL(q);
+
+ cache = q->cache;
+
+ io_req = ocf_engine_pop_req(cache, q);
+
+ if (!io_req)
+ return;
+
+ if (io_req->ioi.io.handle)
+ io_req->ioi.io.handle(&io_req->ioi.io, io_req);
+ else
+ ocf_io_handle(&io_req->ioi.io, io_req);
+}
+
+void ocf_queue_run(ocf_queue_t q)
+{
+ unsigned char step = 0;
+
+ OCF_CHECK_NULL(q);
+
+ while (env_atomic_read(&q->io_no) > 0) {
+ ocf_queue_run_single(q);
+
+ OCF_COND_RESCHED(step, 128);
+ }
+}
+
+void ocf_queue_set_priv(ocf_queue_t q, void *priv)
+{
+ OCF_CHECK_NULL(q);
+ q->priv = priv;
+}
+
+void *ocf_queue_get_priv(ocf_queue_t q)
+{
+ OCF_CHECK_NULL(q);
+ return q->priv;
+}
+
+uint32_t ocf_queue_pending_io(ocf_queue_t q)
+{
+ OCF_CHECK_NULL(q);
+ return env_atomic_read(&q->io_no);
+}
+
+ocf_cache_t ocf_queue_get_cache(ocf_queue_t q)
+{
+ OCF_CHECK_NULL(q);
+ return q->cache;
+}
diff --git a/src/spdk/ocf/src/ocf_queue_priv.h b/src/spdk/ocf/src/ocf_queue_priv.h
new file mode 100644
index 000000000..6a7f4df1a
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_queue_priv.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef OCF_QUEUE_PRIV_H_
+#define OCF_QUEUE_PRIV_H_
+
+#include "ocf_env.h"
+
+struct ocf_queue {
+ ocf_cache_t cache;
+
+ env_atomic io_no;
+
+ env_atomic ref_count;
+
+ struct list_head io_list;
+ env_spinlock io_list_lock;
+
+ /* Tracing reference counter */
+ env_atomic64 trace_ref_cntr;
+
+ /* Tracing stop request */
+ env_atomic trace_stop;
+
+ struct list_head list;
+
+ const struct ocf_queue_ops *ops;
+
+ void *priv;
+};
+
+static inline void ocf_queue_kick(ocf_queue_t queue, bool allow_sync)
+{
+ if (allow_sync && queue->ops->kick_sync)
+ queue->ops->kick_sync(queue);
+ else
+ queue->ops->kick(queue);
+}
+
+#endif
diff --git a/src/spdk/ocf/src/ocf_request.c b/src/spdk/ocf/src/ocf_request.c
new file mode 100644
index 000000000..3ad6c6842
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_request.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_request.h"
+#include "ocf_cache_priv.h"
+#include "ocf_queue_priv.h"
+#include "utils/utils_cache_line.h"
+
+#define OCF_UTILS_RQ_DEBUG 0
+
+#if 1 == OCF_UTILS_RQ_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+enum ocf_req_size {
+ ocf_req_size_1 = 0,
+ ocf_req_size_2,
+ ocf_req_size_4,
+ ocf_req_size_8,
+ ocf_req_size_16,
+ ocf_req_size_32,
+ ocf_req_size_64,
+ ocf_req_size_128,
+ ocf_req_size_max,
+};
+
+struct ocf_req_allocator {
+ env_allocator *allocator[ocf_req_size_max];
+ size_t size[ocf_req_size_max];
+};
+
+static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
+{
+ uint32_t lines = req->core_line_count;
+ size_t size = (lines * sizeof(struct ocf_map_info));
+
+ ENV_BUG_ON(lines == 0);
+ return size;
+}
+
+static inline size_t ocf_req_sizeof(uint32_t lines)
+{
+ size_t size = sizeof(struct ocf_request) +
+ (lines * sizeof(struct ocf_map_info));
+
+ ENV_BUG_ON(lines == 0);
+ return size;
+}
+
+#define ALLOCATOR_NAME_FMT "ocf_req_%u"
+/* Max number of digits in decimal representation of unsigned int is 10 */
+#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10)
+
+int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx)
+{
+ int i;
+ struct ocf_req_allocator *req;
+ char name[ALLOCATOR_NAME_MAX] = { '\0' };
+
+ OCF_DEBUG_TRACE(cache);
+
+ ocf_ctx->resources.req = env_zalloc(sizeof(*(ocf_ctx->resources.req)),
+ ENV_MEM_NORMAL);
+ req = ocf_ctx->resources.req;
+
+ if (!req)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
+ req->size[i] = ocf_req_sizeof(1 << i);
+
+ if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
+ (1 << i)) < 0) {
+ goto err;
+ }
+
+ req->allocator[i] = env_allocator_create(req->size[i], name);
+
+ if (!req->allocator[i])
+ goto err;
+
+ OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, "
+ "size = %lu", 1 << i, req->size[i]);
+ }
+
+ return 0;
+
+err:
+ ocf_req_allocator_deinit(ocf_ctx);
+ return -1;
+}
+
+void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx)
+{
+ int i;
+ struct ocf_req_allocator *req;
+
+ OCF_DEBUG_TRACE(cache);
+
+
+ if (!ocf_ctx->resources.req)
+ return;
+
+ req = ocf_ctx->resources.req;
+
+ for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
+ if (req->allocator[i]) {
+ env_allocator_destroy(req->allocator[i]);
+ req->allocator[i] = NULL;
+ }
+ }
+
+ env_free(req);
+ ocf_ctx->resources.req = NULL;
+}
+
+static inline env_allocator *_ocf_req_get_allocator_1(
+ struct ocf_cache *cache)
+{
+ return cache->owner->resources.req->allocator[0];
+}
+
+static env_allocator *_ocf_req_get_allocator(
+ struct ocf_cache *cache, uint32_t count)
+{
+ struct ocf_ctx *ocf_ctx = cache->owner;
+ unsigned int idx = 31 - __builtin_clz(count);
+
+ if (__builtin_ffs(count) <= idx)
+ idx++;
+
+ ENV_BUG_ON(count == 0);
+
+ if (idx >= ocf_req_size_max)
+ return NULL;
+
+ return ocf_ctx->resources.req->allocator[idx];
+}
+
+struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw)
+{
+ uint64_t core_line_first, core_line_last, core_line_count;
+ ocf_cache_t cache = queue->cache;
+ struct ocf_request *req;
+ env_allocator *allocator;
+
+ if (likely(bytes)) {
+ core_line_first = ocf_bytes_2_lines(cache, addr);
+ core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
+ core_line_count = core_line_last - core_line_first + 1;
+ } else {
+ core_line_first = ocf_bytes_2_lines(cache, addr);
+ core_line_last = core_line_first;
+ core_line_count = 1;
+ }
+
+ allocator = _ocf_req_get_allocator(cache, core_line_count);
+ if (allocator) {
+ req = env_allocator_new(allocator);
+ } else {
+ req = env_allocator_new(_ocf_req_get_allocator_1(cache));
+ }
+
+ if (unlikely(!req))
+ return NULL;
+
+ if (allocator)
+ req->map = req->__map;
+
+ OCF_DEBUG_TRACE(cache);
+
+ ocf_queue_get(queue);
+ req->io_queue = queue;
+
+ req->core = core;
+ req->cache = cache;
+
+ req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc(
+ &cache->refcnt.metadata);
+
+ env_atomic_set(&req->ref_count, 1);
+
+ req->byte_position = addr;
+ req->byte_length = bytes;
+ req->core_line_first = core_line_first;
+ req->core_line_last = core_line_last;
+ req->core_line_count = core_line_count;
+ req->alloc_core_line_count = core_line_count;
+ req->rw = rw;
+ req->part_id = PARTITION_DEFAULT;
+
+ req->discard.sector = BYTES_TO_SECTORS(addr);
+ req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
+ req->discard.handled = 0;
+
+ return req;
+}
+
+int ocf_req_alloc_map(struct ocf_request *req)
+{
+ if (req->map)
+ return 0;
+
+ req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO);
+ if (!req->map) {
+ req->error = -OCF_ERR_NO_MEM;
+ return -OCF_ERR_NO_MEM;
+ }
+
+ return 0;
+}
+
+int ocf_req_alloc_map_discard(struct ocf_request *req)
+{
+ ENV_BUILD_BUG_ON(MAX_TRIM_RQ_SIZE / ocf_cache_line_size_4 *
+ sizeof(struct ocf_map_info) > 4 * KiB);
+
+ if (req->byte_length <= MAX_TRIM_RQ_SIZE)
+ return ocf_req_alloc_map(req);
+
+ /*
+ * NOTE: For cache line size bigger than 8k a single-allocation mapping
+ * can handle more than MAX_TRIM_RQ_SIZE, so for these cache line sizes
+ * discard request uses only part of the mapping array.
+ */
+ req->byte_length = MAX_TRIM_RQ_SIZE;
+ req->core_line_last = ocf_bytes_2_lines(req->cache,
+ req->byte_position + req->byte_length - 1);
+ req->core_line_count = req->core_line_last - req->core_line_first + 1;
+
+ return ocf_req_alloc_map(req);
+}
+
+struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw)
+{
+ struct ocf_request *req;
+
+ req = ocf_req_new(queue, core, addr, bytes, rw);
+
+ if (likely(req) && ocf_req_alloc_map(req)) {
+ ocf_req_put(req);
+ return NULL;
+ }
+
+ return req;
+}
+
+struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw)
+{
+ struct ocf_request *req;
+
+ req = ocf_req_new_extended(queue, core, addr,
+ OCF_MIN(bytes, MAX_TRIM_RQ_SIZE), rw);
+ if (!req)
+ return NULL;
+
+ return req;
+}
+
+void ocf_req_get(struct ocf_request *req)
+{
+ OCF_DEBUG_TRACE(req->cache);
+
+ env_atomic_inc(&req->ref_count);
+}
+
+void ocf_req_put(struct ocf_request *req)
+{
+ env_allocator *allocator;
+ ocf_queue_t queue = req->io_queue;
+
+ if (env_atomic_dec_return(&req->ref_count))
+ return;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ if (!req->d2c && req->io_queue != req->cache->mngt_queue)
+ ocf_refcnt_dec(&req->cache->refcnt.metadata);
+
+ allocator = _ocf_req_get_allocator(req->cache,
+ req->alloc_core_line_count);
+ if (allocator) {
+ env_allocator_del(allocator, req);
+ } else {
+ env_free(req->map);
+ env_allocator_del(_ocf_req_get_allocator_1(req->cache), req);
+ }
+
+ ocf_queue_put(queue);
+}
+
+int ocf_req_set_dirty(struct ocf_request *req)
+{
+ req->dirty = !!ocf_refcnt_inc(&req->cache->refcnt.dirty);
+ return req->dirty ? 0 : -OCF_ERR_AGAIN;
+}
+
+void ocf_req_clear_info(struct ocf_request *req)
+{
+ ENV_BUG_ON(env_memset(&req->info, sizeof(req->info), 0));
+}
+
+void ocf_req_clear_map(struct ocf_request *req)
+{
+ if (likely(req->map))
+ ENV_BUG_ON(env_memset(req->map,
+ sizeof(req->map[0]) * req->core_line_count, 0));
+}
+
+void ocf_req_hash(struct ocf_request *req)
+{
+ int i;
+
+ for (i = 0; i < req->core_line_count; i++) {
+ req->map[i].hash = ocf_metadata_hash_func(req->cache,
+ req->core_line_first + i,
+ ocf_core_get_id(req->core));
+ }
+}
diff --git a/src/spdk/ocf/src/ocf_request.h b/src/spdk/ocf/src/ocf_request.h
new file mode 100644
index 000000000..bdb6ca4f2
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_request.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_REQUEST_H__
+#define __OCF_REQUEST_H__
+
+#include "ocf_env.h"
+#include "ocf_io_priv.h"
+#include "engine/cache_engine.h"
+
+struct ocf_req_allocator;
+
+struct ocf_req_info {
+ /* Number of hits, invalid, misses. */
+ unsigned int hit_no;
+ unsigned int invalid_no;
+
+ uint32_t dirty_all;
+ /*!< Number of dirty line in request*/
+
+ uint32_t dirty_any;
+ /*!< Indicates that at least one request is dirty */
+
+ uint32_t seq_req : 1;
+ /*!< Sequential cache request flag. */
+
+ uint32_t flush_metadata : 1;
+ /*!< This bit tells if metadata flushing is required */
+
+ uint32_t mapping_error : 1;
+ /*!< Core lines in this request were not mapped into cache */
+
+ uint32_t re_part : 1;
+ /*!< This bit indicate that in the request some cache lines
+ * has to be moved to another partition
+ */
+
+ uint32_t core_error : 1;
+ /*!< Error occured during I/O on core device */
+
+ uint32_t cleaner_cache_line_lock : 1;
+ /*!< Cleaner flag - acquire cache line lock */
+
+ uint32_t internal : 1;
+ /**!< this is an internal request */
+};
+
+struct ocf_map_info {
+ ocf_cache_line_t hash;
+ /*!< target LBA & core id hash */
+
+ ocf_cache_line_t coll_idx;
+ /*!< Index in collision table (in case of hit) */
+
+ uint64_t core_line;
+
+ ocf_core_id_t core_id;
+ /*!< Core id for multi-core requests */
+
+ uint16_t status : 8;
+ /*!< Traverse or mapping status - HIT, MISS, etc... */
+
+ uint16_t rd_locked : 1;
+ /*!< Indicates if cache line is locked for READ access */
+
+ uint16_t wr_locked : 1;
+ /*!< Indicates if cache line is locked for WRITE access */
+
+ uint16_t invalid : 1;
+ /*!< This bit indicates that mapping is invalid */
+
+ uint16_t re_part : 1;
+ /*!< This bit indicates if cache line need to be moved to the
+ * new partition
+ */
+
+ uint16_t flush : 1;
+ /*!< This bit indicates if cache line need to be flushed */
+
+ uint8_t start_flush;
+ /*!< If req need flush, contain first sector of range to flush */
+
+ uint8_t stop_flush;
+ /*!< If req need flush, contain last sector of range to flush */
+};
+
+/**
+ * @brief OCF discard request info
+ */
+struct ocf_req_discard_info {
+ sector_t sector;
+ /*!< The start sector for discard request */
+
+ sector_t nr_sects;
+ /*!< Number of sectors to be discarded */
+
+ sector_t handled;
+ /*!< Number of processed sector during discard operation */
+};
+
+/**
+ * @brief OCF IO request
+ */
+struct ocf_request {
+ struct ocf_io_internal ioi;
+ /*!< OCF IO associated with request */
+
+ env_atomic ref_count;
+ /*!< Reference usage count, once OCF request reaches zero it
+ * will be de-initialed. Get/Put method are intended to modify
+ * reference counter
+ */
+
+ env_atomic lock_remaining;
+ /*!< This filed indicates how many cache lines in the request
+ * map left to be locked
+ */
+
+ env_atomic req_remaining;
+ /*!< In case of IO this field indicates how many IO left to
+ * accomplish IO
+ */
+
+ env_atomic master_remaining;
+ /*!< Atomic counter for core device */
+
+ ocf_cache_t cache;
+ /*!< Handle to cache instance */
+
+ ocf_core_t core;
+ /*!< Handle to core instance */
+
+ const struct ocf_io_if *io_if;
+ /*!< IO interface */
+
+ void *priv;
+ /*!< Filed for private data, context */
+
+ void *master_io_req;
+ /*!< Core device request context (core private info) */
+
+ ctx_data_t *data;
+ /*!< Request data*/
+
+ ctx_data_t *cp_data;
+ /*!< Copy of request data */
+
+ ocf_req_cache_mode_t cache_mode;
+
+ uint64_t byte_position;
+ /*!< LBA byte position of request in core domain */
+
+ uint64_t core_line_first;
+ /*! First core line */
+
+ uint64_t core_line_last;
+ /*! Last core line */
+
+ uint32_t byte_length;
+ /*!< Byte length of OCF reuqest */
+
+ uint32_t core_line_count;
+ /*! Core line count */
+
+ uint32_t alloc_core_line_count;
+ /*! Core line count for which request was initially allocated */
+
+ int error;
+ /*!< This filed indicates an error for OCF request */
+
+ ocf_part_id_t part_id;
+ /*!< Targeted partition of requests */
+
+ uint8_t rw : 1;
+ /*!< Indicator of IO direction - Read/Write */
+
+ uint8_t d2c : 1;
+ /**!< request affects metadata cachelines (is not direct-to-core) */
+
+ uint8_t dirty : 1;
+ /**!< indicates that request produces dirty data */
+
+ uint8_t master_io_req_type : 2;
+ /*!< Core device request context type */
+
+ uint8_t seq_cutoff : 1;
+ /*!< Sequential cut off set for this request */
+
+ log_sid_t sid;
+ /*!< Tracing sequence ID */
+
+ uint64_t timestamp;
+ /*!< Tracing timestamp */
+
+ ocf_queue_t io_queue;
+ /*!< I/O queue handle for which request should be submitted */
+
+ struct list_head list;
+ /*!< List item for OCF IO thread workers */
+
+ struct ocf_req_info info;
+ /*!< Detailed request info */
+
+ void (*complete)(struct ocf_request *ocf_req, int error);
+ /*!< Request completion function */
+
+ struct ocf_req_discard_info discard;
+
+ struct ocf_map_info *map;
+
+ struct ocf_map_info __map[];
+};
+
+typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);
+
+/**
+ * @brief Initialize OCF request allocation utility
+ *
+ * @param cache - OCF cache instance
+ * @return Operation status 0 - successful, non-zero failure
+ */
+int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx);
+
+/**
+ * @brief De-initialize OCF request allocation utility
+ *
+ * @param cache - OCF cache instance
+ */
+void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx);
+
+/**
+ * @brief Allocate new OCF request
+ *
+ * @param queue - I/O queue handle
+ * @param core - OCF core instance
+ * @param addr - LBA of request
+ * @param bytes - number of bytes of request
+ * @param rw - Read or Write
+ *
+ * @return new OCF request
+ */
+struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw);
+
+/**
+ * @brief Allocate OCF request map
+ *
+ * @param req OCF request
+ *
+ * @retval 0 Allocation succeed
+ * @retval non-zero Allocation failed
+ */
+int ocf_req_alloc_map(struct ocf_request *req);
+
+/**
+ * @brief Allocate OCF request map for discard request
+ *
+ * @param req OCF request
+ *
+ * @retval 0 Allocation succeed
+ * @retval non-zero Allocation failed
+ */
+int ocf_req_alloc_map_discard(struct ocf_request *req);
+
+/**
+ * @brief Allocate new OCF request with NOIO map allocation for huge request
+ *
+ * @param queue - I/O queue handle
+ * @param core - OCF core instance
+ * @param addr - LBA of request
+ * @param bytes - number of bytes of request
+ * @param rw - Read or Write
+ *
+ * @return new OCF request
+ */
+
+struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw);
+
+/**
+ * @brief Allocate new OCF request for DISCARD operation
+ *
+ * @param queue - I/O queue handle
+ * @param core - OCF core instance
+ * @param addr - LBA of request
+ * @param bytes - number of bytes of request
+ * @param rw - Read or Write
+ *
+ * @return new OCF request
+ */
+struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
+ uint64_t addr, uint32_t bytes, int rw);
+
+/**
+ * @brief Increment OCF request reference count
+ *
+ * @param req - OCF request
+ */
+void ocf_req_get(struct ocf_request *req);
+
+/**
+ * @brief Decrement OCF request reference. If reference is 0 then request will
+ * be deallocated
+ *
+ * @param req - OCF request
+ */
+void ocf_req_put(struct ocf_request *req);
+
+/**
+ * @brief Clear OCF request info
+ *
+ * @param req - OCF request
+ */
+void ocf_req_clear_info(struct ocf_request *req);
+
+/**
+ * @brief Clear OCF request map
+ *
+ * @param req - OCF request
+ */
+void ocf_req_clear_map(struct ocf_request *req);
+
+/**
+ * @brief Calculate hashes for all core lines within the request
+ *
+ * @param req - OCF request
+ */
+void ocf_req_hash(struct ocf_request *req);
+
+int ocf_req_set_dirty(struct ocf_request *req);
+
+/**
+ * @brief Clear OCF request
+ *
+ * @param req - OCF request
+ */
+static inline void ocf_req_clear(struct ocf_request *req)
+{
+ ocf_req_clear_info(req);
+ ocf_req_clear_map(req);
+
+ env_atomic_set(&req->lock_remaining, 0);
+ env_atomic_set(&req->req_remaining, 0);
+}
+
+/**
+ * @brief Return OCF request reference count
+ *
+ * @param req - OCF request
+ * @return OCF request reference count
+ */
+static inline int ocf_req_ref_count(struct ocf_request *req)
+{
+ return env_atomic_read(&req->ref_count);
+}
+
+static inline bool ocf_req_is_4k(uint64_t addr, uint32_t bytes)
+{
+ return !((addr % PAGE_SIZE) || (bytes % PAGE_SIZE));
+}
+
+#endif /* __OCF_REQUEST_H__ */
diff --git a/src/spdk/ocf/src/ocf_stats.c b/src/spdk/ocf/src/ocf_stats.c
new file mode 100644
index 000000000..51a14f1b1
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_stats.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_priv.h"
+#include "metadata/metadata.h"
+#include "engine/cache_engine.h"
+#include "utils/utils_part.h"
+#include "utils/utils_cache_line.h"
+
+#ifdef OCF_DEBUG_STATS
+static void ocf_stats_debug_init(struct ocf_counters_debug *stats)
+{
+ int i;
+
+ for (i = 0; i < IO_PACKET_NO; i++) {
+ env_atomic64_set(&stats->read_size[i], 0);
+ env_atomic64_set(&stats->write_size[i], 0);
+ }
+
+ for (i = 0; i < IO_ALIGN_NO; i++) {
+ env_atomic64_set(&stats->read_align[i], 0);
+ env_atomic64_set(&stats->write_align[i], 0);
+ }
+}
+#endif
+
+static void ocf_stats_req_init(struct ocf_counters_req *stats)
+{
+ env_atomic64_set(&stats->full_miss, 0);
+ env_atomic64_set(&stats->partial_miss, 0);
+ env_atomic64_set(&stats->total, 0);
+ env_atomic64_set(&stats->pass_through, 0);
+}
+
+static void ocf_stats_block_init(struct ocf_counters_block *stats)
+{
+ env_atomic64_set(&stats->read_bytes, 0);
+ env_atomic64_set(&stats->write_bytes, 0);
+}
+
+static void ocf_stats_part_init(struct ocf_counters_part *stats)
+{
+ ocf_stats_req_init(&stats->read_reqs);
+ ocf_stats_req_init(&stats->write_reqs);
+
+ ocf_stats_block_init(&stats->blocks);
+ ocf_stats_block_init(&stats->core_blocks);
+ ocf_stats_block_init(&stats->cache_blocks);
+}
+
+static void ocf_stats_error_init(struct ocf_counters_error *stats)
+{
+ env_atomic_set(&stats->read, 0);
+ env_atomic_set(&stats->write, 0);
+}
+
+static void _ocf_stats_block_update(struct ocf_counters_block *counters, int dir,
+ uint64_t bytes)
+{
+ switch (dir) {
+ case OCF_READ:
+ env_atomic64_add(bytes, &counters->read_bytes);
+ break;
+ case OCF_WRITE:
+ env_atomic64_add(bytes, &counters->write_bytes);
+ break;
+ default:
+ ENV_BUG();
+ }
+}
+
+void ocf_core_stats_vol_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes)
+{
+ struct ocf_counters_block *counters =
+ &core->counters->part_counters[part_id].blocks;
+
+ _ocf_stats_block_update(counters, dir, bytes);
+}
+
+void ocf_core_stats_cache_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes)
+{
+ struct ocf_counters_block *counters =
+ &core->counters->part_counters[part_id].cache_blocks;
+
+ _ocf_stats_block_update(counters, dir, bytes);
+}
+
+void ocf_core_stats_core_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes)
+{
+ struct ocf_counters_block *counters =
+ &core->counters->part_counters[part_id].core_blocks;
+
+ _ocf_stats_block_update(counters, dir, bytes);
+}
+
+void ocf_core_stats_request_update(ocf_core_t core, ocf_part_id_t part_id,
+ uint8_t dir, uint64_t hit_no, uint64_t core_line_count)
+{
+ struct ocf_counters_req *counters;
+
+ switch (dir) {
+ case OCF_READ:
+ counters = &core->counters->part_counters[part_id].read_reqs;
+ break;
+ case OCF_WRITE:
+ counters = &core->counters->part_counters[part_id].write_reqs;
+ break;
+ default:
+ ENV_BUG();
+ }
+
+ env_atomic64_inc(&counters->total);
+
+ if (hit_no == 0)
+ env_atomic64_inc(&counters->full_miss);
+ else if (hit_no < core_line_count)
+ env_atomic64_inc(&counters->partial_miss);
+}
+
+void ocf_core_stats_request_pt_update(ocf_core_t core, ocf_part_id_t part_id,
+ uint8_t dir, uint64_t hit_no, uint64_t core_line_count)
+{
+ struct ocf_counters_req *counters;
+
+ switch (dir) {
+ case OCF_READ:
+ counters = &core->counters->part_counters[part_id].read_reqs;
+ break;
+ case OCF_WRITE:
+ counters = &core->counters->part_counters[part_id].write_reqs;
+ break;
+ default:
+ ENV_BUG();
+ }
+
+ env_atomic64_inc(&counters->pass_through);
+}
+
+static void _ocf_core_stats_error_update(struct ocf_counters_error *counters,
+ uint8_t dir)
+{
+ switch (dir) {
+ case OCF_READ:
+ env_atomic_inc(&counters->read);
+ break;
+ case OCF_WRITE:
+ env_atomic_inc(&counters->write);
+ break;
+ default:
+ ENV_BUG();
+ }
+}
+
+void ocf_core_stats_core_error_update(ocf_core_t core, uint8_t dir)
+{
+ struct ocf_counters_error *counters = &core->counters->core_errors;
+
+ _ocf_core_stats_error_update(counters, dir);
+}
+
+void ocf_core_stats_cache_error_update(ocf_core_t core, uint8_t dir)
+{
+ struct ocf_counters_error *counters = &core->counters->cache_errors;
+
+ _ocf_core_stats_error_update(counters, dir);
+}
+
+/********************************************************************
+ * Function that resets stats, debug and breakdown counters.
+ * If reset is set the following stats won't be reset:
+ * - cache_occupancy
+ * - queue_length
+ * - debug_counters_read_reqs_issued_seq_hits
+ * - debug_counters_read_reqs_issued_not_seq_hits
+ * - debug_counters_read_reqs_issued_read_miss_schedule
+ * - debug_counters_write_reqs_thread
+ * - debug_counters_write_reqs_issued_only_hdd
+ * - debug_counters_write_reqs_issued_both_devs
+ *********************************************************************/
+void ocf_core_stats_initialize(ocf_core_t core)
+{
+ struct ocf_counters_core *exp_obj_stats;
+ int i;
+
+ OCF_CHECK_NULL(core);
+
+ exp_obj_stats = core->counters;
+
+ ocf_stats_error_init(&exp_obj_stats->cache_errors);
+ ocf_stats_error_init(&exp_obj_stats->core_errors);
+
+ for (i = 0; i != OCF_IO_CLASS_MAX; i++)
+ ocf_stats_part_init(&exp_obj_stats->part_counters[i]);
+
+#ifdef OCF_DEBUG_STATS
+ ocf_stats_debug_init(&exp_obj_stats->debug_stats);
+#endif
+}
+
+void ocf_core_stats_initialize_all(ocf_cache_t cache)
+{
+ ocf_core_id_t id;
+
+ for (id = 0; id < OCF_CORE_MAX; id++) {
+ if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
+ continue;
+
+ ocf_core_stats_initialize(&cache->core[id]);
+ }
+}
+
+static void copy_req_stats(struct ocf_stats_req *dest,
+ const struct ocf_counters_req *from)
+{
+ dest->partial_miss = env_atomic64_read(&from->partial_miss);
+ dest->full_miss = env_atomic64_read(&from->full_miss);
+ dest->total = env_atomic64_read(&from->total);
+ dest->pass_through = env_atomic64_read(&from->pass_through);
+}
+
+static void accum_req_stats(struct ocf_stats_req *dest,
+ const struct ocf_counters_req *from)
+{
+ dest->partial_miss += env_atomic64_read(&from->partial_miss);
+ dest->full_miss += env_atomic64_read(&from->full_miss);
+ dest->total += env_atomic64_read(&from->total);
+ dest->pass_through += env_atomic64_read(&from->pass_through);
+}
+
+static void copy_block_stats(struct ocf_stats_block *dest,
+ const struct ocf_counters_block *from)
+{
+ dest->read = env_atomic64_read(&from->read_bytes);
+ dest->write = env_atomic64_read(&from->write_bytes);
+}
+
+static void accum_block_stats(struct ocf_stats_block *dest,
+ const struct ocf_counters_block *from)
+{
+ dest->read += env_atomic64_read(&from->read_bytes);
+ dest->write += env_atomic64_read(&from->write_bytes);
+}
+
+static void copy_error_stats(struct ocf_stats_error *dest,
+ const struct ocf_counters_error *from)
+{
+ dest->read = env_atomic_read(&from->read);
+ dest->write = env_atomic_read(&from->write);
+}
+
+#ifdef OCF_DEBUG_STATS
+static void copy_debug_stats(struct ocf_stats_core_debug *dest,
+ const struct ocf_counters_debug *from)
+{
+ int i;
+
+ for (i = 0; i < IO_PACKET_NO; i++) {
+ dest->read_size[i] = env_atomic64_read(&from->read_size[i]);
+ dest->write_size[i] = env_atomic64_read(&from->write_size[i]);
+ }
+
+ for (i = 0; i < IO_ALIGN_NO; i++) {
+ dest->read_align[i] = env_atomic64_read(&from->read_align[i]);
+ dest->write_align[i] = env_atomic64_read(&from->write_align[i]);
+ }
+}
+#endif
+
+int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
+ struct ocf_stats_io_class *stats)
+{
+ ocf_cache_t cache;
+ uint32_t cache_occupancy_total = 0;
+ struct ocf_counters_part *part_stat;
+ ocf_core_t i_core;
+ ocf_core_id_t i_core_id;
+
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(stats);
+
+ if (part_id > OCF_IO_CLASS_ID_MAX)
+ return -OCF_ERR_INVAL;
+
+ cache = ocf_core_get_cache(core);
+
+ if (!ocf_part_is_valid(&cache->user_parts[part_id]))
+ return -OCF_ERR_IO_CLASS_NOT_EXIST;
+
+ for_each_core(cache, i_core, i_core_id) {
+ cache_occupancy_total += env_atomic_read(
+ &i_core->runtime_meta->cached_clines);
+ }
+
+ part_stat = &core->counters->part_counters[part_id];
+
+ stats->occupancy_clines = env_atomic_read(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ stats->dirty_clines = env_atomic_read(&core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+
+ stats->free_clines = cache->conf_meta->cachelines -
+ cache_occupancy_total;
+
+ copy_req_stats(&stats->read_reqs, &part_stat->read_reqs);
+ copy_req_stats(&stats->write_reqs, &part_stat->write_reqs);
+
+ copy_block_stats(&stats->blocks, &part_stat->blocks);
+ copy_block_stats(&stats->cache_blocks, &part_stat->cache_blocks);
+ copy_block_stats(&stats->core_blocks, &part_stat->core_blocks);
+
+ return 0;
+}
+
+int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
+{
+ uint32_t i;
+ struct ocf_counters_core *core_stats = NULL;
+ struct ocf_counters_part *curr = NULL;
+
+ OCF_CHECK_NULL(core);
+
+ if (!stats)
+ return -OCF_ERR_INVAL;
+
+ core_stats = core->counters;
+
+ ENV_BUG_ON(env_memset(stats, sizeof(*stats), 0));
+
+ copy_error_stats(&stats->core_errors,
+ &core_stats->core_errors);
+ copy_error_stats(&stats->cache_errors,
+ &core_stats->cache_errors);
+
+#ifdef OCF_DEBUG_STATS
+ copy_debug_stats(&stats->debug_stat,
+ &core_stats->debug_stats);
+#endif
+
+ for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
+ curr = &core_stats->part_counters[i];
+
+ accum_req_stats(&stats->read_reqs,
+ &curr->read_reqs);
+ accum_req_stats(&stats->write_reqs,
+ &curr->write_reqs);
+
+ accum_block_stats(&stats->core, &curr->blocks);
+ accum_block_stats(&stats->core_volume, &curr->core_blocks);
+ accum_block_stats(&stats->cache_volume, &curr->cache_blocks);
+
+ stats->cache_occupancy += env_atomic_read(&core->runtime_meta->
+ part_counters[i].cached_clines);
+ stats->dirty += env_atomic_read(&core->runtime_meta->
+ part_counters[i].dirty_clines);
+ }
+
+ return 0;
+}
+
+#ifdef OCF_DEBUG_STATS
+
+#define IO_ALIGNMENT_SIZE (IO_ALIGN_NO)
+#define IO_PACKET_SIZE ((IO_PACKET_NO) - 1)
+
+static uint32_t io_alignment[IO_ALIGNMENT_SIZE] = {
+ 512, 1 * KiB, 2 * KiB, 4 * KiB
+};
+
+static int to_align_idx(uint64_t off)
+{
+ int i;
+
+ for (i = IO_ALIGNMENT_SIZE - 1; i >= 0; i--) {
+ if (off % io_alignment[i] == 0)
+ return i;
+ }
+
+ return IO_ALIGNMENT_SIZE;
+}
+
+static uint32_t io_packet_size[IO_PACKET_SIZE] = {
+ 512, 1 * KiB, 2 * KiB, 4 * KiB, 8 * KiB,
+ 16 * KiB, 32 * KiB, 64 * KiB, 128 * KiB,
+ 256 * KiB, 512 * KiB
+};
+
+
+static int to_packet_idx(uint32_t len)
+{
+ int i = 0;
+
+ for (i = 0; i < IO_PACKET_SIZE; i++) {
+ if (len == io_packet_size[i])
+ return i;
+ }
+
+ return IO_PACKET_SIZE;
+}
+
+void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io)
+{
+ struct ocf_counters_debug *stats;
+ int idx;
+
+ OCF_CHECK_NULL(core);
+ OCF_CHECK_NULL(io);
+
+ stats = &core->counters->debug_stats;
+
+ idx = to_packet_idx(io->bytes);
+ if (io->dir == OCF_WRITE)
+ env_atomic64_inc(&stats->write_size[idx]);
+ else
+ env_atomic64_inc(&stats->read_size[idx]);
+
+ idx = to_align_idx(io->addr);
+ if (io->dir == OCF_WRITE)
+ env_atomic64_inc(&stats->write_align[idx]);
+ else
+ env_atomic64_inc(&stats->read_align[idx]);
+}
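
For the debug breakdown above, a self-contained sketch of the same bucket selection may help; the tables mirror io_packet_size/io_alignment and the numbers in main() are hypothetical:

/* Hypothetical sketch of the debug-stats binning, not part of OCF. */
#include <stdint.h>
#include <stdio.h>

#define KIB 1024u

static const uint32_t packet[11] = { 512, 1 * KIB, 2 * KIB, 4 * KIB, 8 * KIB,
	16 * KIB, 32 * KIB, 64 * KIB, 128 * KIB, 256 * KIB, 512 * KIB };
static const uint32_t align[4] = { 512, 1 * KIB, 2 * KIB, 4 * KIB };

static int packet_idx(uint32_t len)
{
	int i;

	for (i = 0; i < 11; i++) {
		if (len == packet[i])
			return i;
	}
	return 11;	/* the "other" slot, i.e. read/write_size[IO_PACKET_NO - 1] */
}

static int align_idx(uint64_t off)
{
	int i;

	/* coarsest boundary that divides the offset wins */
	for (i = 3; i >= 0; i--) {
		if (off % align[i] == 0)
			return i;
	}
	/* to_align_idx() above would return IO_ALIGNMENT_SIZE here, which is
	 * past the end of read/write_align[]; the code relies on addresses
	 * being at least sector (512 B) aligned */
	return 0;
}

int main(void)
{
	/* a 4 KiB write starting at byte offset 6144 (2 KiB aligned) */
	printf("packet bucket %d, align bucket %d\n",
			packet_idx(4 * KIB), align_idx(6144));	/* prints 3, 2 */
	return 0;
}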
+
+#else
+
+void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) {}
+
+#endif
diff --git a/src/spdk/ocf/src/ocf_stats_builder.c b/src/spdk/ocf/src/ocf_stats_builder.c
new file mode 100644
index 000000000..522f938a3
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_stats_builder.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_priv.h"
+#include "metadata/metadata.h"
+#include "engine/cache_engine.h"
+#include "utils/utils_part.h"
+#include "utils/utils_cache_line.h"
+#include "utils/utils_stats.h"
+
+static void _fill_req(struct ocf_stats_requests *req, struct ocf_stats_core *s)
+{
+ uint64_t serviced = s->read_reqs.total + s->write_reqs.total;
+ uint64_t total = serviced + s->read_reqs.pass_through +
+ s->write_reqs.pass_through;
+ uint64_t hit;
+
+ /* Reads Section */
+ hit = s->read_reqs.total - (s->read_reqs.full_miss +
+ s->read_reqs.partial_miss);
+ _set(&req->rd_hits, hit, total);
+ _set(&req->rd_partial_misses, s->read_reqs.partial_miss, total);
+ _set(&req->rd_full_misses, s->read_reqs.full_miss, total);
+ _set(&req->rd_total, s->read_reqs.total, total);
+
+ /* Write Section */
+ hit = s->write_reqs.total - (s->write_reqs.full_miss +
+ s->write_reqs.partial_miss);
+ _set(&req->wr_hits, hit, total);
+ _set(&req->wr_partial_misses, s->write_reqs.partial_miss, total);
+ _set(&req->wr_full_misses, s->write_reqs.full_miss, total);
+ _set(&req->wr_total, s->write_reqs.total, total);
+
+ /* Pass-Through section */
+ _set(&req->rd_pt, s->read_reqs.pass_through, total);
+ _set(&req->wr_pt, s->write_reqs.pass_through, total);
+
+ /* Summary */
+ _set(&req->serviced, serviced, total);
+ _set(&req->total, total, total);
+}
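
For orientation, a hypothetical walk-through of the arithmetic above; _set() comes from utils_stats.h (not shown here) and presumably records both the raw value and its share of `total`:

/*
 * Hypothetical inputs: read_reqs  { total = 100, full_miss = 20, partial_miss = 10 }
 *                      write_reqs { total =  50, full_miss =  5, partial_miss =  5 }
 *                      pass_through: 10 reads, 5 writes
 *
 *   serviced = 100 + 50        = 150
 *   total    = 150 + 10 + 5    = 165   (denominator for every _set() call)
 *   rd_hits  = 100 - (20 + 10) =  70
 *   wr_hits  =  50 - (5 + 5)   =  40
 */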
+
+static void _fill_req_part(struct ocf_stats_requests *req,
+ struct ocf_stats_io_class *s)
+{
+ uint64_t serviced = s->read_reqs.total + s->write_reqs.total;
+ uint64_t total = serviced + s->read_reqs.pass_through +
+ s->write_reqs.pass_through;
+ uint64_t hit;
+
+ /* Reads Section */
+ hit = s->read_reqs.total - (s->read_reqs.full_miss +
+ s->read_reqs.partial_miss);
+ _set(&req->rd_hits, hit, total);
+ _set(&req->rd_partial_misses, s->read_reqs.partial_miss, total);
+ _set(&req->rd_full_misses, s->read_reqs.full_miss, total);
+ _set(&req->rd_total, s->read_reqs.total, total);
+
+ /* Write Section */
+ hit = s->write_reqs.total - (s->write_reqs.full_miss +
+ s->write_reqs.partial_miss);
+ _set(&req->wr_hits, hit, total);
+ _set(&req->wr_partial_misses, s->write_reqs.partial_miss, total);
+ _set(&req->wr_full_misses, s->write_reqs.full_miss, total);
+ _set(&req->wr_total, s->write_reqs.total, total);
+
+ /* Pass-Through section */
+ _set(&req->rd_pt, s->read_reqs.pass_through, total);
+ _set(&req->wr_pt, s->write_reqs.pass_through, total);
+
+ /* Summary */
+ _set(&req->serviced, serviced, total);
+ _set(&req->total, total, total);
+}
+
+static void _fill_blocks(struct ocf_stats_blocks *blocks,
+ struct ocf_stats_core *s)
+{
+ uint64_t rd, wr, total;
+
+ /* Core volume */
+ rd = _bytes4k(s->core_volume.read);
+ wr = _bytes4k(s->core_volume.write);
+ total = rd + wr;
+ _set(&blocks->core_volume_rd, rd, total);
+ _set(&blocks->core_volume_wr, wr, total);
+ _set(&blocks->core_volume_total, total, total);
+
+ /* Cache volume */
+ rd = _bytes4k(s->cache_volume.read);
+ wr = _bytes4k(s->cache_volume.write);
+ total = rd + wr;
+ _set(&blocks->cache_volume_rd, rd, total);
+ _set(&blocks->cache_volume_wr, wr, total);
+ _set(&blocks->cache_volume_total, total, total);
+
+ /* Core (cache volume) */
+ rd = _bytes4k(s->core.read);
+ wr = _bytes4k(s->core.write);
+ total = rd + wr;
+ _set(&blocks->volume_rd, rd, total);
+ _set(&blocks->volume_wr, wr, total);
+ _set(&blocks->volume_total, total, total);
+}
+
+static void _fill_blocks_part(struct ocf_stats_blocks *blocks,
+ struct ocf_stats_io_class *s)
+{
+ uint64_t rd, wr, total;
+
+ /* Core volume */
+ rd = _bytes4k(s->core_blocks.read);
+ wr = _bytes4k(s->core_blocks.write);
+ total = rd + wr;
+ _set(&blocks->core_volume_rd, rd, total);
+ _set(&blocks->core_volume_wr, wr, total);
+ _set(&blocks->core_volume_total, total, total);
+
+ /* Cache volume */
+ rd = _bytes4k(s->cache_blocks.read);
+ wr = _bytes4k(s->cache_blocks.write);
+ total = rd + wr;
+ _set(&blocks->cache_volume_rd, rd, total);
+ _set(&blocks->cache_volume_wr, wr, total);
+ _set(&blocks->cache_volume_total, total, total);
+
+ /* Core (cache volume) */
+ rd = _bytes4k(s->blocks.read);
+ wr = _bytes4k(s->blocks.write);
+ total = rd + wr;
+ _set(&blocks->volume_rd, rd, total);
+ _set(&blocks->volume_wr, wr, total);
+ _set(&blocks->volume_total, total, total);
+}
+
+static void _fill_errors(struct ocf_stats_errors *errors,
+ struct ocf_stats_core *s)
+{
+ uint64_t rd, wr, total;
+
+ rd = s->core_errors.read;
+ wr = s->core_errors.write;
+ total = rd + wr;
+ _set(&errors->core_volume_rd, rd, total);
+ _set(&errors->core_volume_wr, wr, total);
+ _set(&errors->core_volume_total, total, total);
+
+ rd = s->cache_errors.read;
+ wr = s->cache_errors.write;
+ total = rd + wr;
+ _set(&errors->cache_volume_rd, rd, total);
+ _set(&errors->cache_volume_wr, wr, total);
+ _set(&errors->cache_volume_total, total, total);
+
+ total = s->core_errors.read + s->core_errors.write +
+ s->cache_errors.read + s->cache_errors.write;
+
+ _set(&errors->total, total, total);
+}
+
+static void _accumulate_block(struct ocf_stats_block *to,
+ const struct ocf_stats_block *from)
+{
+ to->read += from->read;
+ to->write += from->write;
+}
+
+static void _accumulate_reqs(struct ocf_stats_req *to,
+ const struct ocf_stats_req *from)
+{
+ to->full_miss += from->full_miss;
+ to->partial_miss += from->partial_miss;
+ to->total += from->total;
+ to->pass_through += from->pass_through;
+}
+
+static void _accumulate_errors(struct ocf_stats_error *to,
+ const struct ocf_stats_error *from)
+{
+ to->read += from->read;
+ to->write += from->write;
+}
+
+struct io_class_stats_context {
+ struct ocf_stats_io_class *stats;
+ ocf_part_id_t part_id;
+};
+
+static int _accumulate_io_class_stats(ocf_core_t core, void *cntx)
+{
+ int result;
+ struct ocf_stats_io_class stats;
+ struct ocf_stats_io_class *total =
+ ((struct io_class_stats_context*)cntx)->stats;
+ ocf_part_id_t part_id = ((struct io_class_stats_context*)cntx)->part_id;
+
+ result = ocf_core_io_class_get_stats(core, part_id, &stats);
+ if (result)
+ return result;
+
+ total->occupancy_clines += stats.occupancy_clines;
+ total->dirty_clines += stats.dirty_clines;
+	/* free lines are a cache-global pool, so overwrite rather than accumulate */
+	total->free_clines = stats.free_clines;
+
+ _accumulate_block(&total->cache_blocks, &stats.cache_blocks);
+ _accumulate_block(&total->core_blocks, &stats.core_blocks);
+ _accumulate_block(&total->blocks, &stats.blocks);
+
+ _accumulate_reqs(&total->read_reqs, &stats.read_reqs);
+ _accumulate_reqs(&total->write_reqs, &stats.write_reqs);
+
+ return 0;
+}
+
+static void _ocf_stats_part_fill(ocf_cache_t cache, ocf_part_id_t part_id,
+		struct ocf_stats_io_class *stats, struct ocf_stats_usage *usage,
+ struct ocf_stats_requests *req, struct ocf_stats_blocks *blocks)
+{
+ uint64_t cache_size, cache_line_size;
+
+ cache_line_size = ocf_cache_get_line_size(cache);
+ cache_size = cache->conf_meta->cachelines;
+
+ if (usage) {
+ _set(&usage->occupancy,
+ _lines4k(stats->occupancy_clines, cache_line_size),
+ _lines4k(cache_size, cache_line_size));
+
+ if (part_id == PARTITION_DEFAULT) {
+ _set(&usage->free,
+ _lines4k(stats->free_clines, cache_line_size),
+ _lines4k(cache_size, cache_line_size));
+ } else {
+ _set(&usage->free,
+ _lines4k(0, cache_line_size),
+ _lines4k(0, cache_line_size));
+ }
+
+ _set(&usage->clean,
+ _lines4k(stats->occupancy_clines - stats->dirty_clines,
+ cache_line_size),
+ _lines4k(stats->occupancy_clines, cache_line_size));
+
+ _set(&usage->dirty,
+ _lines4k(stats->dirty_clines, cache_line_size),
+ _lines4k(stats->occupancy_clines, cache_line_size));
+ }
+
+ if (req)
+ _fill_req_part(req, stats);
+
+ if (blocks)
+ _fill_blocks_part(blocks, stats);
+}
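
A hypothetical usage breakdown, assuming _lines4k() (from utils_stats.h, not shown here) converts a cache-line count into 4 KiB units:

/*
 * Example with a 64 KiB cache line (16 x 4 KiB units), all numbers hypothetical:
 *   cache_size = 8000 lines, occupancy_clines = 1000, dirty_clines = 250
 *
 *   occupancy -> 16000 of 128000 4 KiB units (relative to the whole cache)
 *   dirty     ->  4000 of  16000             (relative to the occupied lines)
 *   clean     -> 12000 of  16000
 *   free      -> reported only for PARTITION_DEFAULT, since free lines are a
 *                cache-global pool rather than a per-IO-class resource
 */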
+
+int ocf_stats_collect_part_core(ocf_core_t core, ocf_part_id_t part_id,
+ struct ocf_stats_usage *usage, struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks)
+{
+ struct ocf_stats_io_class s;
+ ocf_cache_t cache;
+ int result = 0;
+
+ OCF_CHECK_NULL(core);
+
+ if (part_id > OCF_IO_CLASS_ID_MAX)
+ return -OCF_ERR_INVAL;
+
+ cache = ocf_core_get_cache(core);
+
+ _ocf_stats_zero(usage);
+ _ocf_stats_zero(req);
+ _ocf_stats_zero(blocks);
+
+ result = ocf_core_io_class_get_stats(core, part_id, &s);
+ if (result)
+ return result;
+
+ _ocf_stats_part_fill(cache, part_id, &s, usage, req, blocks);
+
+ return result;
+}
+
+int ocf_stats_collect_part_cache(ocf_cache_t cache, ocf_part_id_t part_id,
+ struct ocf_stats_usage *usage, struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks)
+{
+ struct io_class_stats_context ctx;
+ struct ocf_stats_io_class s = {};
+ int result = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (part_id > OCF_IO_CLASS_ID_MAX)
+ return -OCF_ERR_INVAL;
+
+ _ocf_stats_zero(usage);
+ _ocf_stats_zero(req);
+ _ocf_stats_zero(blocks);
+
+ ctx.part_id = part_id;
+ ctx.stats = &s;
+
+ result = ocf_core_visit(cache, _accumulate_io_class_stats, &ctx, true);
+ if (result)
+ return result;
+
+ _ocf_stats_part_fill(cache, part_id, &s, usage, req, blocks);
+
+ return result;
+}
+
+int ocf_stats_collect_core(ocf_core_t core,
+ struct ocf_stats_usage *usage,
+ struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks,
+ struct ocf_stats_errors *errors)
+{
+ ocf_cache_t cache;
+ uint64_t cache_occupancy, cache_size, cache_line_size;
+ struct ocf_stats_core s;
+ int result;
+
+ OCF_CHECK_NULL(core);
+
+ result = ocf_core_get_stats(core, &s);
+ if (result)
+ return result;
+
+ cache = ocf_core_get_cache(core);
+ cache_line_size = ocf_cache_get_line_size(cache);
+ cache_size = cache->conf_meta->cachelines;
+ cache_occupancy = ocf_get_cache_occupancy(cache);
+
+ _ocf_stats_zero(usage);
+ _ocf_stats_zero(req);
+ _ocf_stats_zero(blocks);
+ _ocf_stats_zero(errors);
+
+ if (usage) {
+ _set(&usage->occupancy,
+ _lines4k(s.cache_occupancy, cache_line_size),
+ _lines4k(cache_size, cache_line_size));
+
+ _set(&usage->free,
+ _lines4k(cache_size - cache_occupancy, cache_line_size),
+ _lines4k(cache_size, cache_line_size));
+
+ _set(&usage->clean,
+ _lines4k(s.cache_occupancy - s.dirty, cache_line_size),
+ _lines4k(s.cache_occupancy, cache_line_size));
+
+ _set(&usage->dirty,
+ _lines4k(s.dirty, cache_line_size),
+ _lines4k(s.cache_occupancy, cache_line_size));
+ }
+
+ if (req)
+ _fill_req(req, &s);
+
+ if (blocks)
+ _fill_blocks(blocks, &s);
+
+ if (errors)
+ _fill_errors(errors, &s);
+
+ return 0;
+}
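
A minimal caller-side sketch, assuming ocf_stats_collect_core() and the ocf_stats_* structures are exposed through the public headers pulled in by ocf/ocf.h; the helper name is hypothetical and error handling is trimmed:

#include "ocf/ocf.h"

/* Hypothetical: gather the usage/request/block/error breakdown for one core. */
static int gather_core_stats(ocf_core_t core)
{
	struct ocf_stats_usage usage;
	struct ocf_stats_requests req;
	struct ocf_stats_blocks blocks;
	struct ocf_stats_errors errors;

	/* the if (usage)/if (req)/... guards above suggest that any of the
	 * output pointers may also be passed as NULL when not needed */
	return ocf_stats_collect_core(core, &usage, &req, &blocks, &errors);
}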
+
+static int _accumulate_stats(ocf_core_t core, void *cntx)
+{
+ struct ocf_stats_core stats, *total = cntx;
+ int result;
+
+ result = ocf_core_get_stats(core, &stats);
+ if (result)
+ return result;
+
+ _accumulate_block(&total->cache_volume, &stats.cache_volume);
+ _accumulate_block(&total->core_volume, &stats.core_volume);
+ _accumulate_block(&total->core, &stats.core);
+
+ _accumulate_reqs(&total->read_reqs, &stats.read_reqs);
+ _accumulate_reqs(&total->write_reqs, &stats.write_reqs);
+
+ _accumulate_errors(&total->cache_errors, &stats.cache_errors);
+ _accumulate_errors(&total->core_errors, &stats.core_errors);
+
+ return 0;
+}
+
+int ocf_stats_collect_cache(ocf_cache_t cache,
+ struct ocf_stats_usage *usage,
+ struct ocf_stats_requests *req,
+ struct ocf_stats_blocks *blocks,
+ struct ocf_stats_errors *errors)
+{
+ uint64_t cache_line_size;
+ struct ocf_cache_info info;
+ struct ocf_stats_core s = { 0 };
+ int result;
+
+ OCF_CHECK_NULL(cache);
+
+ result = ocf_cache_get_info(cache, &info);
+ if (result)
+ return result;
+
+ cache_line_size = ocf_cache_get_line_size(cache);
+
+ _ocf_stats_zero(usage);
+ _ocf_stats_zero(req);
+ _ocf_stats_zero(blocks);
+ _ocf_stats_zero(errors);
+
+ result = ocf_core_visit(cache, _accumulate_stats, &s, true);
+ if (result)
+ return result;
+
+ if (usage) {
+ _set(&usage->occupancy,
+ _lines4k(info.occupancy, cache_line_size),
+ _lines4k(info.size, cache_line_size));
+
+ _set(&usage->free,
+ _lines4k(info.size - info.occupancy, cache_line_size),
+ _lines4k(info.size, cache_line_size));
+
+ _set(&usage->clean,
+ _lines4k(info.occupancy - info.dirty, cache_line_size),
+ _lines4k(info.size, cache_line_size));
+
+ _set(&usage->dirty,
+ _lines4k(info.dirty, cache_line_size),
+ _lines4k(info.size, cache_line_size));
+ }
+
+ if (req)
+ _fill_req(req, &s);
+
+ if (blocks)
+ _fill_blocks(blocks, &s);
+
+ if (errors)
+ _fill_errors(errors, &s);
+
+ return 0;
+}
diff --git a/src/spdk/ocf/src/ocf_stats_priv.h b/src/spdk/ocf/src/ocf_stats_priv.h
new file mode 100644
index 000000000..59319dd7d
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_stats_priv.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_STATS_PRIV_H__
+#define __OCF_STATS_PRIV_H__
+
+struct ocf_counters_block {
+ env_atomic64 read_bytes;
+ env_atomic64 write_bytes;
+};
+
+struct ocf_counters_error {
+ env_atomic read;
+ env_atomic write;
+};
+
+struct ocf_counters_req {
+ env_atomic64 partial_miss;
+ env_atomic64 full_miss;
+ env_atomic64 total;
+ env_atomic64 pass_through;
+};
+
+/**
+ * @brief OCF requests statistics like hit, miss, etc...
+ *
+ * @note To calculate the number of hit requests, do:
+ * total - (partial_miss + full_miss)
+ */
+struct ocf_stats_req {
+ /** Number of partial misses */
+ uint64_t partial_miss;
+
+ /** Number of full misses */
+ uint64_t full_miss;
+
+	/** Total number of requests */
+ uint64_t total;
+
+ /** Pass-through requests */
+ uint64_t pass_through;
+};
+
+/**
+ * @brief OCF error statistics
+ */
+struct ocf_stats_error {
+ /** Read errors */
+ uint32_t read;
+
+ /** Write errors */
+ uint32_t write;
+};
+
+/**
+ * @brief OCF block statistics in bytes
+ */
+struct ocf_stats_block {
+ /** Number of blocks read */
+ uint64_t read;
+
+ /** Number of blocks written */
+ uint64_t write;
+};
+
+/**
+ * Statistics appropriate for given IO class
+ */
+struct ocf_stats_io_class {
+ /** Number of cache lines available for given partition */
+ uint64_t free_clines;
+
+ /** Number of cache lines within lru list */
+ uint64_t occupancy_clines;
+
+ /** Number of dirty cache lines assigned to specific partition */
+ uint64_t dirty_clines;
+
+ /** Read requests statistics */
+ struct ocf_stats_req read_reqs;
+
+	/** Write requests statistics */
+ struct ocf_stats_req write_reqs;
+
+ /** Block requests for ocf volume statistics */
+ struct ocf_stats_block blocks;
+
+ /** Block requests for cache volume statistics */
+ struct ocf_stats_block cache_blocks;
+
+ /** Block requests for core volume statistics */
+ struct ocf_stats_block core_blocks;
+};
+
+#define IO_PACKET_NO 12
+#define IO_ALIGN_NO 4
+
+/**
+ * @brief Core debug statistics
+ */
+struct ocf_stats_core_debug {
+ /** I/O sizes being read (grouped by packets) */
+ uint64_t read_size[IO_PACKET_NO];
+
+ /** I/O sizes being written (grouped by packets) */
+ uint64_t write_size[IO_PACKET_NO];
+
+ /** I/O alignment for reads */
+ uint64_t read_align[IO_ALIGN_NO];
+
+ /** I/O alignment for writes */
+ uint64_t write_align[IO_ALIGN_NO];
+};
+
+/**
+ * @brief OCF core statistics
+ */
+struct ocf_stats_core {
+ /** Number of cache lines allocated in the cache for this core */
+ uint32_t cache_occupancy;
+
+ /** Number of dirty cache lines allocated in the cache for this core */
+ uint32_t dirty;
+
+ /** Read requests statistics */
+ struct ocf_stats_req read_reqs;
+
+ /** Write requests statistics */
+ struct ocf_stats_req write_reqs;
+
+ /** Block requests for cache volume statistics */
+ struct ocf_stats_block cache_volume;
+
+ /** Block requests for core volume statistics */
+ struct ocf_stats_block core_volume;
+
+ /** Block requests submitted by user to this core */
+ struct ocf_stats_block core;
+
+ /** Cache volume error statistics */
+ struct ocf_stats_error cache_errors;
+
+ /** Core volume error statistics */
+ struct ocf_stats_error core_errors;
+
+ /** Debug statistics */
+ struct ocf_stats_core_debug debug_stat;
+};
+
+/**
+ * statistics appropriate for given io class.
+ */
+struct ocf_counters_part {
+ struct ocf_counters_req read_reqs;
+ struct ocf_counters_req write_reqs;
+
+ struct ocf_counters_block blocks;
+
+ struct ocf_counters_block core_blocks;
+ struct ocf_counters_block cache_blocks;
+};
+
+#ifdef OCF_DEBUG_STATS
+struct ocf_counters_debug {
+ env_atomic64 write_size[IO_PACKET_NO];
+ env_atomic64 read_size[IO_PACKET_NO];
+
+ env_atomic64 read_align[IO_ALIGN_NO];
+ env_atomic64 write_align[IO_ALIGN_NO];
+};
+#endif
+
+struct ocf_counters_core {
+ struct ocf_counters_error core_errors;
+ struct ocf_counters_error cache_errors;
+
+ struct ocf_counters_part part_counters[OCF_IO_CLASS_MAX];
+#ifdef OCF_DEBUG_STATS
+ struct ocf_counters_debug debug_stats;
+#endif
+};
+
+void ocf_core_stats_core_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes);
+void ocf_core_stats_cache_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes);
+void ocf_core_stats_vol_block_update(ocf_core_t core, ocf_part_id_t part_id,
+ int dir, uint64_t bytes);
+
+void ocf_core_stats_request_update(ocf_core_t core, ocf_part_id_t part_id,
+ uint8_t dir, uint64_t hit_no, uint64_t core_line_count);
+void ocf_core_stats_request_pt_update(ocf_core_t core, ocf_part_id_t part_id,
+ uint8_t dir, uint64_t hit_no, uint64_t core_line_count);
+
+void ocf_core_stats_core_error_update(ocf_core_t core, uint8_t dir);
+void ocf_core_stats_cache_error_update(ocf_core_t core, uint8_t dir);
+
+/**
+ * @brief ocf_core_io_class_get_stats retrieve io class statistics
+ * for given core
+ *
+ * Retrieve a buffer of IO class statistics for the given core.
+ *
+ * @param[in] core core handle to which request pertains
+ * @param[in] part_id IO class, stats of which are requested
+ * @param[out] stats statistic structure that shall be filled as
+ * a result of this function invocation.
+ *
+ * @result zero upon successful completion; error code otherwise
+ */
+int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
+ struct ocf_stats_io_class *stats);
+
+/**
+ * @brief retrieve core stats
+ *
+ * Retrieve ocf per core stats (for all IO classes together)
+ *
+ * @param[in] core core handle to which request pertains
+ * @param[out] stats statistics structure that shall be filled as
+ * a result of this function invocation.
+ *
+ * @result zero upon successful completion; error code otherwise
+ */
+int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats);
+
+/**
+ * @brief update DEBUG stats given IO request
+ *
+ * Function meant to update DEBUG stats for IO request.
+ *
+ * @note This function shall be invoked for each IO request processed
+ *
+ * @param[in] core to which request pertains
+ * @param[in] io request for which stats are being updated
+ */
+void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io);
+
+#endif
diff --git a/src/spdk/ocf/src/ocf_trace.c b/src/spdk/ocf/src/ocf_trace.c
new file mode 100644
index 000000000..e85c51e2b
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_trace.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_env.h"
+#include "ocf_priv.h"
+#include "ocf/ocf.h"
+#include "ocf/ocf_trace.h"
+#include "ocf_core_priv.h"
+#include "ocf_cache_priv.h"
+#include "ocf_trace_priv.h"
+
+struct core_trace_visitor_ctx {
+ ocf_cache_t cache;
+ ocf_queue_t io_queue;
+};
+
+static int _ocf_core_desc(ocf_core_t core, void *ctx)
+{
+ struct ocf_event_core_desc core_desc;
+ struct core_trace_visitor_ctx *visitor_ctx =
+ (struct core_trace_visitor_ctx *) ctx;
+ ocf_cache_t cache = visitor_ctx->cache;
+
+ ocf_event_init_hdr(&core_desc.hdr, ocf_event_type_core_desc,
+ ocf_trace_seq_id(cache),
+ env_ticks_to_nsecs(env_get_tick_count()),
+ sizeof(core_desc));
+ core_desc.name = ocf_core_get_name(core);
+ core_desc.core_size = ocf_volume_get_length(
+ ocf_core_get_volume(core));
+
+ ocf_trace_push(visitor_ctx->io_queue,
+ &core_desc, sizeof(core_desc));
+
+ return 0;
+}
+
+static int _ocf_trace_cache_info(ocf_cache_t cache, ocf_queue_t io_queue)
+{
+ struct ocf_event_cache_desc cache_desc;
+ int retval;
+ struct core_trace_visitor_ctx visitor_ctx;
+
+ ocf_event_init_hdr(&cache_desc.hdr, ocf_event_type_cache_desc,
+ ocf_trace_seq_id(cache),
+ env_ticks_to_nsecs(env_get_tick_count()),
+ sizeof(cache_desc));
+
+ cache_desc.name = ocf_cache_get_name(cache);
+ cache_desc.cache_line_size = ocf_cache_get_line_size(cache);
+ cache_desc.cache_mode = ocf_cache_get_mode(cache);
+
+ if (ocf_cache_is_device_attached(cache)) {
+ /* Attached cache */
+ cache_desc.cache_size = ocf_volume_get_length(
+ ocf_cache_get_volume(cache));
+ } else {
+ cache_desc.cache_size = 0;
+ }
+
+ cache_desc.cores_no = ocf_cache_get_core_count(cache);
+ cache_desc.version = OCF_EVENT_VERSION;
+
+ ocf_trace_push(io_queue, &cache_desc, sizeof(cache_desc));
+
+ visitor_ctx.cache = cache;
+ visitor_ctx.io_queue = io_queue;
+
+ retval = ocf_core_visit(cache, _ocf_core_desc, &visitor_ctx, true);
+
+ return retval;
+}
+
+int ocf_mngt_start_trace(ocf_cache_t cache, void *trace_ctx,
+ ocf_trace_callback_t trace_callback)
+{
+ ocf_queue_t queue;
+ int result = 0;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!trace_callback)
+ return -EINVAL;
+
+ if (cache->trace.trace_callback) {
+ ocf_cache_log(cache, log_err, "Tracing already started\n");
+ return -EINVAL;
+ }
+
+ cache->trace.trace_callback = trace_callback;
+ cache->trace.trace_ctx = trace_ctx;
+
+ // Reset trace stop flag
+ list_for_each_entry(queue, &cache->io_queues, list) {
+ env_atomic_set(&queue->trace_stop, 0);
+ }
+
+ list_for_each_entry(queue, &cache->io_queues, list) {
+ result = _ocf_trace_cache_info(cache, queue);
+ if (result) {
+ cache->trace.trace_callback = NULL;
+ return result;
+ }
+ }
+
+ ocf_cache_log(cache, log_info, "Tracing started\n");
+
+ return result;
+}
+
+int ocf_mngt_stop_trace(ocf_cache_t cache)
+{
+ ocf_queue_t queue;
+
+ OCF_CHECK_NULL(cache);
+
+ if (!cache->trace.trace_callback) {
+ ocf_cache_log(cache, log_err, "Tracing not started\n");
+ return -EINVAL;
+ }
+
+ // Set trace stop flag
+ list_for_each_entry(queue, &cache->io_queues, list) {
+ env_atomic_set(&queue->trace_stop, OCF_TRACING_STOP);
+ }
+
+ cache->trace.trace_callback = NULL;
+ cache->trace.trace_ctx = NULL;
+
+ // Poll for all ongoing traces completion
+ while (ocf_is_trace_ongoing(cache))
+ env_msleep(20);
+
+ return 0;
+}
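
A caller-side sketch of the start/stop contract implemented above; my_trace_cb and trace_for_a_while are hypothetical, and the callback signature is assumed to match ocf_trace_callback_t from ocf_trace.h:

#include "ocf/ocf.h"
#include "ocf/ocf_trace.h"

/* Assumed to match ocf_trace_callback_t; invoked from ocf_trace_push()
 * for every event emitted on the given IO queue. */
void my_trace_cb(ocf_cache_t cache, void *trace_ctx, ocf_queue_t queue,
		struct ocf_event_hdr *event, uint32_t size);

static void trace_for_a_while(ocf_cache_t cache, void *my_ctx)
{
	if (ocf_mngt_start_trace(cache, my_ctx, my_trace_cb))
		return;

	/* ... workload runs; cache/core descriptors were already pushed,
	 * per-IO events follow on each queue ... */

	/* sets trace_stop on every queue, then polls until all in-flight
	 * ocf_trace_push() calls have drained */
	ocf_mngt_stop_trace(cache);
}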
diff --git a/src/spdk/ocf/src/ocf_trace_priv.h b/src/spdk/ocf/src/ocf_trace_priv.h
new file mode 100644
index 000000000..179ff68c4
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_trace_priv.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_TRACE_PRIV_H__
+#define __OCF_TRACE_PRIV_H__
+
+#include "ocf/ocf.h"
+#include "ocf_env.h"
+#include "ocf/ocf_trace.h"
+#include "engine/engine_common.h"
+#include "ocf_request.h"
+#include "ocf_core_priv.h"
+#include "ocf_queue_priv.h"
+
+static inline bool ocf_is_trace_ongoing(ocf_cache_t cache)
+{
+ ocf_queue_t q;
+
+ list_for_each_entry(q, &cache->io_queues, list) {
+ if (env_atomic64_read(&q->trace_ref_cntr))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void ocf_event_init_hdr(struct ocf_event_hdr *hdr,
+ ocf_event_type type, uint64_t sid, uint64_t timestamp,
+ uint32_t size)
+{
+ hdr->sid = sid;
+ hdr->timestamp = timestamp;
+ hdr->type = type;
+ hdr->size = size;
+}
+
+static inline uint64_t ocf_trace_seq_id(ocf_cache_t cache)
+{
+ return env_atomic64_inc_return(&cache->trace.trace_seq_ref);
+}
+
+static inline void ocf_trace_init_io(struct ocf_request *req)
+{
+ req->timestamp = env_ticks_to_nsecs(env_get_tick_count());
+ req->sid = ocf_trace_seq_id(req->cache);
+}
+
+static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
+ struct ocf_request *req, ocf_event_operation_t op)
+{
+ ocf_event_init_hdr(&ev->hdr, ocf_event_type_io, req->sid,
+ req->timestamp, sizeof(*ev));
+
+ ev->addr = req->byte_position;
+ if (op == ocf_event_operation_discard)
+ ev->len = req->discard.nr_sects << ENV_SECTOR_SHIFT;
+ else
+ ev->len = req->byte_length;
+
+ ev->operation = op;
+ ev->core_name = ocf_core_get_name(req->core);
+
+ ev->io_class = req->ioi.io.io_class;
+}
+
+static inline void ocf_trace_push(ocf_queue_t queue, void *trace, uint32_t size)
+{
+ ocf_cache_t cache;
+ ocf_trace_callback_t trace_callback;
+ void *trace_ctx;
+
+ OCF_CHECK_NULL(queue);
+
+ cache = ocf_queue_get_cache(queue);
+
+ if (cache->trace.trace_callback == NULL)
+ return;
+
+ env_atomic64_inc(&queue->trace_ref_cntr);
+
+ if (env_atomic_read(&queue->trace_stop)) {
+ // Tracing stop was requested
+ env_atomic64_dec(&queue->trace_ref_cntr);
+ return;
+ }
+
+ /*
+ * Remember callback and context pointers.
+ * These will be valid even when later on original pointers
+ * will be set to NULL as cleanup will wait till trace
+ * reference counter is zero
+ */
+ trace_callback = cache->trace.trace_callback;
+ trace_ctx = cache->trace.trace_ctx;
+
+ if (trace_callback && trace_ctx) {
+ trace_callback(cache, trace_ctx, queue, trace, size);
+ }
+
+ env_atomic64_dec(&queue->trace_ref_cntr);
+}
+
+static inline void ocf_trace_io(struct ocf_request *req,
+ ocf_event_operation_t dir)
+{
+ struct ocf_event_io ev;
+
+ if (!req->cache->trace.trace_callback)
+ return;
+
+ ocf_trace_prep_io_event(&ev, req, dir);
+
+ ocf_trace_push(req->io_queue, &ev, sizeof(ev));
+}
+
+static inline void ocf_trace_io_cmpl(struct ocf_request *req)
+{
+ struct ocf_event_io_cmpl ev;
+
+ if (!req->cache->trace.trace_callback)
+ return;
+
+ ocf_event_init_hdr(&ev.hdr, ocf_event_type_io_cmpl,
+ ocf_trace_seq_id(req->cache),
+ env_ticks_to_nsecs(env_get_tick_count()),
+ sizeof(ev));
+ ev.rsid = req->sid;
+ ev.is_hit = ocf_engine_is_hit(req);
+
+ ocf_trace_push(req->io_queue, &ev, sizeof(ev));
+}
+
+#endif /* __OCF_TRACE_PRIV_H__ */
diff --git a/src/spdk/ocf/src/ocf_volume.c b/src/spdk/ocf/src/ocf_volume.c
new file mode 100644
index 000000000..0f227711e
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_volume.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "ocf_priv.h"
+#include "ocf_volume_priv.h"
+#include "ocf_io_priv.h"
+#include "ocf_env.h"
+
+/* *** Bottom interface *** */
+
+/*
+ * Volume type
+ */
+
+int ocf_volume_type_init(struct ocf_volume_type **type,
+ const struct ocf_volume_properties *properties,
+ const struct ocf_volume_extended *extended)
+{
+ const struct ocf_volume_ops *ops = &properties->ops;
+ ocf_io_allocator_type_t allocator_type;
+ struct ocf_volume_type *new_type;
+ int ret;
+
+ if (!ops->submit_io || !ops->open || !ops->close ||
+ !ops->get_max_io_size || !ops->get_length) {
+ return -OCF_ERR_INVAL;
+ }
+
+ if (properties->caps.atomic_writes && !ops->submit_metadata)
+ return -OCF_ERR_INVAL;
+
+ new_type = env_zalloc(sizeof(**type), ENV_MEM_NORMAL);
+ if (!new_type)
+ return -OCF_ERR_NO_MEM;
+
+ if (extended && extended->allocator_type)
+ allocator_type = extended->allocator_type;
+ else
+ allocator_type = ocf_io_allocator_get_type_default();
+
+ ret = ocf_io_allocator_init(&new_type->allocator, allocator_type,
+ properties->io_priv_size, properties->name);
+ if (ret)
+ goto err;
+
+ new_type->properties = properties;
+
+ *type = new_type;
+
+ return 0;
+
+err:
+ env_free(new_type);
+ return ret;
+}
+
+void ocf_volume_type_deinit(struct ocf_volume_type *type)
+{
+ if (type->properties->deinit)
+ type->properties->deinit();
+
+ ocf_io_allocator_deinit(&type->allocator);
+ env_free(type);
+}
+
+/*
+ * Volume frontend API
+ */
+
+int ocf_volume_init(ocf_volume_t volume, ocf_volume_type_t type,
+ struct ocf_volume_uuid *uuid, bool uuid_copy)
+{
+ uint32_t priv_size;
+ void *data;
+ int ret;
+
+ if (!volume || !type)
+ return -OCF_ERR_INVAL;
+
+ priv_size = type->properties->volume_priv_size;
+
+ volume->opened = false;
+ volume->type = type;
+
+ volume->priv = env_zalloc(priv_size, ENV_MEM_NORMAL);
+ if (!volume->priv)
+ return -OCF_ERR_NO_MEM;
+
+ ocf_refcnt_init(&volume->refcnt);
+ ocf_refcnt_freeze(&volume->refcnt);
+
+ if (!uuid) {
+ volume->uuid.size = 0;
+ volume->uuid.data = NULL;
+ volume->uuid_copy = false;
+ return 0;
+ }
+
+ volume->uuid_copy = uuid_copy;
+
+ if (uuid_copy) {
+ data = env_vmalloc(uuid->size);
+ if (!data)
+ goto err;
+
+ ret = env_memcpy(data, uuid->size, uuid->data, uuid->size);
+ if (ret) {
+ env_vfree(data);
+ goto err;
+ }
+
+ volume->uuid.data = data;
+ } else {
+ volume->uuid.data = uuid->data;
+ }
+
+ volume->uuid.size = uuid->size;
+
+ return 0;
+
+err:
+ ocf_refcnt_unfreeze(&volume->refcnt);
+ env_free(volume->priv);
+ return -OCF_ERR_NO_MEM;
+}
+
+void ocf_volume_deinit(ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(volume);
+
+ env_free(volume->priv);
+
+ if (volume->uuid_copy && volume->uuid.data) {
+ env_vfree(volume->uuid.data);
+ volume->uuid.data = NULL;
+ volume->uuid.size = 0;
+ }
+}
+
+void ocf_volume_move(ocf_volume_t volume, ocf_volume_t from)
+{
+ OCF_CHECK_NULL(volume);
+ OCF_CHECK_NULL(from);
+
+ ocf_volume_deinit(volume);
+
+ volume->opened = from->opened;
+ volume->type = from->type;
+ volume->uuid = from->uuid;
+ volume->uuid_copy = from->uuid_copy;
+ volume->priv = from->priv;
+ volume->cache = from->cache;
+ volume->features = from->features;
+ volume->refcnt = from->refcnt;
+
+ /*
+ * Deinitialize original volume without freeing resources.
+ */
+ from->opened = false;
+ from->priv = NULL;
+ from->uuid.data = NULL;
+}
+
+int ocf_volume_create(ocf_volume_t *volume, ocf_volume_type_t type,
+ struct ocf_volume_uuid *uuid)
+{
+ ocf_volume_t tmp_volume;
+ int ret;
+
+ OCF_CHECK_NULL(volume);
+
+ tmp_volume = env_zalloc(sizeof(*tmp_volume), ENV_MEM_NORMAL);
+ if (!tmp_volume)
+ return -OCF_ERR_NO_MEM;
+
+ ret = ocf_volume_init(tmp_volume, type, uuid, true);
+ if (ret) {
+ env_free(tmp_volume);
+ return ret;
+ }
+
+ *volume = tmp_volume;
+
+ return 0;
+}
+
+void ocf_volume_destroy(ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(volume);
+
+ ocf_volume_deinit(volume);
+ env_free(volume);
+}
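
A minimal lifecycle sketch built only from helpers defined in this file plus the completion setter from ocf_io.h; my_type, my_uuid, my_queue and my_io_end are hypothetical placeholders supplied by the application:

#include "ocf/ocf.h"

static int read_once(ocf_volume_type_t my_type, struct ocf_volume_uuid *my_uuid,
		ocf_queue_t my_queue, void (*my_io_end)(struct ocf_io *io, int error))
{
	ocf_volume_t vol;
	struct ocf_io *io;
	int ret;

	ret = ocf_volume_create(&vol, my_type, my_uuid);
	if (ret)
		return ret;

	ret = ocf_volume_open(vol, NULL);
	if (ret)
		goto out_destroy;

	/* 4 KiB read from the beginning of the volume, default IO class */
	io = ocf_volume_new_io(vol, my_queue, 0, 4096, OCF_READ, 0, 0);
	if (!io) {
		ret = -OCF_ERR_NO_MEM;
		goto out_close;
	}

	ocf_io_set_cmpl(io, NULL, NULL, my_io_end);
	ocf_volume_submit_io(io);
	return 0;

out_close:
	ocf_volume_close(vol);
out_destroy:
	ocf_volume_destroy(vol);
	return ret;
}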
+
+ocf_volume_type_t ocf_volume_get_type(ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(volume);
+
+ return volume->type;
+}
+
+const struct ocf_volume_uuid *ocf_volume_get_uuid(ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(volume);
+
+ return &volume->uuid;
+}
+
+void ocf_volume_set_uuid(ocf_volume_t volume, const struct ocf_volume_uuid *uuid)
+{
+ OCF_CHECK_NULL(volume);
+
+ if (volume->uuid_copy && volume->uuid.data)
+ env_vfree(volume->uuid.data);
+
+ volume->uuid.data = uuid->data;
+ volume->uuid.size = uuid->size;
+}
+
+void *ocf_volume_get_priv(ocf_volume_t volume)
+{
+ return volume->priv;
+}
+
+ocf_cache_t ocf_volume_get_cache(ocf_volume_t volume)
+{
+ OCF_CHECK_NULL(volume);
+
+ return volume->cache;
+}
+
+int ocf_volume_is_atomic(ocf_volume_t volume)
+{
+ return volume->type->properties->caps.atomic_writes;
+}
+
+struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags)
+{
+ return ocf_io_new(volume, queue, addr, bytes, dir, io_class, flags);
+}
+
+void ocf_volume_submit_io(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+
+ ENV_BUG_ON(!volume->type->properties->ops.submit_io);
+
+	if (!volume->opened) {
+		io->end(io, -OCF_ERR_IO);
+		return;
+	}
+
+ volume->type->properties->ops.submit_io(io);
+}
+
+void ocf_volume_submit_flush(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+
+	if (!volume->opened) {
+		io->end(io, -OCF_ERR_IO);
+		return;
+	}
+
+ if (!volume->type->properties->ops.submit_flush) {
+ ocf_io_end(io, 0);
+ return;
+ }
+
+ volume->type->properties->ops.submit_flush(io);
+}
+
+void ocf_volume_submit_discard(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+
+	if (!volume->opened) {
+		io->end(io, -OCF_ERR_IO);
+		return;
+	}
+
+ if (!volume->type->properties->ops.submit_discard) {
+ ocf_io_end(io, 0);
+ return;
+ }
+
+ volume->type->properties->ops.submit_discard(io);
+}
+
+int ocf_volume_open(ocf_volume_t volume, void *volume_params)
+{
+ int ret;
+
+ ENV_BUG_ON(!volume->type->properties->ops.open);
+ ENV_BUG_ON(volume->opened);
+
+ ret = volume->type->properties->ops.open(volume, volume_params);
+ if (ret)
+ return ret;
+
+ ocf_refcnt_unfreeze(&volume->refcnt);
+ volume->opened = true;
+
+ return 0;
+}
+
+static void ocf_volume_close_end(void *ctx)
+{
+ env_completion *cmpl = ctx;
+
+ env_completion_complete(cmpl);
+ env_completion_destroy(cmpl);
+}
+
+void ocf_volume_close(ocf_volume_t volume)
+{
+ env_completion cmpl;
+
+ ENV_BUG_ON(!volume->type->properties->ops.close);
+ ENV_BUG_ON(!volume->opened);
+
+ env_completion_init(&cmpl);
+ ocf_refcnt_freeze(&volume->refcnt);
+ ocf_refcnt_register_zero_cb(&volume->refcnt, ocf_volume_close_end,
+ &cmpl);
+ env_completion_wait(&cmpl);
+
+ volume->type->properties->ops.close(volume);
+ volume->opened = false;
+}
+
+unsigned int ocf_volume_get_max_io_size(ocf_volume_t volume)
+{
+ ENV_BUG_ON(!volume->type->properties->ops.get_max_io_size);
+
+ if (!volume->opened)
+ return 0;
+
+ return volume->type->properties->ops.get_max_io_size(volume);
+}
+
+uint64_t ocf_volume_get_length(ocf_volume_t volume)
+{
+ ENV_BUG_ON(!volume->type->properties->ops.get_length);
+
+ if (!volume->opened)
+ return 0;
+
+ return volume->type->properties->ops.get_length(volume);
+}
diff --git a/src/spdk/ocf/src/ocf_volume_priv.h b/src/spdk/ocf/src/ocf_volume_priv.h
new file mode 100644
index 000000000..1e51a3763
--- /dev/null
+++ b/src/spdk/ocf/src/ocf_volume_priv.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_VOLUME_PRIV_H__
+#define __OCF_VOLUME_PRIV_H__
+
+#include "ocf_env.h"
+#include "ocf_io_priv.h"
+#include "utils/utils_refcnt.h"
+#include "utils/utils_io_allocator.h"
+
+struct ocf_volume_extended {
+ ocf_io_allocator_type_t allocator_type;
+};
+
+struct ocf_volume_type {
+ const struct ocf_volume_properties *properties;
+ struct ocf_io_allocator allocator;
+};
+
+struct ocf_volume {
+ ocf_volume_type_t type;
+ struct ocf_volume_uuid uuid;
+ bool opened;
+ bool uuid_copy;
+ void *priv;
+ ocf_cache_t cache;
+ struct list_head core_pool_item;
+ struct {
+ unsigned discard_zeroes:1;
+ /* true if reading discarded pages returns 0 */
+ } features;
+ struct ocf_refcnt refcnt;
+};
+
+int ocf_volume_type_init(struct ocf_volume_type **type,
+ const struct ocf_volume_properties *properties,
+ const struct ocf_volume_extended *extended);
+
+void ocf_volume_type_deinit(struct ocf_volume_type *type);
+
+void ocf_volume_move(ocf_volume_t volume, ocf_volume_t from);
+
+void ocf_volume_set_uuid(ocf_volume_t volume,
+ const struct ocf_volume_uuid *uuid);
+
+static inline void ocf_volume_submit_metadata(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+
+ ENV_BUG_ON(!volume->type->properties->ops.submit_metadata);
+
+ volume->type->properties->ops.submit_metadata(io);
+}
+
+static inline void ocf_volume_submit_write_zeroes(struct ocf_io *io)
+{
+ ocf_volume_t volume = ocf_io_get_volume(io);
+
+ ENV_BUG_ON(!volume->type->properties->ops.submit_write_zeroes);
+
+ volume->type->properties->ops.submit_write_zeroes(io);
+}
+
+#endif /* __OCF_VOLUME_PRIV_H__ */
diff --git a/src/spdk/ocf/src/promotion/nhit/nhit.c b/src/spdk/ocf/src/promotion/nhit/nhit.c
new file mode 100644
index 000000000..535e8211e
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/nhit/nhit.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "nhit_hash.h"
+#include "../../metadata/metadata.h"
+#include "../../ocf_priv.h"
+#include "../../engine/engine_common.h"
+
+#include "nhit.h"
+#include "../ops.h"
+
+#define NHIT_MAPPING_RATIO 2
+
+struct nhit_policy_context {
+ nhit_hash_t hash_map;
+};
+
+void nhit_setup(ocf_cache_t cache)
+{
+ struct nhit_promotion_policy_config *cfg;
+
+ cfg = (void *) &cache->conf_meta->promotion[ocf_promotion_nhit].data;
+
+ cfg->insertion_threshold = OCF_NHIT_THRESHOLD_DEFAULT;
+ cfg->trigger_threshold = OCF_NHIT_TRIGGER_DEFAULT;
+}
+
+static uint64_t nhit_sizeof(ocf_cache_t cache)
+{
+ uint64_t size = 0;
+
+ size += sizeof(struct nhit_policy_context);
+ size += nhit_hash_sizeof(ocf_metadata_get_cachelines_count(cache) *
+ NHIT_MAPPING_RATIO);
+
+ return size;
+}
+
+ocf_error_t nhit_init(ocf_cache_t cache)
+{
+ struct nhit_policy_context *ctx;
+ int result = 0;
+ uint64_t available, size;
+
+ size = nhit_sizeof(cache);
+ available = env_get_free_memory();
+
+ if (size >= available) {
+ ocf_cache_log(cache, log_err, "Not enough memory to "
+ "initialize 'nhit' promotion policy! "
+ "Required %lu, available %lu\n",
+ (long unsigned)size,
+ (long unsigned)available);
+
+ return -OCF_ERR_NO_FREE_RAM;
+ }
+
+ ctx = env_vmalloc(sizeof(*ctx));
+ if (!ctx) {
+ result = -OCF_ERR_NO_MEM;
+ goto exit;
+ }
+
+ result = nhit_hash_init(ocf_metadata_get_cachelines_count(cache) *
+ NHIT_MAPPING_RATIO, &ctx->hash_map);
+ if (result)
+ goto dealloc_ctx;
+
+ cache->promotion_policy->ctx = ctx;
+ cache->promotion_policy->config =
+ (void *) &cache->conf_meta->promotion[ocf_promotion_nhit].data;
+
+ return 0;
+
+dealloc_ctx:
+ env_vfree(ctx);
+exit:
+ ocf_cache_log(cache, log_err, "Error initializing nhit promotion policy\n");
+ return result;
+}
+
+void nhit_deinit(ocf_promotion_policy_t policy)
+{
+ struct nhit_policy_context *ctx = policy->ctx;
+
+ nhit_hash_deinit(ctx->hash_map);
+
+ env_vfree(ctx);
+ policy->ctx = NULL;
+}
+
+ocf_error_t nhit_set_param(ocf_cache_t cache, uint8_t param_id,
+ uint32_t param_value)
+{
+ struct nhit_promotion_policy_config *cfg;
+ ocf_error_t result = 0;
+
+ cfg = (void *) &cache->conf_meta->promotion[ocf_promotion_nhit].data;
+
+ switch (param_id) {
+ case ocf_nhit_insertion_threshold:
+ if (param_value >= OCF_NHIT_MIN_THRESHOLD &&
+ param_value <= OCF_NHIT_MAX_THRESHOLD) {
+ cfg->insertion_threshold = param_value;
+ ocf_cache_log(cache, log_info,
+				"Nhit PP insertion threshold value set to %u\n",
+ param_value);
+ } else {
+ ocf_cache_log(cache, log_err, "Invalid nhit "
+ "promotion policy insertion threshold!\n");
+ result = -OCF_ERR_INVAL;
+ }
+ break;
+
+ case ocf_nhit_trigger_threshold:
+ if (param_value >= OCF_NHIT_MIN_TRIGGER &&
+ param_value <= OCF_NHIT_MAX_TRIGGER) {
+ cfg->trigger_threshold = param_value;
+ ocf_cache_log(cache, log_info,
+ "Nhit PP trigger threshold value set to %u%%\n",
+ param_value);
+ } else {
+ ocf_cache_log(cache, log_err, "Invalid nhit "
+ "promotion policy insertion trigger "
+ "threshold!\n");
+ result = -OCF_ERR_INVAL;
+ }
+ break;
+
+ default:
+ ocf_cache_log(cache, log_err, "Invalid nhit "
+ "promotion policy parameter (%u)!\n",
+ param_id);
+ result = -OCF_ERR_INVAL;
+
+ break;
+ }
+
+ return result;
+}
+
+ocf_error_t nhit_get_param(ocf_cache_t cache, uint8_t param_id,
+ uint32_t *param_value)
+{
+ struct nhit_promotion_policy_config *cfg;
+ ocf_error_t result = 0;
+
+ cfg = (void *) &cache->conf_meta->promotion[ocf_promotion_nhit].data;
+
+ OCF_CHECK_NULL(param_value);
+
+ switch (param_id) {
+ case ocf_nhit_insertion_threshold:
+ *param_value = cfg->insertion_threshold;
+ break;
+ case ocf_nhit_trigger_threshold:
+ *param_value = cfg->trigger_threshold;
+ break;
+ default:
+ ocf_cache_log(cache, log_err, "Invalid nhit "
+ "promotion policy parameter (%u)!\n",
+ param_id);
+ result = -OCF_ERR_INVAL;
+
+ break;
+ }
+
+ return result;
+}
+
+static void core_line_purge(struct nhit_policy_context *ctx, ocf_core_id_t core_id,
+ uint64_t core_lba)
+{
+ nhit_hash_set_occurences(ctx->hash_map, core_id, core_lba, 0);
+}
+
+void nhit_req_purge(ocf_promotion_policy_t policy,
+ struct ocf_request *req)
+{
+ struct nhit_policy_context *ctx = policy->ctx;
+ uint32_t i;
+ uint64_t core_line;
+
+ for (i = 0, core_line = req->core_line_first;
+ core_line <= req->core_line_last; core_line++, i++) {
+ struct ocf_map_info *entry = &(req->map[i]);
+
+ core_line_purge(ctx, entry->core_id, entry->core_line);
+ }
+}
+
+static bool core_line_should_promote(ocf_promotion_policy_t policy,
+ ocf_core_id_t core_id, uint64_t core_lba)
+{
+ struct nhit_promotion_policy_config *cfg;
+ struct nhit_policy_context *ctx;
+ bool hit;
+ int32_t counter;
+
+ cfg = (struct nhit_promotion_policy_config*)policy->config;
+ ctx = policy->ctx;
+
+ hit = nhit_hash_query(ctx->hash_map, core_id, core_lba, &counter);
+ if (hit) {
+ /* we have a hit, return now */
+ return cfg->insertion_threshold <= counter;
+ }
+
+ nhit_hash_insert(ctx->hash_map, core_id, core_lba);
+
+ return false;
+}
+
+bool nhit_req_should_promote(ocf_promotion_policy_t policy,
+ struct ocf_request *req)
+{
+ struct nhit_promotion_policy_config *cfg;
+ bool result = true;
+ uint32_t i;
+ uint64_t core_line;
+ uint64_t occupied_cachelines =
+ ocf_metadata_collision_table_entries(policy->owner) -
+ ocf_freelist_num_free(policy->owner->freelist);
+
+ cfg = (struct nhit_promotion_policy_config*)policy->config;
+
+ if (occupied_cachelines < OCF_DIV_ROUND_UP(
+ ((uint64_t)cfg->trigger_threshold *
+ ocf_metadata_get_cachelines_count(policy->owner)), 100)) {
+ return true;
+ }
+
+ for (i = 0, core_line = req->core_line_first;
+ core_line <= req->core_line_last; core_line++, i++) {
+ struct ocf_map_info *entry = &(req->map[i]);
+
+ if (!core_line_should_promote(policy, entry->core_id,
+ entry->core_line)) {
+ result = false;
+ }
+ }
+
+ /* We don't want to reject even partially hit requests - this way we
+ * could trigger passthrough and invalidation. Let's let it in! */
+ return result || ocf_engine_mapped_count(req);
+}
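
A worked example of the trigger arithmetic above, with hypothetical numbers:

/*
 * Cache with 10000 cache lines, trigger_threshold = 80 (%):
 *
 *   threshold = OCF_DIV_ROUND_UP(80 * 10000, 100) = 8000 occupied lines
 *
 * Below 8000 occupied lines every request is promoted unconditionally.
 * At or above it, every core line in the request needs at least
 * insertion_threshold recorded occurrences in the nhit hash, unless the
 * request is already partially mapped in the cache.
 */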
+
diff --git a/src/spdk/ocf/src/promotion/nhit/nhit.h b/src/spdk/ocf/src/promotion/nhit/nhit.h
new file mode 100644
index 000000000..e405d60ea
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/nhit/nhit.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef NHIT_PROMOTION_POLICY_H_
+#define NHIT_PROMOTION_POLICY_H_
+
+#include "ocf/ocf.h"
+#include "../../ocf_request.h"
+#include "../promotion.h"
+#include "nhit_structs.h"
+
+void nhit_setup(ocf_cache_t cache);
+
+ocf_error_t nhit_init(ocf_cache_t cache);
+
+void nhit_deinit(ocf_promotion_policy_t policy);
+
+ocf_error_t nhit_set_param(ocf_cache_t cache, uint8_t param_id,
+ uint32_t param_value);
+
+ocf_error_t nhit_get_param(ocf_cache_t cache, uint8_t param_id,
+ uint32_t *param_value);
+
+void nhit_req_purge(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+
+bool nhit_req_should_promote(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+
+#endif /* NHIT_PROMOTION_POLICY_H_ */
diff --git a/src/spdk/ocf/src/promotion/nhit/nhit_hash.c b/src/spdk/ocf/src/promotion/nhit/nhit_hash.c
new file mode 100644
index 000000000..d68e55664
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/nhit/nhit_hash.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "../../ocf_priv.h"
+
+#include "nhit_hash.h"
+
+/* Implementation of hashmap-ish structure for tracking core lines in nhit
+ * promotion policy. It consists of two arrays:
+ * - hash_map - indexed by hash formed from core id and core lba pairs,
+ * contains pointers (indices) to the ring buffer. Each index in this array
+ * has its own rwsem.
+ * - ring_buffer - contains per-coreline metadata and collision info for
+ * open addressing. If we run out of space in this array, we just loop around
+ *   and insert elements from the beginning. So the lifetime of a core line varies
+ * depending on insertion and removal rate.
+ *
+ * and rb_pointer which is index to ring_buffer element that is going to be used
+ * for next insertion.
+ *
+ * Operations:
+ * - query(core_id, core_lba):
+ * Check if core line is present in structure, bump up counter and
+ * return its value.
+ *
+ * - insertion(core_id, core_lba):
+ * Insert new core line into structure
+ * 1. get new slot from ring buffer
+ * a. check if current slot under rb_pointer is valid
+ * and if not - exit
+ * b. set current slot as invalid and increment rb_pointer
+ * 2. lock hash bucket for new item and for ring buffer slot
+ * (if non-empty) in ascending bucket id order (to avoid deadlock)
+ * 3. insert new data, add to collision
+ * 4. unlock both hash buckets
+ * 5. commit rb_slot (mark it as valid)
+ *
+ * Insertion explained visually:
+ *
+ * Suppose that we want to add a new core line with hash value H which already has
+ * some colliding core lines
+ *
+ * hash(core_id, core_lba)
+ * +
+ * |
+ * v
+ * +--+--+--+--+--+--+--+--++-+--+
+ * | | |I | | | | | |H | | hash_map
+ * +--+--++-+--+--+--+--+--++-+--+
+ * __| rb_pointer | _______
+ * | + | | |
+ * v v v | v
+ * +--++-+--+---+-+--+--+--++-+--+--++-+--++-+
+ * | | | | |X | | | | | | | | | | ring_buffer
+ * +--++-+--+---+-+--+--+--++-+--+--++-+--+--+
+ * | ^ | ^
+ * |________| |________|
+ *
+ * We will attempt to insert new element at rb_pointer. Since rb_pointer is
+ * pointing to occupied rb slot we need to write-lock hash bucket I associated
+ * with this slot and remove it from collision list. We've gained an empty slot
+ * and we use slot X for new hash H entry.
+ *
+ * +--+--+--+--+--+--+--+--+--+--+
+ * | | |I | | | | | |H | | hash_map
+ * +--+--++-+--+--+--+--+--++-+--+
+ * __| rb_pointer | _______
+ * | + | | |
+ * v v v | v
+ * +--++-+--+-----++-+--+--++-+--+--++-+--++-+
+ * | | | | |X | | | | | | | | | | ring_buffer
+ * +--+--+--+---+-+--+--+--++-+--+--++-+--++-+
+ * ^ | ^ |
+ * | |________| |
+ * |__________________________|
+ *
+ * Valid field in nhit_list_elem is guarded by rb_pointer_lock to make sure we
+ * won't try to use the same slot in two threads. That would be possible if in
+ * time between removal from collision and insertion into the new one the
+ * rb_pointer would go around the whole structure (likelihood depends on the size of
+ * ring_buffer).
+ */
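
A minimal usage sketch of the API documented above (error handling trimmed; the sizes and IDs are arbitrary, and the helper name is hypothetical):

#include "nhit_hash.h"

/* Hypothetical: track and read back the occurrence counter of one core line. */
static void nhit_hash_example(void)
{
	nhit_hash_t map;
	int32_t hits;

	if (nhit_hash_init(1024, &map))
		return;

	nhit_hash_insert(map, 1 /* core_id */, 4096 /* core_lba */);

	if (nhit_hash_query(map, 1, 4096, &hits)) {
		/* hits >= 2 here: insert stored 1 and query itself bumped it */
	}

	nhit_hash_deinit(map);
}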
+
+#define HASH_PRIME 4099
+
+struct nhit_list_elem {
+ /* Fields are ordered for memory efficiency, not for looks. */
+ uint64_t core_lba;
+ env_atomic counter;
+ ocf_cache_line_t coll_prev;
+ ocf_cache_line_t coll_next;
+ ocf_core_id_t core_id;
+ bool valid;
+};
+
+struct nhit_hash {
+ ocf_cache_line_t hash_entries;
+ uint64_t rb_entries;
+
+ ocf_cache_line_t *hash_map;
+ env_rwsem *hash_locks;
+
+ struct nhit_list_elem *ring_buffer;
+ uint64_t rb_pointer;
+ env_spinlock rb_pointer_lock;
+};
+
+static uint64_t calculate_hash_buckets(uint64_t hash_size)
+{
+ return OCF_DIV_ROUND_UP(hash_size / 4, HASH_PRIME) * HASH_PRIME - 1;
+}
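
A numeric example, with a hypothetical size:

/*
 * hash_size = 32768 ring-buffer entries:
 *   32768 / 4                 = 8192
 *   DIV_ROUND_UP(8192, 4099)  = 2
 *   2 * 4099 - 1              = 8197 hash buckets
 *
 * The "- 1" presumably keeps the bucket count from being a multiple of
 * HASH_PRIME, which would make core_lba * HASH_PRIME collapse to a few
 * residues under the modulo in hash_function().
 */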
+
+uint64_t nhit_hash_sizeof(uint64_t hash_size)
+{
+ uint64_t size = 0;
+ uint64_t n_buckets = calculate_hash_buckets(hash_size);
+
+ size += sizeof(struct nhit_hash);
+
+ size += n_buckets * sizeof(ocf_cache_line_t);
+ size += n_buckets * sizeof(env_rwsem);
+
+ size += hash_size * sizeof(struct nhit_list_elem);
+
+ return size;
+}
+
+ocf_error_t nhit_hash_init(uint64_t hash_size, nhit_hash_t *ctx)
+{
+ int result = 0;
+ struct nhit_hash *new_ctx;
+ uint32_t i;
+ int64_t i_locks;
+
+ new_ctx = env_vzalloc(sizeof(*new_ctx));
+ if (!new_ctx) {
+ result = -OCF_ERR_NO_MEM;
+ goto exit;
+ }
+
+ new_ctx->rb_entries = hash_size;
+ new_ctx->hash_entries = calculate_hash_buckets(hash_size);
+
+ new_ctx->hash_map = env_vzalloc(
+ new_ctx->hash_entries * sizeof(*new_ctx->hash_map));
+ if (!new_ctx->hash_map) {
+ result = -OCF_ERR_NO_MEM;
+ goto dealloc_ctx;
+ }
+ for (i = 0; i < new_ctx->hash_entries; i++)
+ new_ctx->hash_map[i] = new_ctx->rb_entries;
+
+ new_ctx->hash_locks = env_vzalloc(
+ new_ctx->hash_entries * sizeof(*new_ctx->hash_locks));
+ if (!new_ctx->hash_locks) {
+ result = -OCF_ERR_NO_MEM;
+ goto dealloc_hash;
+ }
+
+ for (i_locks = 0; i_locks < new_ctx->hash_entries; i_locks++) {
+ if (env_rwsem_init(&new_ctx->hash_locks[i_locks])) {
+ result = -OCF_ERR_UNKNOWN;
+ goto dealloc_locks;
+ }
+ }
+
+ new_ctx->ring_buffer = env_vzalloc(
+ new_ctx->rb_entries * sizeof(*new_ctx->ring_buffer));
+ if (!new_ctx->ring_buffer) {
+ result = -OCF_ERR_NO_MEM;
+ goto dealloc_locks;
+ }
+ for (i = 0; i < new_ctx->rb_entries; i++) {
+ new_ctx->ring_buffer[i].core_id = OCF_CORE_ID_INVALID;
+ new_ctx->ring_buffer[i].valid = true;
+ env_atomic_set(&new_ctx->ring_buffer[i].counter, 0);
+ }
+
+ result = env_spinlock_init(&new_ctx->rb_pointer_lock);
+	if (result)
+		goto dealloc_rb;
+
+ new_ctx->rb_pointer = 0;
+
+ *ctx = new_ctx;
+ return 0;
+
+dealloc_rb:
+	env_vfree(new_ctx->ring_buffer);
+dealloc_locks:
+ while (i_locks--)
+ ENV_BUG_ON(env_rwsem_destroy(&new_ctx->hash_locks[i_locks]));
+ env_vfree(new_ctx->hash_locks);
+dealloc_hash:
+ env_vfree(new_ctx->hash_map);
+dealloc_ctx:
+ env_vfree(new_ctx);
+exit:
+ return result;
+}
+
+void nhit_hash_deinit(nhit_hash_t ctx)
+{
+ ocf_cache_line_t i;
+
+ env_spinlock_destroy(&ctx->rb_pointer_lock);
+ for (i = 0; i < ctx->hash_entries; i++)
+ ENV_BUG_ON(env_rwsem_destroy(&ctx->hash_locks[i]));
+
+ env_vfree(ctx->ring_buffer);
+ env_vfree(ctx->hash_locks);
+ env_vfree(ctx->hash_map);
+ env_vfree(ctx);
+}
+
+static ocf_cache_line_t hash_function(ocf_core_id_t core_id, uint64_t core_lba,
+ uint64_t limit)
+{
+ if (core_id == OCF_CORE_ID_INVALID)
+ return limit;
+
+ return (ocf_cache_line_t) ((core_lba * HASH_PRIME + core_id) % limit);
+}
+
+static ocf_cache_line_t core_line_lookup(nhit_hash_t ctx,
+ ocf_core_id_t core_id, uint64_t core_lba)
+{
+ ocf_cache_line_t hash = hash_function(core_id, core_lba,
+ ctx->hash_entries);
+ ocf_cache_line_t needle = ctx->rb_entries;
+ ocf_cache_line_t cur;
+
+ for (cur = ctx->hash_map[hash]; cur != ctx->rb_entries;
+ cur = ctx->ring_buffer[cur].coll_next) {
+ struct nhit_list_elem *cur_elem = &ctx->ring_buffer[cur];
+
+ if (cur_elem->core_lba == core_lba &&
+ cur_elem->core_id == core_id) {
+ needle = cur;
+ break;
+ }
+ }
+
+ return needle;
+}
+
+static inline bool get_rb_slot(nhit_hash_t ctx, uint64_t *slot)
+{
+ bool result = true;
+
+ OCF_CHECK_NULL(slot);
+
+ env_spinlock_lock(&ctx->rb_pointer_lock);
+
+ *slot = ctx->rb_pointer;
+ result = ctx->ring_buffer[*slot].valid;
+
+ ctx->ring_buffer[*slot].valid = false;
+
+ ctx->rb_pointer = (*slot + 1) % ctx->rb_entries;
+
+ env_spinlock_unlock(&ctx->rb_pointer_lock);
+
+ return result;
+}
+
+static inline void commit_rb_slot(nhit_hash_t ctx, uint64_t slot)
+{
+ env_spinlock_lock(&ctx->rb_pointer_lock);
+
+ ctx->ring_buffer[slot].valid = true;
+
+ env_spinlock_unlock(&ctx->rb_pointer_lock);
+}
+
+static void collision_remove(nhit_hash_t ctx, uint64_t slot_id)
+{
+ struct nhit_list_elem *slot = &ctx->ring_buffer[slot_id];
+ ocf_cache_line_t hash = hash_function(slot->core_id, slot->core_lba,
+ ctx->hash_entries);
+
+ if (slot->core_id == OCF_CORE_ID_INVALID)
+ return;
+
+ slot->core_id = OCF_CORE_ID_INVALID;
+
+ if (slot->coll_prev != ctx->rb_entries)
+ ctx->ring_buffer[slot->coll_prev].coll_next = slot->coll_next;
+
+ if (slot->coll_next != ctx->rb_entries)
+ ctx->ring_buffer[slot->coll_next].coll_prev = slot->coll_prev;
+
+ if (ctx->hash_map[hash] == slot_id)
+ ctx->hash_map[hash] = slot->coll_next;
+}
+
+static void collision_insert_new(nhit_hash_t ctx,
+ uint64_t slot_id, ocf_core_id_t core_id,
+ uint64_t core_lba)
+{
+ ocf_cache_line_t hash = hash_function(core_id, core_lba,
+ ctx->hash_entries);
+ struct nhit_list_elem *slot = &ctx->ring_buffer[slot_id];
+
+ slot->core_id = core_id;
+ slot->core_lba = core_lba;
+ slot->coll_next = ctx->hash_map[hash];
+ slot->coll_prev = ctx->rb_entries;
+ env_atomic_set(&slot->counter, 1);
+
+ if (ctx->hash_map[hash] != ctx->rb_entries)
+ ctx->ring_buffer[ctx->hash_map[hash]].coll_prev = slot_id;
+
+ ctx->hash_map[hash] = slot_id;
+}
+
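+/* Take write locks on the buckets of both the evicted and the inserted
+ * entry. Locking in ascending bucket order avoids deadlock; an index equal
+ * to hash_entries (invalid core id) is skipped.
+ */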
+static inline void write_lock_hashes(nhit_hash_t ctx, ocf_core_id_t core_id1,
+ uint64_t core_lba1, ocf_core_id_t core_id2, uint64_t core_lba2)
+{
+ ocf_cache_line_t hash1 = hash_function(core_id1, core_lba1,
+ ctx->hash_entries);
+ ocf_cache_line_t hash2 = hash_function(core_id2, core_lba2,
+ ctx->hash_entries);
+ ocf_cache_line_t lock_order[2] = {
+ OCF_MIN(hash1, hash2),
+ OCF_MAX(hash1, hash2)};
+
+ if (lock_order[0] != ctx->hash_entries)
+ env_rwsem_down_write(&ctx->hash_locks[lock_order[0]]);
+
+ if ((lock_order[1] != ctx->hash_entries) && (lock_order[0] != lock_order[1]))
+ env_rwsem_down_write(&ctx->hash_locks[lock_order[1]]);
+}
+
+static inline void write_unlock_hashes(nhit_hash_t ctx, ocf_core_id_t core_id1,
+ uint64_t core_lba1, ocf_core_id_t core_id2, uint64_t core_lba2)
+{
+ ocf_cache_line_t hash1 = hash_function(core_id1, core_lba1,
+ ctx->hash_entries);
+ ocf_cache_line_t hash2 = hash_function(core_id2, core_lba2,
+ ctx->hash_entries);
+
+ if (hash1 != ctx->hash_entries)
+ env_rwsem_up_write(&ctx->hash_locks[hash1]);
+
+ if ((hash2 != ctx->hash_entries) && (hash1 != hash2))
+ env_rwsem_up_write(&ctx->hash_locks[hash2]);
+}
+
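+/* Insert a core line into the tracking structure: claim the oldest ring
+ * buffer slot, evict whatever mapping it held, and rebind it to the new
+ * (core_id, core_lba) pair under the relevant bucket locks.
+ */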
+void nhit_hash_insert(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba)
+{
+ uint64_t slot_id;
+ struct nhit_list_elem *slot;
+ ocf_core_id_t slot_core_id;
+ uint64_t slot_core_lba;
+
+ if (!get_rb_slot(ctx, &slot_id))
+ return;
+
+ slot = &ctx->ring_buffer[slot_id];
+ slot_core_id = slot->core_id;
+ slot_core_lba = slot->core_lba;
+
+ write_lock_hashes(ctx, core_id, core_lba, slot_core_id, slot_core_lba);
+
+ collision_remove(ctx, slot_id);
+ collision_insert_new(ctx, slot_id, core_id, core_lba);
+
+ write_unlock_hashes(ctx, core_id, core_lba, slot_core_id, slot_core_lba);
+
+ commit_rb_slot(ctx, slot_id);
+}
+
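+/* Look up a core line under the bucket read lock and, if it is tracked,
+ * bump its hit counter and return it via 'counter'. Returns false when the
+ * line is not present in the ring buffer.
+ */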
+bool nhit_hash_query(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba,
+ int32_t *counter)
+{
+ ocf_cache_line_t hash = hash_function(core_id, core_lba,
+ ctx->hash_entries);
+ uint64_t rb_idx;
+
+ OCF_CHECK_NULL(counter);
+
+ env_rwsem_down_read(&ctx->hash_locks[hash]);
+ rb_idx = core_line_lookup(ctx, core_id, core_lba);
+
+ if (rb_idx == ctx->rb_entries) {
+ env_rwsem_up_read(&ctx->hash_locks[hash]);
+ return false;
+ }
+
+ *counter = env_atomic_inc_return(&ctx->ring_buffer[rb_idx].counter);
+
+ env_rwsem_up_read(&ctx->hash_locks[hash]);
+
+ return true;
+}
+
+void nhit_hash_set_occurences(nhit_hash_t ctx, ocf_core_id_t core_id,
+ uint64_t core_lba, int32_t occurences)
+{
+ ocf_cache_line_t hash = hash_function(core_id, core_lba,
+ ctx->hash_entries);
+ uint64_t rb_idx;
+
+ env_rwsem_down_read(&ctx->hash_locks[hash]);
+ rb_idx = core_line_lookup(ctx, core_id, core_lba);
+
+ if (rb_idx == ctx->rb_entries) {
+ env_rwsem_up_read(&ctx->hash_locks[hash]);
+ return;
+ }
+
+ env_atomic_set(&ctx->ring_buffer[rb_idx].counter, occurences);
+
+ env_rwsem_up_read(&ctx->hash_locks[hash]);
+}
+
diff --git a/src/spdk/ocf/src/promotion/nhit/nhit_hash.h b/src/spdk/ocf/src/promotion/nhit/nhit_hash.h
new file mode 100644
index 000000000..12c094915
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/nhit/nhit_hash.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef NHIT_HASH_H_
+#define NHIT_HASH_H_
+
+#include "ocf/ocf.h"
+
+typedef struct nhit_hash *nhit_hash_t;
+
+uint64_t nhit_hash_sizeof(uint64_t hash_size);
+
+ocf_error_t nhit_hash_init(uint64_t hash_size, nhit_hash_t *ctx);
+
+void nhit_hash_deinit(nhit_hash_t ctx);
+
+void nhit_hash_insert(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba);
+
+bool nhit_hash_query(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba,
+ int32_t *counter);
+
+void nhit_hash_set_occurences(nhit_hash_t ctx, ocf_core_id_t core_id,
+ uint64_t core_lba, int32_t occurences);
+#endif /* NHIT_HASH_H_ */
diff --git a/src/spdk/ocf/src/promotion/nhit/nhit_structs.h b/src/spdk/ocf/src/promotion/nhit/nhit_structs.h
new file mode 100644
index 000000000..bd02f0d9a
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/nhit/nhit_structs.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright(c) 2012-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#ifndef __PROMOTION_NHIT_STRUCTS_H_
+#define __PROMOTION_NHIT_STRUCTS_H_
+
+struct nhit_promotion_policy_config {
+ uint32_t insertion_threshold;
+ /*!< Number of hits */
+
+ uint32_t trigger_threshold;
+ /*!< Cache occupancy (percentage value) */
+};
+
+#endif
diff --git a/src/spdk/ocf/src/promotion/ops.h b/src/spdk/ocf/src/promotion/ops.h
new file mode 100644
index 000000000..a59e1cc35
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/ops.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef PROMOTION_OPS_H_
+#define PROMOTION_OPS_H_
+
+#include "../metadata/metadata.h"
+#include "promotion.h"
+
+struct ocf_promotion_policy {
+ ocf_cache_t owner;
+
+ ocf_promotion_t type;
+
+ void *config;
+ /* Pointer to config values stored in cache superblock */
+
+ void *ctx;
+};
+
+struct promotion_policy_ops {
+ const char *name;
+ /*!< Promotion policy name */
+
+ void (*setup)(ocf_cache_t cache);
+ /*!< Initialize promotion policy default config */
+
+ ocf_error_t (*init)(ocf_cache_t cache);
+ /*!< Allocate and initialize promotion policy */
+
+ void (*deinit)(ocf_promotion_policy_t policy);
+ /*!< Deinit and free promotion policy */
+
+ ocf_error_t (*set_param)(ocf_cache_t cache, uint8_t param_id,
+ uint32_t param_value);
+ /*!< Set promotion policy parameter */
+
+ ocf_error_t (*get_param)(ocf_cache_t cache, uint8_t param_id,
+ uint32_t *param_value);
+ /*!< Get promotion policy parameter */
+
+ void (*req_purge)(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+ /*!< Called when request core lines have been inserted into cache or
+ * the request is a discard */
+
+ bool (*req_should_promote)(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+ /*!< Should request lines be inserted into cache */
+};
+
+extern struct promotion_policy_ops ocf_promotion_policies[ocf_promotion_max];
+
+#endif /* PROMOTION_OPS_H_ */
+
diff --git a/src/spdk/ocf/src/promotion/promotion.c b/src/spdk/ocf/src/promotion/promotion.c
new file mode 100644
index 000000000..0ed7e96a9
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/promotion.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "../metadata/metadata.h"
+
+#include "promotion.h"
+#include "ops.h"
+#include "nhit/nhit.h"
+
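+/* Dispatch table for promotion policies. The 'always' policy provides no
+ * callbacks: init/deinit/purge become no-ops, parameter get/set return
+ * -OCF_ERR_INVAL and req_should_promote() defaults to true.
+ */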
+struct promotion_policy_ops ocf_promotion_policies[ocf_promotion_max] = {
+ [ocf_promotion_always] = {
+ .name = "always",
+ },
+ [ocf_promotion_nhit] = {
+ .name = "nhit",
+ .setup = nhit_setup,
+ .init = nhit_init,
+ .deinit = nhit_deinit,
+ .set_param = nhit_set_param,
+ .get_param = nhit_get_param,
+ .req_purge = nhit_req_purge,
+ .req_should_promote = nhit_req_should_promote,
+ },
+};
+
+ocf_error_t ocf_promotion_init(ocf_cache_t cache, ocf_promotion_t type)
+{
+ ocf_promotion_policy_t policy;
+ ocf_error_t result = 0;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ policy = env_vmalloc(sizeof(*policy));
+ if (!policy)
+ return -OCF_ERR_NO_MEM;
+
+ policy->type = type;
+ policy->owner = cache;
+ policy->config =
+ (void *)&cache->conf_meta->promotion[type].data;
+ cache->promotion_policy = policy;
+
+ if (ocf_promotion_policies[type].init)
+ result = ocf_promotion_policies[type].init(cache);
+
+ if (result) {
+ env_vfree(cache->promotion_policy);
+ cache->promotion_policy = NULL;
+ ocf_cache_log(cache, log_info,
+ "Policy '%s' failed to initialize\n",
+ ocf_promotion_policies[type].name);
+ } else {
+ ocf_cache_log(cache, log_info,
+ "Policy '%s' initialized successfully\n",
+ ocf_promotion_policies[type].name);
+ }
+
+ return result;
+}
+
+void ocf_promotion_deinit(ocf_promotion_policy_t policy)
+{
+ ocf_promotion_t type = policy->type;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ if (ocf_promotion_policies[type].deinit)
+ ocf_promotion_policies[type].deinit(policy);
+
+ env_vfree(policy);
+}
+
+ocf_error_t ocf_promotion_set_policy(ocf_promotion_policy_t policy,
+ ocf_promotion_t type)
+{
+ ocf_error_t result = 0;
+ ocf_cache_t cache = policy->owner;
+ ocf_promotion_t prev_policy;
+
+ if (type >= ocf_promotion_max)
+ return -OCF_ERR_INVAL;
+
+ prev_policy = cache->conf_meta->promotion_policy_type;
+
+ if (type == prev_policy) {
+ ocf_cache_log(cache, log_info, "Promotion policy '%s' is already set\n",
+ ocf_promotion_policies[type].name);
+ return 0;
+ }
+
+ if (ocf_promotion_policies[prev_policy].deinit)
+ ocf_promotion_policies[prev_policy].deinit(policy);
+
+ cache->conf_meta->promotion_policy_type = type;
+ policy->type = type;
+
+ if (ocf_promotion_policies[type].init)
+ result = ocf_promotion_policies[type].init(cache);
+
+ if (result) {
+ ocf_cache_log(cache, log_err,
+ "Error switching to new promotion policy\n");
+ ocf_cache_log(cache, log_err,
+ "Falling back to 'always' promotion policy\n");
+ cache->conf_meta->promotion_policy_type = ocf_promotion_always;
+ policy->type = ocf_promotion_always;
+ } else {
+ ocf_cache_log(cache, log_info,
+ "Switched to '%s' promotion policy\n",
+ ocf_promotion_policies[type].name);
+ }
+
+ return result;
+}
+
+ocf_error_t ocf_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t param_value)
+{
+ ocf_error_t result = -OCF_ERR_INVAL;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ if (ocf_promotion_policies[type].set_param) {
+ result = ocf_promotion_policies[type].set_param(cache, param_id,
+ param_value);
+ }
+
+ return result;
+}
+
+ocf_error_t ocf_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t *param_value)
+{
+ ocf_error_t result = -OCF_ERR_INVAL;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ if (ocf_promotion_policies[type].get_param) {
+ result = ocf_promotion_policies[type].get_param(cache, param_id,
+ param_value);
+ }
+
+ return result;
+}
+
+void ocf_promotion_req_purge(ocf_promotion_policy_t policy,
+ struct ocf_request *req)
+{
+ ocf_promotion_t type = policy->type;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ if (ocf_promotion_policies[type].req_purge)
+ ocf_promotion_policies[type].req_purge(policy, req);
+}
+
+bool ocf_promotion_req_should_promote(ocf_promotion_policy_t policy,
+ struct ocf_request *req)
+{
+ ocf_promotion_t type = policy->type;
+ bool result = true;
+
+ ENV_BUG_ON(type >= ocf_promotion_max);
+
+ if (ocf_promotion_policies[type].req_should_promote) {
+ result = ocf_promotion_policies[type].req_should_promote(policy,
+ req);
+ }
+
+ return result;
+}
+
diff --git a/src/spdk/ocf/src/promotion/promotion.h b/src/spdk/ocf/src/promotion/promotion.h
new file mode 100644
index 000000000..589da22d3
--- /dev/null
+++ b/src/spdk/ocf/src/promotion/promotion.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef PROMOTION_H_
+#define PROMOTION_H_
+
+#include "ocf/ocf.h"
+#include "../ocf_request.h"
+
+#define PROMOTION_POLICY_CONFIG_BYTES 256
+#define PROMOTION_POLICY_TYPE_MAX 2
+
+
+struct promotion_policy_config {
+ uint8_t data[PROMOTION_POLICY_CONFIG_BYTES];
+};
+
+typedef struct ocf_promotion_policy *ocf_promotion_policy_t;
+
+/**
+ * @brief Initialize promotion policy default values. Should be called after
+ * cache metadata has been allocated and cache->conf_meta->promotion_policy_type
+ * has been set.
+ *
+ * @param[in] cache OCF cache instance
+ */
+void ocf_promotion_setup(ocf_cache_t cache);
+
+/**
+ * @brief Allocate and initialize promotion policy. Should be called after cache
+ * metadata has been allocated and cache->conf_meta->promotion_policy_type has
+ * been set.
+ *
+ * @param[in] cache OCF cache instance
+ * @param[in] type type of promotion policy to initialize
+ *
+ * @retval ocf_error_t
+ */
+ocf_error_t ocf_promotion_init(ocf_cache_t cache, ocf_promotion_t type);
+
+/**
+ * @brief Stop, deinitialize and free promotion policy structures.
+ *
+ * @param[in] policy promotion policy handle
+ *
+ * @retval none
+ */
+void ocf_promotion_deinit(ocf_promotion_policy_t policy);
+
+/**
+ * @brief Switch promotion policy to the given type. On failure, fall back to 'always'
+ *
+ * @param[in] policy promotion policy handle
+ * @param[in] type promotion policy target type
+ *
+ * @retval ocf_error_t
+ */
+ocf_error_t ocf_promotion_set_policy(ocf_promotion_policy_t policy,
+ ocf_promotion_t type);
+/**
+ * @brief Set promotion policy parameter
+ *
+ * @param[in] cache cache handle
+ * @param[in] type id of promotion policy to be configured
+ * @param[in] param_id id of parameter to be set
+ * @param[in] param_value value of parameter to be set
+ *
+ * @retval ocf_error_t
+ */
+ocf_error_t ocf_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t param_value);
+
+/**
+ * @brief Get promotion policy parameter
+ *
+ * @param[in] cache cache handle
+ * @param[in] type id of promotion policy to be configured
+ * @param[in] param_id id of parameter to be retrieved
+ * @param[out] param_value retrieved parameter value
+ *
+ * @retval ocf_error_t
+ */
+ocf_error_t ocf_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
+ uint8_t param_id, uint32_t *param_value);
+
+/**
+ * @brief Update promotion policy after cache lines have been promoted to cache
+ * or discarded from core device
+ *
+ * @param[in] policy promotion policy handle
+ * @param[in] req OCF request to be purged
+ *
+ * @retval none
+ */
+void ocf_promotion_req_purge(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+
+/**
+ * @brief Check in promotion policy whether core lines in request can be promoted
+ *
+ * @param[in] policy promotion policy handle
+ * @param[in] req OCF request which is to be promoted
+ *
+ * @retval Whether core lines belonging to this request should be promoted
+ */
+bool ocf_promotion_req_should_promote(ocf_promotion_policy_t policy,
+ struct ocf_request *req);
+
+#endif /* PROMOTION_H_ */
diff --git a/src/spdk/ocf/src/utils/utils_async_lock.c b/src/spdk/ocf/src/utils/utils_async_lock.c
new file mode 100644
index 000000000..2321c2816
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_async_lock.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "utils_async_lock.h"
+
+struct ocf_async_lock_waiter {
+ struct list_head list;
+ ocf_async_lock_t lock;
+ bool write_lock;
+ ocf_async_lock_end_t cmpl;
+};
+
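+/* Pick the waiters that can be granted the lock now: a run of consecutive
+ * read waiters, or a single write waiter once no readers hold the lock.
+ * Must be called with waiters_lock held; the chosen waiters are moved to
+ * the provided list and completed by _ocf_async_lock_run_waiters().
+ */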
+void _ocf_async_lock_collect_waiters(ocf_async_lock_t lock,
+ struct list_head *waiters)
+{
+ ocf_async_lock_waiter_t iter, temp;
+
+ list_for_each_entry_safe(iter, temp, &lock->waiters, list) {
+ if (!iter->write_lock) {
+ list_move_tail(&iter->list, waiters);
+ lock->rd++;
+ } else {
+ if (!lock->rd) {
+ list_move_tail(&iter->list, waiters);
+ lock->wr = 1;
+ }
+ break;
+ }
+ }
+}
+
+void _ocf_async_lock_run_waiters(struct ocf_async_lock *lock,
+ struct list_head *waiters, int status)
+{
+ ocf_async_lock_waiter_t iter, temp;
+
+ /* TODO: Should we run waiters asynchronously? */
+
+ list_for_each_entry_safe(iter, temp, waiters, list) {
+ list_del(&iter->list);
+ iter->cmpl(iter, status);
+ env_vfree(iter);
+ }
+}
+
+int ocf_async_lock_init(struct ocf_async_lock *lock, uint32_t waiter_priv_size)
+{
+ int err = 0;
+
+ err = env_spinlock_init(&lock->waiters_lock);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&lock->waiters);
+ lock->rd = 0;
+ lock->wr = 0;
+ lock->waiter_priv_size = waiter_priv_size;
+
+ return 0;
+}
+
+void ocf_async_lock_deinit(struct ocf_async_lock *lock)
+{
+ struct list_head waiters;
+ ocf_async_lock_waiter_t iter, temp;
+
+ INIT_LIST_HEAD(&waiters);
+
+ env_spinlock_lock(&lock->waiters_lock);
+ list_for_each_entry_safe(iter, temp, &lock->waiters, list)
+ list_move_tail(&iter->list, &waiters);
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ env_spinlock_destroy(&lock->waiters_lock);
+
+ _ocf_async_lock_run_waiters(lock, &waiters, -OCF_ERR_NO_LOCK);
+}
+
+ocf_async_lock_waiter_t ocf_async_lock_new_waiter(ocf_async_lock_t lock,
+ ocf_async_lock_end_t cmpl)
+{
+ ocf_async_lock_waiter_t waiter;
+
+ waiter = env_vmalloc(sizeof(*waiter) + lock->waiter_priv_size);
+ if (!waiter)
+ return NULL;
+
+ waiter->lock = lock;
+ waiter->cmpl = cmpl;
+
+ return waiter;
+}
+
+ocf_async_lock_t ocf_async_lock_waiter_get_lock(ocf_async_lock_waiter_t waiter)
+{
+ return waiter->lock;
+}
+
+void *ocf_async_lock_waiter_get_priv(ocf_async_lock_waiter_t waiter)
+{
+ return (void *)waiter + sizeof(*waiter);
+}
+
+static int _ocf_async_trylock(struct ocf_async_lock *lock)
+{
+ if (lock->wr || lock->rd)
+ return -OCF_ERR_NO_LOCK;
+
+ lock->wr = 1;
+ return 0;
+}
+
+void ocf_async_lock(ocf_async_lock_waiter_t waiter)
+{
+ ocf_async_lock_t lock = waiter->lock;
+ int result;
+
+ env_spinlock_lock(&lock->waiters_lock);
+
+ result = _ocf_async_trylock(lock);
+ if (!result) {
+ env_spinlock_unlock(&lock->waiters_lock);
+ waiter->cmpl(waiter, 0);
+ env_vfree(waiter);
+ return;
+ }
+
+ waiter->write_lock = true;
+ list_add_tail(&waiter->list, &lock->waiters);
+
+ env_spinlock_unlock(&lock->waiters_lock);
+}
+
+int ocf_async_trylock(struct ocf_async_lock *lock)
+{
+ int result;
+
+ env_spinlock_lock(&lock->waiters_lock);
+ result = _ocf_async_trylock(lock);
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ return result;
+}
+
+void ocf_async_unlock(struct ocf_async_lock *lock)
+{
+ struct list_head waiters;
+
+ INIT_LIST_HEAD(&waiters);
+
+ env_spinlock_lock(&lock->waiters_lock);
+
+ ENV_BUG_ON(lock->rd);
+ ENV_BUG_ON(!lock->wr);
+
+ lock->wr = 0;
+
+ _ocf_async_lock_collect_waiters(lock, &waiters);
+
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ _ocf_async_lock_run_waiters(lock, &waiters, 0);
+}
+
+static int _ocf_async_read_trylock(struct ocf_async_lock *lock)
+{
+ if (lock->wr || !list_empty(&lock->waiters))
+ return -OCF_ERR_NO_LOCK;
+
+ lock->rd++;
+ return 0;
+}
+
+void ocf_async_read_lock(ocf_async_lock_waiter_t waiter)
+{
+ ocf_async_lock_t lock = waiter->lock;
+ int result;
+
+ env_spinlock_lock(&lock->waiters_lock);
+
+ result = _ocf_async_read_trylock(lock);
+ if (!result) {
+ env_spinlock_unlock(&lock->waiters_lock);
+ waiter->cmpl(waiter, 0);
+ env_vfree(waiter);
+ return;
+ }
+
+ waiter->write_lock = false;
+ list_add_tail(&waiter->list, &lock->waiters);
+
+ env_spinlock_unlock(&lock->waiters_lock);
+}
+
+int ocf_async_read_trylock(struct ocf_async_lock *lock)
+{
+ int result;
+
+ env_spinlock_lock(&lock->waiters_lock);
+ result = _ocf_async_read_trylock(lock);
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ return result;
+}
+
+void ocf_async_read_unlock(struct ocf_async_lock *lock)
+{
+ struct list_head waiters;
+
+ INIT_LIST_HEAD(&waiters);
+
+ env_spinlock_lock(&lock->waiters_lock);
+
+ ENV_BUG_ON(!lock->rd);
+ ENV_BUG_ON(lock->wr);
+
+ if (--lock->rd) {
+ env_spinlock_unlock(&lock->waiters_lock);
+ return;
+ }
+
+ _ocf_async_lock_collect_waiters(lock, &waiters);
+
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ _ocf_async_lock_run_waiters(lock, &waiters, 0);
+}
+
+bool ocf_async_is_locked(struct ocf_async_lock *lock)
+{
+ bool locked;
+
+ env_spinlock_lock(&lock->waiters_lock);
+ locked = lock->rd || lock->wr;
+ env_spinlock_unlock(&lock->waiters_lock);
+
+ return locked;
+}
diff --git a/src/spdk/ocf/src/utils/utils_async_lock.h b/src/spdk/ocf/src/utils/utils_async_lock.h
new file mode 100644
index 000000000..346f2a473
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_async_lock.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __UTILS_ASYNC_LOCK_H__
+#define __UTILS_ASYNC_LOCK_H__
+
+#include "ocf_env.h"
+
+struct ocf_async_lock {
+ struct list_head waiters;
+ env_spinlock waiters_lock;
+ uint32_t rd;
+ uint32_t wr;
+ uint32_t waiter_priv_size;
+};
+
+typedef struct ocf_async_lock *ocf_async_lock_t;
+
+typedef struct ocf_async_lock_waiter *ocf_async_lock_waiter_t;
+
+typedef void (*ocf_async_lock_end_t)(ocf_async_lock_waiter_t waiter, int error);
+
+int ocf_async_lock_init(ocf_async_lock_t lock, uint32_t waiter_priv_size);
+
+void ocf_async_lock_deinit(ocf_async_lock_t lock);
+
+ocf_async_lock_waiter_t ocf_async_lock_new_waiter(ocf_async_lock_t lock,
+ ocf_async_lock_end_t cmpl);
+
+ocf_async_lock_t ocf_async_lock_waiter_get_lock(ocf_async_lock_waiter_t waiter);
+
+void *ocf_async_lock_waiter_get_priv(ocf_async_lock_waiter_t waiter);
+
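+/* Typical usage (sketch): allocate a waiter with ocf_async_lock_new_waiter()
+ * and pass it to ocf_async_lock() / ocf_async_read_lock(). The completion
+ * callback runs immediately if the lock is free, or later when it is
+ * granted; the waiter is freed by the lock code in either case.
+ */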
+void ocf_async_lock(ocf_async_lock_waiter_t waiter);
+
+int ocf_async_trylock(struct ocf_async_lock *lock);
+
+void ocf_async_unlock(struct ocf_async_lock *lock);
+
+void ocf_async_read_lock(ocf_async_lock_waiter_t waiter);
+
+int ocf_async_read_trylock(struct ocf_async_lock *lock);
+
+void ocf_async_read_unlock(struct ocf_async_lock *lock);
+
+bool ocf_async_is_locked(struct ocf_async_lock *lock);
+
+#endif /* __UTILS_ASYNC_LOCK_H__ */
diff --git a/src/spdk/ocf/src/utils/utils_cache_line.c b/src/spdk/ocf/src/utils/utils_cache_line.c
new file mode 100644
index 000000000..8b734710b
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_cache_line.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "utils_cache_line.h"
+#include "../promotion/promotion.h"
+
+static inline void ocf_cleaning_set_hot_cache_line(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(cleaning_type >= ocf_cleaning_max);
+
+ if (cleaning_policy_ops[cleaning_type].set_hot_cache_line) {
+ cleaning_policy_ops[cleaning_type].
+ set_hot_cache_line(cache, line);
+ }
+}
+
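+/* Clear valid bits for the given sector range. If the whole line becomes
+ * invalid and nobody is waiting for it, it is also removed from the
+ * eviction policy and from the collision/hash metadata.
+ */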
+static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, ocf_cache_line_t line,
+ ocf_core_id_t core_id, ocf_part_id_t part_id)
+{
+ ocf_core_t core;
+ bool is_valid;
+
+ ENV_BUG_ON(core_id >= OCF_CORE_MAX);
+ core = ocf_cache_get_core(cache, core_id);
+
+ if (metadata_clear_valid_sec_changed(cache, line, start_bit, end_bit,
+ &is_valid)) {
+ /*
+ * Update the number of cached data for that core object
+ */
+ env_atomic_dec(&core->runtime_meta->cached_clines);
+ env_atomic_dec(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ }
+
+ /* If the cache line has waiters, do not remove it - a waiter
+ * will reuse it; only the valid bits are cleared. Otherwise,
+ * once fully invalid, the line is purged below.
+ */
+ if (!is_valid && !ocf_cache_line_are_waiters(cache, line)) {
+ ocf_purge_eviction_policy(cache, line);
+ ocf_metadata_sparse_cache_line(cache, line);
+ }
+}
+
+void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
+{
+ ocf_cache_line_t line = req->map[map_idx].coll_idx;
+ ocf_part_id_t part_id;
+ ocf_core_id_t core_id;
+
+ ENV_BUG_ON(!req);
+
+ part_id = ocf_metadata_get_partition_id(cache, line);
+ core_id = ocf_core_get_id(req->core);
+
+ __set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
+ part_id);
+
+ ocf_metadata_flush_mark(cache, req, map_idx, INVALID, start_bit,
+ end_bit);
+}
+
+void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, ocf_cache_line_t line)
+{
+ ocf_part_id_t part_id;
+ ocf_core_id_t core_id;
+
+ ocf_metadata_get_core_and_part_id(cache, line, &core_id, &part_id);
+
+ __set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
+ part_id);
+}
+
+void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
+{
+ ocf_cache_line_t line = req->map[map_idx].coll_idx;
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
+
+ if (metadata_set_valid_sec_changed(cache, line, start_bit, end_bit)) {
+ /*
+ * Update the number of cached data for that core object
+ */
+ env_atomic_inc(&req->core->runtime_meta->cached_clines);
+ env_atomic_inc(&req->core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ }
+}
+
+void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
+{
+ ocf_cache_line_t line = req->map[map_idx].coll_idx;
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
+ uint8_t evp_type = cache->conf_meta->eviction_policy_type;
+ bool line_is_clean;
+
+ if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit,
+ &line_is_clean)) {
+ ocf_metadata_flush_mark(cache, req, map_idx, CLEAN, start_bit,
+ end_bit);
+ if (line_is_clean) {
+ /*
+ * Update the number of dirty cached data for that
+ * core object
+ */
+ if (env_atomic_dec_and_test(&req->core->runtime_meta->
+ dirty_clines)) {
+ /*
+ * If this is last dirty cline reset dirty
+ * timestamp
+ */
+ env_atomic64_set(&req->core->runtime_meta->
+ dirty_since, 0);
+ }
+
+ /*
+ * decrement dirty clines statistic for given cline
+ */
+ env_atomic_dec(&req->core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+
+ if (likely(evict_policy_ops[evp_type].clean_cline))
+ evict_policy_ops[evp_type].clean_cline(cache, part_id, line);
+
+ ocf_purge_cleaning_policy(cache, line);
+ }
+ }
+
+}
+
+void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
+{
+ ocf_cache_line_t line = req->map[map_idx].coll_idx;
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
+ uint8_t evp_type = cache->conf_meta->eviction_policy_type;
+ bool line_was_dirty;
+
+ if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit,
+ &line_was_dirty)) {
+ ocf_metadata_flush_mark(cache, req, map_idx, DIRTY, start_bit,
+ end_bit);
+ if (!line_was_dirty) {
+ /*
+ * If this is first dirty cline set dirty timestamp
+ */
+ env_atomic64_cmpxchg(&req->core->runtime_meta->dirty_since,
+ 0, env_get_tick_count());
+
+ /*
+ * Update the number of dirty cached data for that
+ * core object
+ */
+ env_atomic_inc(&req->core->runtime_meta->dirty_clines);
+
+ /*
+ * increment dirty clines statistic for given cline
+ */
+ env_atomic_inc(&req->core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+
+ if (likely(evict_policy_ops[evp_type].dirty_cline))
+ evict_policy_ops[evp_type].dirty_cline(cache, part_id, line);
+ }
+ }
+
+
+ ocf_cleaning_set_hot_cache_line(cache, line);
+}
diff --git a/src/spdk/ocf/src/utils/utils_cache_line.h b/src/spdk/ocf/src/utils/utils_cache_line.h
new file mode 100644
index 000000000..d91de44c0
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_cache_line.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef UTILS_CACHE_LINE_H_
+#define UTILS_CACHE_LINE_H_
+
+#include "../metadata/metadata.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+#include "../eviction/eviction.h"
+#include "../eviction/ops.h"
+#include "../engine/cache_engine.h"
+#include "../ocf_request.h"
+#include "../ocf_def_priv.h"
+
+/**
+ * @file utils_cache_line.h
+ * @brief OCF utilities for cache line operations
+ */
+
+static inline ocf_cache_line_size_t ocf_line_size(
+ struct ocf_cache *cache)
+{
+ return cache->metadata.settings.size;
+}
+
+static inline uint64_t ocf_line_pages(struct ocf_cache *cache)
+{
+ return cache->metadata.settings.size / PAGE_SIZE;
+}
+
+static inline uint64_t ocf_line_sectors(struct ocf_cache *cache)
+{
+ return cache->metadata.settings.sector_count;
+}
+
+static inline uint64_t ocf_line_end_sector(struct ocf_cache *cache)
+{
+ return cache->metadata.settings.sector_end;
+}
+
+static inline uint64_t ocf_line_start_sector(struct ocf_cache *cache)
+{
+ return cache->metadata.settings.sector_start;
+}
+
+static inline uint64_t ocf_bytes_round_lines(struct ocf_cache *cache,
+ uint64_t bytes)
+{
+ return (bytes + ocf_line_size(cache) - 1) / ocf_line_size(cache);
+}
+
+static inline uint64_t ocf_bytes_2_lines(struct ocf_cache *cache,
+ uint64_t bytes)
+{
+ return bytes / ocf_line_size(cache);
+}
+
+static inline uint64_t ocf_bytes_2_lines_round_up(
+ struct ocf_cache *cache, uint64_t bytes)
+{
+ return OCF_DIV_ROUND_UP(bytes, ocf_line_size(cache));
+}
+
+static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache,
+ uint64_t lines)
+{
+ return lines * ocf_line_size(cache);
+}
+
+/**
+ * @brief Set cache line invalid
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start_bit Start bit of cache line for which state will be set
+ * @param end_bit End bit of cache line for which state will be set
+ * @param req OCF request
+ * @param map_idx Array index to map containing cache line to invalidate
+ */
+void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
+
+
+/**
+ * @brief Set cache line invalid without flush
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start_bit Start bit of cache line for which state will be set
+ * @param end_bit End bit of cache line for which state will be set
+ * @param line Cache line to invalidate
+ */
+void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, ocf_cache_line_t line);
+
+/**
+ * @brief Set cache line valid
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start_bit Start bit of cache line for which state will be set
+ * @param end_bit End bit of cache line for which state will be set
+ * @param req OCF request
+ * @param map_idx Array index to map containing cache line to validate
+ */
+void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
+
+/**
+ * @brief Set cache line clean
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start_bit Start bit of cache line for which state will be set
+ * @param end_bit End bit of cache line for which state will be set
+ * @param req OCF request
+ * @param map_idx Array index to map containing cache line to clean
+ */
+void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
+
+/**
+ * @brief Set cache line dirty
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start_bit Start bit of cache line for which state will be set
+ * @param end_bit End bit of cache line for which state will be set
+ * @param req OCF request
+ * @param map_idx Array index to map containing cache line to mark dirty
+ */
+void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
+ uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
+
+/**
+ * @brief Remove cache line from cleaning policy
+ *
+ * @param cache - cache instance
+ * @param line - cache line to be removed
+ *
+ */
+static inline void ocf_purge_cleaning_policy(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ ocf_cleaning_t clean_type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(clean_type >= ocf_cleaning_max);
+
+ /* Remove from cleaning policy */
+ if (cleaning_policy_ops[clean_type].purge_cache_block != NULL)
+ cleaning_policy_ops[clean_type].purge_cache_block(cache, line);
+}
+
+/**
+ * @brief Remove cache line from eviction policy
+ *
+ * @param cache - cache instance
+ * @param line - cache line to be removed
+ */
+static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
+ ocf_cache_line_t line)
+{
+ ocf_eviction_purge_cache_line(cache, line);
+}
+
+/**
+ * @brief Set cache line clean and invalid and remove it from lists
+ *
+ * @note Collision page must be locked by the caller (either exclusive access
+ * to collision table page OR write lock on metadata hash bucket combined with
+ * shared access to the collision page)
+ *
+ * @param cache Cache instance
+ * @param start Start bit of range in cache line to purge
+ * @param stop End bit of range in cache line to purge
+ * @param req OCF request
+ * @param map_idx Array index to map containing cache line to purge
+ */
+static inline void _ocf_purge_cache_line_sec(struct ocf_cache *cache,
+ uint8_t start, uint8_t stop, struct ocf_request *req,
+ uint32_t map_idx)
+{
+
+ set_cache_line_clean(cache, start, stop, req, map_idx);
+
+ set_cache_line_invalid(cache, start, stop, req, map_idx);
+}
+
+/**
+ * @brief Purge cache lines (remove them completely: from the collision
+ * list, back to the free partition, and from the cleaning and eviction
+ * policies)
+ *
+ * @param req - OCF request to purge
+ */
+static inline void ocf_purge_map_info(struct ocf_request *req)
+{
+ uint32_t map_idx = 0;
+ uint8_t start_bit;
+ uint8_t end_bit;
+ struct ocf_map_info *map = req->map;
+ struct ocf_cache *cache = req->cache;
+ uint32_t count = req->core_line_count;
+
+ /* Purge range on the basis of map info
+ *
+ * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
+ * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
+ * | first | Middle | last |
+ */
+
+ for (map_idx = 0; map_idx < count; map_idx++) {
+ if (map[map_idx].status == LOOKUP_MISS)
+ continue;
+
+ start_bit = 0;
+ end_bit = ocf_line_end_sector(cache);
+
+ if (map_idx == 0) {
+ /* First */
+
+ start_bit = BYTES_TO_SECTORS(req->byte_position)
+ % ocf_line_sectors(cache);
+
+ }
+
+ if (map_idx == (count - 1)) {
+ /* Last */
+
+ end_bit = BYTES_TO_SECTORS(req->byte_position +
+ req->byte_length - 1) %
+ ocf_line_sectors(cache);
+ }
+
+ ocf_metadata_start_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ _ocf_purge_cache_line_sec(cache, start_bit, end_bit, req,
+ map_idx);
+ ocf_metadata_end_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ }
+}
+
+static inline
+uint8_t ocf_map_line_start_sector(struct ocf_request *req, uint32_t line)
+{
+ if (line == 0) {
+ return BYTES_TO_SECTORS(req->byte_position)
+ % ocf_line_sectors(req->cache);
+ }
+
+ return 0;
+}
+
+static inline
+uint8_t ocf_map_line_end_sector(struct ocf_request *req, uint32_t line)
+{
+ if (line == req->core_line_count - 1) {
+ return BYTES_TO_SECTORS(req->byte_position +
+ req->byte_length - 1) %
+ ocf_line_sectors(req->cache);
+ }
+
+ return ocf_line_end_sector(req->cache);
+}
+
+static inline void ocf_set_valid_map_info(struct ocf_request *req)
+{
+ uint32_t map_idx = 0;
+ uint8_t start_bit;
+ uint8_t end_bit;
+ struct ocf_cache *cache = req->cache;
+ uint32_t count = req->core_line_count;
+ struct ocf_map_info *map = req->map;
+
+ /* Set valid bits for sectors on the basis of map info
+ *
+ * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
+ * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
+ * | first | Middle | last |
+ */
+ for (map_idx = 0; map_idx < count; map_idx++) {
+ ENV_BUG_ON(map[map_idx].status == LOOKUP_MISS);
+
+ start_bit = ocf_map_line_start_sector(req, map_idx);
+ end_bit = ocf_map_line_end_sector(req, map_idx);
+
+ ocf_metadata_start_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ set_cache_line_valid(cache, start_bit, end_bit, req, map_idx);
+ ocf_metadata_end_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ }
+}
+
+static inline void ocf_set_dirty_map_info(struct ocf_request *req)
+{
+ uint32_t map_idx = 0;
+ uint8_t start_bit;
+ uint8_t end_bit;
+ struct ocf_cache *cache = req->cache;
+ uint32_t count = req->core_line_count;
+ struct ocf_map_info *map = req->map;
+
+ /* Set dirty bits for sectors on the basis of map info
+ *
+ * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
+ * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
+ * | first | Middle | last |
+ */
+
+ for (map_idx = 0; map_idx < count; map_idx++) {
+ start_bit = ocf_map_line_start_sector(req, map_idx);
+ end_bit = ocf_map_line_end_sector(req, map_idx);
+
+ ocf_metadata_start_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ set_cache_line_dirty(cache, start_bit, end_bit, req, map_idx);
+ ocf_metadata_end_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ }
+}
+
+static inline void ocf_set_clean_map_info(struct ocf_request *req)
+{
+ uint32_t map_idx = 0;
+ uint8_t start_bit;
+ uint8_t end_bit;
+ struct ocf_cache *cache = req->cache;
+ uint32_t count = req->core_line_count;
+ struct ocf_map_info *map = req->map;
+
+ /* Set clean state (clear dirty bits) for sectors on the basis of map info
+ *
+ * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
+ * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
+ * | first | Middle | last |
+ */
+
+ for (map_idx = 0; map_idx < count; map_idx++) {
+ start_bit = ocf_map_line_start_sector(req, map_idx);
+ end_bit = ocf_map_line_end_sector(req, map_idx);
+
+ ocf_metadata_start_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ set_cache_line_clean(cache, start_bit, end_bit, req, map_idx);
+ ocf_metadata_end_collision_shared_access(cache, map[map_idx].
+ coll_idx);
+ }
+}
+
+/**
+ * @brief Validate cache line size
+ *
+ * @param[in] size Cache line size
+ *
+ * @retval true cache line size is valid
+ * @retval false cache line size is invalid
+ */
+static inline bool ocf_cache_line_size_is_valid(uint64_t size)
+{
+ switch (size) {
+ case ocf_cache_line_size_4:
+ case ocf_cache_line_size_8:
+ case ocf_cache_line_size_16:
+ case ocf_cache_line_size_32:
+ case ocf_cache_line_size_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#endif /* UTILS_CACHE_LINE_H_ */
diff --git a/src/spdk/ocf/src/utils/utils_cleaner.c b/src/spdk/ocf/src/utils/utils_cleaner.c
new file mode 100644
index 000000000..f2149d0a7
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_cleaner.c
@@ -0,0 +1,1058 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../ocf_request.h"
+#include "utils_cleaner.h"
+#include "utils_part.h"
+#include "utils_io.h"
+#include "utils_cache_line.h"
+
+#define OCF_UTILS_CLEANER_DEBUG 0
+
+#if 1 == OCF_UTILS_CLEANER_DEBUG
+#define OCF_DEBUG_TRACE(cache) \
+ ocf_cache_log(cache, log_info, "[Utils][cleaner] %s\n", __func__)
+
+#define OCF_DEBUG_MSG(cache, msg) \
+ ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - %s\n", \
+ __func__, msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) \
+ ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - "format"\n", \
+ __func__, ##__VA_ARGS__)
+#else
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#endif
+
+/*
+ * Allocate cleaning request
+ */
+static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
+ uint32_t count, const struct ocf_cleaner_attribs *attribs)
+{
+ struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
+ 0, count * ocf_line_size(cache), OCF_READ);
+ int ret;
+
+ if (!req)
+ return NULL;
+
+ req->info.internal = true;
+ req->info.cleaner_cache_line_lock = attribs->cache_line_lock;
+
+ /* Allocate pages for cleaning IO */
+ req->data = ctx_data_alloc(cache->owner,
+ ocf_line_size(cache) / PAGE_SIZE * count);
+ if (!req->data) {
+ ocf_req_put(req);
+ return NULL;
+ }
+
+ ret = ctx_data_mlock(cache->owner, req->data);
+ if (ret) {
+ ctx_data_free(cache->owner, req->data);
+ ocf_req_put(req);
+ return NULL;
+ }
+
+ return req;
+}
+
+enum {
+ ocf_cleaner_req_type_master = 1,
+ ocf_cleaner_req_type_slave = 2
+};
+
+static struct ocf_request *_ocf_cleaner_alloc_master_req(
+ struct ocf_cache *cache, uint32_t count,
+ const struct ocf_cleaner_attribs *attribs)
+{
+ struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs);
+
+ if (req) {
+ /* Set type of cleaning request */
+ req->master_io_req_type = ocf_cleaner_req_type_master;
+
+ /* In master, save completion context and function */
+ req->priv = attribs->cmpl_context;
+ req->master_io_req = attribs->cmpl_fn;
+
+ /* The count of all requests */
+ env_atomic_set(&req->master_remaining, 1);
+
+ OCF_DEBUG_PARAM(cache, "New master request, count = %u",
+ count);
+ }
+ return req;
+}
+
+static struct ocf_request *_ocf_cleaner_alloc_slave_req(
+ struct ocf_request *master,
+ uint32_t count, const struct ocf_cleaner_attribs *attribs)
+{
+ struct ocf_request *req = _ocf_cleaner_alloc_req(
+ master->cache, count, attribs);
+
+ if (req) {
+ /* Set type of cleaning request */
+ req->master_io_req_type = ocf_cleaner_req_type_slave;
+
+ /* Slave refers to master request, get its reference counter */
+ ocf_req_get(master);
+
+ /* Slave request contains reference to master */
+ req->master_io_req = master;
+
+ /* One more slave request - increase the master's counter of
+ * outstanding requests
+ */
+ env_atomic_inc(&master->master_remaining);
+
+ OCF_DEBUG_PARAM(req->cache,
+ "New slave request, count = %u,all requests count = %d",
+ count, env_atomic_read(&master->master_remaining));
+ }
+ return req;
+}
+
+static void _ocf_cleaner_dealloc_req(struct ocf_request *req)
+{
+ if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
+ /* Slave contains reference to the master request,
+ * release reference counter
+ */
+ struct ocf_request *master = req->master_io_req;
+
+ OCF_DEBUG_MSG(req->cache, "Put master request by slave");
+ ocf_req_put(master);
+
+ OCF_DEBUG_MSG(req->cache, "Free slave request");
+ } else if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+ OCF_DEBUG_MSG(req->cache, "Free master request");
+ } else {
+ ENV_BUG();
+ }
+
+ ctx_data_secure_erase(req->cache->owner, req->data);
+ ctx_data_munlock(req->cache->owner, req->data);
+ ctx_data_free(req->cache->owner, req->data);
+ ocf_req_put(req);
+}
+
+/*
+ * cleaner - Get clean result
+ */
+static void _ocf_cleaner_set_error(struct ocf_request *req)
+{
+ struct ocf_request *master = NULL;
+
+ if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+ master = req;
+ } else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
+ master = req->master_io_req;
+ } else {
+ ENV_BUG();
+ return;
+ }
+
+ master->error = -OCF_ERR_IO;
+}
+
+static void _ocf_cleaner_complete_req(struct ocf_request *req)
+{
+ struct ocf_request *master = NULL;
+ ocf_req_end_t cmpl;
+
+ if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+ OCF_DEBUG_MSG(req->cache, "Master completion");
+ master = req;
+ } else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
+ OCF_DEBUG_MSG(req->cache, "Slave completion");
+ master = req->master_io_req;
+ } else {
+ ENV_BUG();
+ return;
+ }
+
+ OCF_DEBUG_PARAM(req->cache, "Master requests remaining = %d",
+ env_atomic_read(&master->master_remaining));
+
+ if (env_atomic_dec_return(&master->master_remaining)) {
+ /* Not all requests completed */
+ return;
+ }
+
+ OCF_DEBUG_MSG(req->cache, "All cleaning request completed");
+
+ /* Only master contains completion function and completion context */
+ cmpl = master->master_io_req;
+ cmpl(master->priv, master->error);
+}
+
+static void _ocf_cleaner_on_resume(struct ocf_request *req)
+{
+ OCF_DEBUG_TRACE(req->cache);
+ ocf_engine_push_req_front(req, true);
+}
+
+/*
+ * cleaner - Cache line lock; locks cache lines depending on request attributes
+ */
+static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
+{
+ if (!req->info.cleaner_cache_line_lock)
+ return OCF_LOCK_ACQUIRED;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ return ocf_req_async_lock_rd(req, _ocf_cleaner_on_resume);
+}
+
+/*
+ * cleaner - Cache line unlock; unlocks cache lines depending on
+ * request attributes
+ */
+static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
+{
+ if (req->info.cleaner_cache_line_lock) {
+ OCF_DEBUG_TRACE(req->cache);
+ ocf_req_unlock(req);
+ }
+}
+
+static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache,
+ ocf_cache_line_t line, uint8_t sector)
+{
+ bool dirty = metadata_test_dirty_one(cache, line, sector);
+ bool valid = metadata_test_valid_one(cache, line, sector);
+
+ if (!valid && dirty) {
+ /* not valid but dirty - IMPROPER STATE!!! */
+ ENV_BUG();
+ }
+
+ return valid ? dirty : false;
+}
+
+static void _ocf_cleaner_finish_req(struct ocf_request *req)
+{
+ /* Handle cache lines unlocks */
+ _ocf_cleaner_cache_line_unlock(req);
+
+ /* Signal completion to the caller of cleaning */
+ _ocf_cleaner_complete_req(req);
+
+ /* Free allocated resources */
+ _ocf_cleaner_dealloc_req(req);
+}
+
+static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
+{
+ struct ocf_request *req = io->priv1;
+
+ if (error) {
+ ocf_metadata_error(req->cache);
+ req->error = error;
+ }
+
+ OCF_DEBUG_MSG(req->cache, "Cache flush finished");
+
+ _ocf_cleaner_finish_req(req);
+
+ ocf_io_put(io);
+}
+
+static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
+{
+ struct ocf_io *io;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0, 0);
+ if (!io) {
+ ocf_metadata_error(req->cache);
+ req->error = -OCF_ERR_NO_MEM;
+ return -OCF_ERR_NO_MEM;
+ }
+
+ ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
+
+ ocf_volume_submit_flush(io);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_flush_cache = {
+ .read = _ocf_cleaner_fire_flush_cache,
+ .write = _ocf_cleaner_fire_flush_cache,
+};
+
+static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
+{
+ if (error) {
+ ocf_metadata_error(req->cache);
+ req->error = error;
+ _ocf_cleaner_finish_req(req);
+ return;
+ }
+
+ OCF_DEBUG_MSG(req->cache, "Metadata flush finished");
+
+ req->io_if = &_io_if_flush_cache;
+ ocf_engine_push_req_front(req, true);
+}
+
+static int _ocf_cleaner_update_metadata(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+ const struct ocf_map_info *iter = req->map;
+ uint32_t i;
+ ocf_cache_line_t cache_line;
+ ocf_core_id_t core_id;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ ocf_metadata_start_exclusive_access(&cache->metadata.lock);
+ /* Update metadata */
+ for (i = 0; i < req->core_line_count; i++, iter++) {
+ if (iter->status == LOOKUP_MISS)
+ continue;
+
+ if (iter->invalid) {
+ /* An error, do not clean */
+ continue;
+ }
+
+ cache_line = iter->coll_idx;
+
+ if (!metadata_test_dirty(cache, cache_line))
+ continue;
+
+ ocf_metadata_get_core_and_part_id(cache, cache_line,
+ &core_id, &req->part_id);
+ req->core = &cache->core[core_id];
+
+ set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
+ i);
+ }
+
+ ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
+ ocf_metadata_end_exclusive_access(&cache->metadata.lock);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_update_metadata = {
+ .read = _ocf_cleaner_update_metadata,
+ .write = _ocf_cleaner_update_metadata,
+};
+
+static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
+ struct ocf_request *req, int error)
+{
+ uint32_t i;
+ struct ocf_map_info *iter = req->map;
+
+ if (error) {
+ /* Flush error, set error for all cache lines of this core */
+ for (i = 0; i < req->core_line_count; i++, iter++) {
+ if (iter->status == LOOKUP_MISS)
+ continue;
+
+ if (iter->core_id == map->core_id)
+ iter->invalid = true;
+ }
+
+ _ocf_cleaner_set_error(req);
+ }
+
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_MSG(req->cache, "Core flush finished");
+
+ /*
+ * All core flush requests done, switch to post-cleaning activities (metadata update)
+ */
+ req->io_if = &_io_if_update_metadata;
+ ocf_engine_push_req_front(req, true);
+}
+
+static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
+{
+ _ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);
+
+ ocf_io_put(io);
+}
+
+static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
+{
+ uint32_t i;
+ ocf_core_id_t core_id = OCF_CORE_MAX;
+ struct ocf_cache *cache = req->cache;
+ struct ocf_map_info *iter = req->map;
+ ocf_core_t core;
+ struct ocf_io *io;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ /* Protect IO completion race */
+ env_atomic_set(&req->req_remaining, 1);
+
+ /* Submit flush requests */
+ for (i = 0; i < req->core_line_count; i++, iter++) {
+ if (iter->invalid) {
+ /* IO error, skip this item */
+ continue;
+ }
+
+ if (iter->status == LOOKUP_MISS)
+ continue;
+
+ if (core_id == iter->core_id)
+ continue;
+
+ core_id = iter->core_id;
+
+ env_atomic_inc(&req->req_remaining);
+
+ core = ocf_cache_get_core(cache, core_id);
+ io = ocf_new_core_io(core, req->io_queue, 0, 0,
+ OCF_WRITE, 0, 0);
+ if (!io) {
+ _ocf_cleaner_flush_cores_io_end(iter, req, -OCF_ERR_NO_MEM);
+ continue;
+ }
+
+ ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
+
+ ocf_volume_submit_flush(io);
+ }
+
+ /* Protect IO completion race */
+ _ocf_cleaner_flush_cores_io_end(NULL, req, 0);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_flush_cores = {
+ .read = _ocf_cleaner_fire_flush_cores,
+ .write = _ocf_cleaner_fire_flush_cores,
+};
+
+static void _ocf_cleaner_core_io_end(struct ocf_request *req)
+{
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ OCF_DEBUG_MSG(req->cache, "Core writes finished");
+
+ /*
+ * All core write requests done, now we can submit flush requests to
+ * the cores. Move processing to a thread where IO can be submitted.
+ */
+ req->io_if = &_io_if_flush_cores;
+ ocf_engine_push_req_front(req, true);
+}
+
+static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
+{
+ struct ocf_map_info *map = io->priv1;
+ struct ocf_request *req = io->priv2;
+ ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
+
+ if (error) {
+ map->invalid |= 1;
+ _ocf_cleaner_set_error(req);
+ ocf_core_stats_core_error_update(core, OCF_WRITE);
+ }
+
+ _ocf_cleaner_core_io_end(req);
+
+ ocf_io_put(io);
+}
+
+static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
+ struct ocf_map_info *iter, uint64_t begin, uint64_t end)
+{
+ uint64_t addr, offset;
+ int err;
+ ocf_cache_t cache = req->cache;
+ struct ocf_io *io;
+ ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
+ ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
+ iter->coll_idx);
+
+ addr = (ocf_line_size(cache) * iter->core_line)
+ + SECTORS_TO_BYTES(begin);
+ offset = (ocf_line_size(cache) * iter->hash)
+ + SECTORS_TO_BYTES(begin);
+
+ io = ocf_new_core_io(core, req->io_queue, addr,
+ SECTORS_TO_BYTES(end - begin), OCF_WRITE, part_id, 0);
+ if (!io)
+ goto error;
+
+ err = ocf_io_set_data(io, req->data, offset);
+ if (err) {
+ ocf_io_put(io);
+ goto error;
+ }
+
+ ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);
+
+ ocf_core_stats_core_block_update(core, part_id, OCF_WRITE,
+ SECTORS_TO_BYTES(end - begin));
+
+ OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
+ "sector = %llu, count = %llu", iter->core_line, begin,
+ end - begin);
+
+ /* Increase IO counter to be processed */
+ env_atomic_inc(&req->req_remaining);
+
+ /* Send IO */
+ ocf_volume_submit_io(io);
+
+ return;
+error:
+ iter->invalid = true;
+ _ocf_cleaner_set_error(req);
+}
+
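+/* Submit core writes for the dirty data of one cache line: either the whole
+ * line in a single IO, or one IO per contiguous run of dirty sectors.
+ */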
+static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
+ struct ocf_map_info *iter)
+{
+ uint64_t i, dirty_start = 0;
+ struct ocf_cache *cache = req->cache;
+ bool counting_dirty = false;
+
+ /* Check integrity of entry to be cleaned */
+ if (metadata_test_valid(cache, iter->coll_idx)
+ && metadata_test_dirty(cache, iter->coll_idx)) {
+
+ _ocf_cleaner_core_io_for_dirty_range(req, iter, 0,
+ ocf_line_sectors(cache));
+
+ return;
+ }
+
+ /* Per-sector cleaning - a little more effort is required for this */
+ for (i = 0; i < ocf_line_sectors(cache); i++) {
+ if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) {
+ if (counting_dirty) {
+ counting_dirty = false;
+ _ocf_cleaner_core_io_for_dirty_range(req, iter,
+ dirty_start, i);
+ }
+
+ continue;
+ }
+
+ if (!counting_dirty) {
+ counting_dirty = true;
+ dirty_start = i;
+ }
+
+ }
+
+ if (counting_dirty)
+ _ocf_cleaner_core_io_for_dirty_range(req, iter, dirty_start, i);
+}
+
+static int _ocf_cleaner_fire_core(struct ocf_request *req)
+{
+ uint32_t i;
+ struct ocf_map_info *iter;
+
+ OCF_DEBUG_TRACE(req->cache);
+
+ /* Protect IO completion race */
+ env_atomic_set(&req->req_remaining, 1);
+
+ /* Submits writes to the core */
+ for (i = 0; i < req->core_line_count; i++) {
+ iter = &(req->map[i]);
+
+ if (iter->invalid) {
+ /* IO read error on cache, skip this item */
+ continue;
+ }
+
+ if (iter->status == LOOKUP_MISS)
+ continue;
+
+ _ocf_cleaner_core_submit_io(req, iter);
+ }
+
+ /* Protect IO completion race */
+ _ocf_cleaner_core_io_end(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_fire_core = {
+ .read = _ocf_cleaner_fire_core,
+ .write = _ocf_cleaner_fire_core,
+};
+
+static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
+{
+ if (env_atomic_dec_return(&req->req_remaining))
+ return;
+
+ /*
+ * All cache read requests done, now we can submit writes to the cores.
+ * Move processing to a thread where IO can be submitted.
+ */
+ req->io_if = &_io_if_fire_core;
+ ocf_engine_push_req_front(req, true);
+
+ OCF_DEBUG_MSG(req->cache, "Cache reads finished");
+}
+
+static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
+{
+ struct ocf_map_info *map = io->priv1;
+ struct ocf_request *req = io->priv2;
+ ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
+
+ if (error) {
+ map->invalid |= 1;
+ _ocf_cleaner_set_error(req);
+ ocf_core_stats_cache_error_update(core, OCF_READ);
+ }
+
+ _ocf_cleaner_cache_io_end(req);
+
+ ocf_io_put(io);
+}
+
+/*
+ * cleaner - Traverse cache lines to be cleaned, detect sequential IO, and
+ * perform cache reads and core writes
+ */
+static int _ocf_cleaner_fire_cache(struct ocf_request *req)
+{
+ ocf_cache_t cache = req->cache;
+ ocf_core_t core;
+ uint32_t i;
+ struct ocf_map_info *iter = req->map;
+ uint64_t addr, offset;
+ ocf_part_id_t part_id;
+ struct ocf_io *io;
+ int err;
+
+ /* Protect IO completion race */
+ env_atomic_inc(&req->req_remaining);
+
+ for (i = 0; i < req->core_line_count; i++, iter++) {
+ core = ocf_cache_get_core(cache, iter->core_id);
+ if (!core)
+ continue;
+ if (iter->status == LOOKUP_MISS)
+ continue;
+
+ OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
+ iter->coll_idx);
+
+ addr = ocf_metadata_map_lg2phy(cache,
+ iter->coll_idx);
+ addr *= ocf_line_size(cache);
+ addr += cache->device->metadata_offset;
+
+ offset = ocf_line_size(cache) * iter->hash;
+
+ part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);
+
+ io = ocf_new_cache_io(cache, req->io_queue,
+ addr, ocf_line_size(cache),
+ OCF_READ, part_id, 0);
+ if (!io) {
+ /* Allocation error */
+ iter->invalid = true;
+ _ocf_cleaner_set_error(req);
+ continue;
+ }
+
+ ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
+ err = ocf_io_set_data(io, req->data, offset);
+ if (err) {
+ ocf_io_put(io);
+ iter->invalid = true;
+ _ocf_cleaner_set_error(req);
+ continue;
+ }
+
+ ocf_core_stats_cache_block_update(core, part_id, OCF_READ,
+ ocf_line_size(cache));
+
+ ocf_volume_submit_io(io);
+ }
+
+ /* Protect IO completion race */
+ _ocf_cleaner_cache_io_end(req);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_fire_cache = {
+ .read = _ocf_cleaner_fire_cache,
+ .write = _ocf_cleaner_fire_cache,
+};
+
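+/* Start the cleaning pipeline for a prepared request. Stages are chained
+ * through req->io_if: cache reads -> core writes -> core flush -> metadata
+ * update -> cache flush, with completion signalled to the caller at the end.
+ */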
+static int _ocf_cleaner_fire(struct ocf_request *req)
+{
+ int result;
+
+ req->io_if = &_io_if_fire_cache;
+
+ /* Handle cache lines locks */
+ result = _ocf_cleaner_cache_line_lock(req);
+
+ if (result >= 0) {
+ if (result == OCF_LOCK_ACQUIRED) {
+ OCF_DEBUG_MSG(req->cache, "Lock acquired");
+ _ocf_cleaner_fire_cache(req);
+ } else {
+ OCF_DEBUG_MSG(req->cache, "NO Lock");
+ }
+ return 0;
+ } else {
+ OCF_DEBUG_MSG(req->cache, "Lock error");
+ }
+
+ return result;
+}
+
+/* Helper function for 'sort' */
+static int _ocf_cleaner_cmp_private(const void *a, const void *b)
+{
+ struct ocf_map_info *_a = (struct ocf_map_info *)a;
+ struct ocf_map_info *_b = (struct ocf_map_info *)b;
+
+ static uint32_t step = 0;
+
+ OCF_COND_RESCHED_DEFAULT(step);
+
+ if (_a->core_id == _b->core_id)
+ return (_a->core_line > _b->core_line) ? 1 : -1;
+
+ return (_a->core_id > _b->core_id) ? 1 : -1;
+}
+
+/**
+ * Prepare cleaning request to be fired
+ *
+ * @param req cleaning request
+ * @param i_out number of map entries already filled (the remaining entries
+ * are padded with fake misses so they are not cleaned)
+ */
+static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
+ bool do_sort)
+{
+ uint32_t i;
+ /* Set counts of cache IOs */
+ env_atomic_set(&req->req_remaining, i_out);
+
+ /* fill tail of a request with fake MISSes so that it won't
+ * be cleaned
+ */
+ for (; i_out < req->core_line_count; ++i_out) {
+ req->map[i_out].core_id = OCF_CORE_MAX;
+ req->map[i_out].core_line = ULLONG_MAX;
+ req->map[i_out].status = LOOKUP_MISS;
+ req->map[i_out].hash = i_out;
+ }
+
+ if (do_sort) {
+ /* Sort by core id and core line */
+ env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
+ _ocf_cleaner_cmp_private, NULL);
+ for (i = 0; i < req->core_line_count; i++)
+ req->map[i].hash = i;
+ }
+
+ /* issue actual request */
+ return _ocf_cleaner_fire(req);
+}
+
+static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count,
+ bool low_mem)
+{
+ if (low_mem || count <= 4096)
+ return count < 128 ? count : 128;
+
+ return 1024;
+}
+
+static void _ocf_cleaner_fire_error(struct ocf_request *master,
+ struct ocf_request *req, int err)
+{
+ master->error = err;
+ _ocf_cleaner_complete_req(req);
+ _ocf_cleaner_dealloc_req(req);
+}
+
+/*
+ * cleaner - Main function
+ */
+void ocf_cleaner_fire(struct ocf_cache *cache,
+ const struct ocf_cleaner_attribs *attribs)
+{
+ uint32_t i, i_out = 0, count = attribs->count;
+ /* Max cache lines to be cleaned with one request: 1024 if more than 4k
+ * lines are to be flushed, otherwise 128. For large cleaning operations
+ * 1024 is the optimal number, but for smaller ones it is too large to
+ * benefit from overlapping cleaning requests
+ */
+ uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
+ ocf_cache_line_t cache_line;
+ /* It is possible that more than one cleaning request will be generated
+ * for a single cleaning order, thus multiple allocations. At the end of
+ * the loop, req is set to NULL and NOT deallocated, as deallocation is
+ * handled in the completion path.
+ * The first request is called the master and holds the completion
+ * context. Each succeeding request is a slave request which holds a
+ * reference to the master request
+ */
+ struct ocf_request *req = NULL, *master;
+ int err;
+ ocf_core_id_t core_id;
+ uint64_t core_sector;
+
+ /* Allocate master request */
+ master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
+
+ if (!master) {
+ /* Memory allocation error, try to re-allocate a smaller request */
+ max = _ocf_cleaner_get_req_max_count(count, true);
+ master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
+ }
+
+ if (!master) {
+ attribs->cmpl_fn(attribs->cmpl_context, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ req = master;
+
+ /* prevent cleaning completion race */
+ ocf_req_get(master);
+ env_atomic_inc(&master->master_remaining);
+
+ for (i = 0; i < count; i++) {
+
+ /* when the request hasn't been allocated yet or has just been issued */
+ if (!req) {
+ if (max > count - i) {
+ /* less than max left */
+ max = count - i;
+ }
+
+ req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
+ }
+
+ if (!req) {
+ /* Memory allocation error,
+ * try to re-allocate a smaller request
+ */
+ max = _ocf_cleaner_get_req_max_count(max, true);
+ req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
+ }
+
+ /* when request allocation failed stop processing */
+ if (!req) {
+ master->error = -OCF_ERR_NO_MEM;
+ break;
+ }
+
+ if (attribs->getter(cache, attribs->getter_context,
+ i, &cache_line)) {
+ OCF_DEBUG_MSG(cache, "Skip");
+ continue;
+ }
+
+ /* when line already cleaned - rare condition under heavy
+ * I/O workload.
+ */
+ if (!metadata_test_dirty(cache, cache_line)) {
+ OCF_DEBUG_MSG(cache, "Not dirty");
+ continue;
+ }
+
+ if (!metadata_test_valid_any(cache, cache_line)) {
+ OCF_DEBUG_MSG(cache, "No any valid");
+
+ /*
+ * Extremely disturbing cache line state
+ * Cache line (sector) cannot be dirty and not valid
+ */
+ ENV_BUG();
+ continue;
+ }
+
+ /* Get mapping info */
+ ocf_metadata_get_core_info(cache, cache_line, &core_id,
+ &core_sector);
+
+ if (unlikely(!cache->core[core_id].opened)) {
+ OCF_DEBUG_MSG(cache, "Core object inactive");
+ continue;
+ }
+
+ req->map[i_out].core_id = core_id;
+ req->map[i_out].core_line = core_sector;
+ req->map[i_out].coll_idx = cache_line;
+ req->map[i_out].status = LOOKUP_HIT;
+ req->map[i_out].hash = i_out;
+ i_out++;
+
+ if (max == i_out) {
+ err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
+ if (err) {
+ _ocf_cleaner_fire_error(master, req, err);
+ req = NULL;
+ break;
+ }
+ i_out = 0;
+ req = NULL;
+ }
+ }
+
+ if (req) {
+ err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
+ if (err)
+ _ocf_cleaner_fire_error(master, req, err);
+ req = NULL;
+ }
+
+ /* prevent cleaning completion race */
+ _ocf_cleaner_complete_req(master);
+ ocf_req_put(master);
+}
+
+static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,
+ void *context, uint32_t item, ocf_cache_line_t *line)
+{
+ struct flush_data *flush = context;
+
+ if (flush[item].cache_line < cache->device->collision_table_entries) {
+ (*line) = flush[item].cache_line;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
+ struct flush_data *flush, uint32_t count,
+ struct ocf_cleaner_attribs *attribs)
+{
+ attribs->getter = _ocf_cleaner_do_flush_data_getter;
+ attribs->getter_context = flush;
+ attribs->count = count;
+
+ ocf_cleaner_fire(cache, attribs);
+
+ return 0;
+}
+
+/* Helper function for 'sort' */
+static int _ocf_cleaner_cmp(const void *a, const void *b)
+{
+ struct flush_data *_a = (struct flush_data *)a;
+ struct flush_data *_b = (struct flush_data *)b;
+
+ /* TODO: FIXME get rid of static */
+ static uint32_t step = 0;
+
+ OCF_COND_RESCHED(step, 1000000);
+
+ if (_a->core_id == _b->core_id)
+ return (_a->core_line > _b->core_line) ? 1 : -1;
+
+ return (_a->core_id > _b->core_id) ? 1 : -1;
+}
+
+static void _ocf_cleaner_swap(void *a, void *b, int size)
+{
+ struct flush_data *_a = (struct flush_data *)a;
+ struct flush_data *_b = (struct flush_data *)b;
+ struct flush_data t;
+
+ t = *_a;
+ *_a = *_b;
+ *_b = t;
+}
+
+void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num)
+{
+ env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap);
+}
+
+void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
+ uint32_t num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ env_sort(fctbl[i].flush_data, fctbl[i].count,
+ sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp,
+ _ocf_cleaner_swap);
+ }
+}
+
+void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
+{
+ struct ocf_user_part *curr_part;
+ ocf_part_id_t part_id;
+
+ for_each_part(cache, curr_part, part_id)
+ ocf_refcnt_freeze(&cache->refcnt.cleaning[part_id]);
+}
+
+void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
+{
+ struct ocf_user_part *curr_part;
+ ocf_part_id_t part_id;
+
+ for_each_part(cache, curr_part, part_id)
+ ocf_refcnt_unfreeze(&cache->refcnt.cleaning[part_id]);
+}
+
+static void ocf_cleaner_refcnt_register_zero_cb_finish(void *priv)
+{
+ struct ocf_cleaner_wait_context *ctx = priv;
+
+ if (!env_atomic_dec_return(&ctx->waiting))
+ ctx->cb(ctx->priv);
+}
+
+void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
+ struct ocf_cleaner_wait_context *ctx,
+ ocf_cleaner_refcnt_zero_cb_t cb, void *priv)
+{
+ struct ocf_user_part *curr_part;
+ ocf_part_id_t part_id;
+
+ env_atomic_set(&ctx->waiting, 1);
+ ctx->cb = cb;
+ ctx->priv = priv;
+
+ for_each_part(cache, curr_part, part_id) {
+ env_atomic_inc(&ctx->waiting);
+ ocf_refcnt_register_zero_cb(&cache->refcnt.cleaning[part_id],
+ ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
+ }
+
+ ocf_cleaner_refcnt_register_zero_cb_finish(ctx);
+}
diff --git a/src/spdk/ocf/src/utils/utils_cleaner.h b/src/spdk/ocf/src/utils/utils_cleaner.h
new file mode 100644
index 000000000..80d19a9e2
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_cleaner.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef UTILS_CLEANER_H_
+#define UTILS_CLEANER_H_
+
+#include "../ocf_request.h"
+
+/**
+ * @brief Getter for next cache line to be cleaned
+ *
+ * @param cache[in] Cache instance
+ * @param getter_context[in] Context for cleaner caller
+ * @param item[in] Current iteration item when collecting cache lines
+ * @param line[out] Line to be cleaned
+ * @retval 0 Take this cache line and clean it
+ * @retval Non-zero Skip this cache line and do not clean it
+ */
+typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
+ void *getter_context, uint32_t item, ocf_cache_line_t *line);
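+
+/*
+ * Illustrative getter (a sketch, not part of the OCF API): it reads cache
+ * lines from a caller-provided array. The struct my_lines_ctx and its fields
+ * are hypothetical. Returning 0 hands the line to the cleaner, non-zero
+ * skips it:
+ *
+ *   static int my_getter(struct ocf_cache *cache, void *getter_context,
+ *           uint32_t item, ocf_cache_line_t *line)
+ *   {
+ *       struct my_lines_ctx *ctx = getter_context;
+ *
+ *       if (item >= ctx->count)
+ *           return -1;   // nothing to clean at this index
+ *
+ *       *line = ctx->lines[item];
+ *       return 0;
+ *   }
+ */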
+
+/**
+ * @brief Cleaning attributes for clean request
+ */
+struct ocf_cleaner_attribs {
+ uint8_t cache_line_lock : 1; /*!< Clean under cache line lock */
+
+ uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */
+
+ uint32_t count; /*!< max number of cache lines to be cleaned */
+
+ void *cmpl_context; /*!< Completion context of cleaning requester */
+ void (*cmpl_fn)(void *priv, int error); /*!< Completion function of requester */
+
+ ocf_cleaner_get_item getter;
+ /*!< Getter for collecting cache lines which will be cleaned */
+ void *getter_context;
+ /*!< Context for getting cache lines */
+ uint32_t getter_item;
+ /*!< Additional variable that can be used by cleaner caller
+ * to iterate over items
+ */
+
+ ocf_queue_t io_queue;
+};
+
+/**
+ * @brief Flush table entry structure
+ */
+struct flush_data {
+ uint64_t core_line;
+ uint32_t cache_line;
+ ocf_core_id_t core_id;
+};
+
+typedef void (*ocf_flush_containter_coplete_t)(void *ctx);
+
+/**
+ * @brief Flush table container
+ */
+struct flush_container {
+ ocf_core_id_t core_id;
+ struct flush_data *flush_data;
+ uint32_t count;
+ uint32_t iter;
+
+ struct ocf_cleaner_attribs attribs;
+ ocf_cache_t cache;
+
+ struct ocf_request *req;
+
+ uint64_t flush_portion;
+ uint64_t ticks1;
+ uint64_t ticks2;
+
+ ocf_flush_containter_coplete_t end;
+ struct ocf_mngt_cache_flush_context *context;
+};
+
+typedef void (*ocf_cleaner_refcnt_zero_cb_t)(void *priv);
+
+/**
+ * @brief Context for ocf_cleaner_refcnt_register_zero_cb
+ */
+struct ocf_cleaner_wait_context
+{
+ env_atomic waiting;
+ ocf_cleaner_refcnt_zero_cb_t cb;
+ void *priv;
+};
+
+/**
+ * @brief Run cleaning procedure
+ *
+ * @param cache - Cache instance
+ * @param attribs - Cleaning attributes
+ */
+void ocf_cleaner_fire(struct ocf_cache *cache,
+ const struct ocf_cleaner_attribs *attribs);
+
+/**
+ * @brief Perform cleaning procedure for specified flush data. Only dirty
+ * cache lines will be cleaned.
+ *
+ * @param cache - Cache instance
+ * @param flush - flush data to be cleaned
+ * @param count - Count of cache lines to be cleaned
+ * @param attribs - Cleaning attributes
+ * @return - Cleaning result. 0 - no errors, non-zero - errors occurred
+ */
+int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
+ struct flush_data *flush, uint32_t count,
+ struct ocf_cleaner_attribs *attribs);
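+
+/*
+ * Usage sketch (an assumption about the call site, not code from this
+ * module; ctx, queue, flush[], flush_count and my_flush_cmpl are
+ * hypothetical). Only getter, getter_context and count are filled in by
+ * ocf_cleaner_do_flush_data_async() itself:
+ *
+ *   struct ocf_cleaner_attribs attribs = {
+ *       .cache_line_lock = true,
+ *       .do_sort = true,
+ *       .cmpl_context = ctx,
+ *       .cmpl_fn = my_flush_cmpl,
+ *       .io_queue = queue,
+ *   };
+ *
+ *   ocf_cleaner_do_flush_data_async(cache, flush, flush_count, &attribs);
+ */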
+
+/**
+ * @brief Sort flush data by core sector
+ *
+ * @param tbl Flush data to sort
+ * @param num Number of entries in tbl
+ */
+void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num);
+
+/**
+ * @brief Sort flush data in all flush containers
+ *
+ * @param fctbl Flush containers to sort
+ * @param num Number of entries in fctbl
+ */
+void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
+ uint32_t num);
+
+/**
+ * @brief Disable incrementing of cleaner reference counters
+ *
+ * @param cache - Cache instance
+ */
+void ocf_cleaner_refcnt_freeze(ocf_cache_t cache);
+
+/**
+ * @brief Enable incrementing of cleaner reference counters
+ *
+ * @param cache - Cache instance
+ */
+void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache);
+
+/**
+ * @brief Register callback for cleaner reference counters dropping to 0
+ *
+ * @param cache - Cache instance
+ * @param ctx - Routine private context, allocated by caller to avoid ENOMEM
+ * @param cb - Caller callback
+ * @param priv - Caller callback private data
+ */
+void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
+ struct ocf_cleaner_wait_context *ctx,
+ ocf_cleaner_refcnt_zero_cb_t cb, void *priv);
+
+#endif /* UTILS_CLEANER_H_ */
diff --git a/src/spdk/ocf/src/utils/utils_io.c b/src/spdk/ocf/src/utils/utils_io.c
new file mode 100644
index 000000000..9c86608d8
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_io.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_volume_priv.h"
+#include "../ocf_request.h"
+#include "utils_io.h"
+#include "utils_cache_line.h"
+
+struct ocf_submit_volume_context {
+ env_atomic req_remaining;
+ int error;
+ ocf_submit_end_t cmpl;
+ void *priv;
+};
+
+static void _ocf_volume_flush_end(struct ocf_io *io, int error)
+{
+ ocf_submit_end_t cmpl = io->priv1;
+
+ cmpl(io->priv2, error);
+ ocf_io_put(io);
+}
+
+void ocf_submit_volume_flush(ocf_volume_t volume,
+ ocf_submit_end_t cmpl, void *priv)
+{
+ struct ocf_io *io;
+
+ io = ocf_volume_new_io(volume, NULL, 0, 0, OCF_WRITE, 0, 0);
+ if (!io)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ ocf_io_set_cmpl(io, cmpl, priv, _ocf_volume_flush_end);
+
+ ocf_volume_submit_flush(io);
+}
+
+static void ocf_submit_volume_end(struct ocf_io *io, int error)
+{
+ struct ocf_submit_volume_context *context = io->priv1;
+
+ if (error)
+ context->error = error;
+
+ ocf_io_put(io);
+
+ if (env_atomic_dec_return(&context->req_remaining))
+ return;
+
+ context->cmpl(context->priv, context->error);
+ env_vfree(context);
+}
+
+void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
+ uint64_t length, ocf_submit_end_t cmpl, void *priv)
+{
+ struct ocf_submit_volume_context *context;
+ uint64_t bytes;
+ uint64_t sector_mask = (1 << ENV_SECTOR_SHIFT) - 1;
+ uint64_t max_length = (uint32_t)~0 & ~sector_mask;
+ struct ocf_io *io;
+
+ context = env_vzalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ env_atomic_set(&context->req_remaining, 1);
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ while (length) {
+ bytes = OCF_MIN(length, max_length);
+
+ io = ocf_volume_new_io(volume, NULL, addr, bytes,
+ OCF_WRITE, 0, 0);
+ if (!io) {
+ context->error = -OCF_ERR_NO_MEM;
+ break;
+ }
+
+ env_atomic_inc(&context->req_remaining);
+
+ ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
+ ocf_volume_submit_discard(io);
+
+ addr += bytes;
+ length -= bytes;
+ }
+
+ if (env_atomic_dec_return(&context->req_remaining))
+ return;
+
+ cmpl(priv, context->error);
+ env_vfree(context);
+}
+
+void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
+ uint64_t length, ocf_submit_end_t cmpl, void *priv)
+{
+ struct ocf_submit_volume_context *context;
+ uint32_t bytes;
+ uint32_t max_length = ~((uint32_t)PAGE_SIZE - 1);
+ struct ocf_io *io;
+
+ context = env_vzalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ env_atomic_set(&context->req_remaining, 1);
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ while (length) {
+ bytes = OCF_MIN(length, max_length);
+
+ io = ocf_volume_new_io(volume, NULL, addr, bytes,
+ OCF_WRITE, 0, 0);
+ if (!io) {
+ context->error = -OCF_ERR_NO_MEM;
+ break;
+ }
+
+ env_atomic_inc(&context->req_remaining);
+
+ ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
+ ocf_volume_submit_write_zeroes(io);
+
+ addr += bytes;
+ length -= bytes;
+ }
+
+ if (env_atomic_dec_return(&context->req_remaining))
+ return;
+
+ cmpl(priv, context->error);
+ env_vfree(context);
+}
+
+struct ocf_submit_cache_page_context {
+ ocf_cache_t cache;
+ void *buffer;
+ ocf_submit_end_t cmpl;
+ void *priv;
+};
+
+static void ocf_submit_cache_page_end(struct ocf_io *io, int error)
+{
+ struct ocf_submit_cache_page_context *context = io->priv1;
+ ctx_data_t *data = ocf_io_get_data(io);
+
+ if (io->dir == OCF_READ) {
+ ctx_data_rd_check(context->cache->owner, context->buffer,
+ data, PAGE_SIZE);
+ }
+
+ context->cmpl(context->priv, error);
+ ctx_data_free(context->cache->owner, data);
+ env_vfree(context);
+ ocf_io_put(io);
+}
+
+void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
+ void *buffer, ocf_submit_end_t cmpl, void *priv)
+{
+ struct ocf_submit_cache_page_context *context;
+ ctx_data_t *data;
+ struct ocf_io *io;
+ int result = 0;
+
+ context = env_vmalloc(sizeof(*context));
+ if (!context)
+ OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
+
+ context->cache = cache;
+ context->buffer = buffer;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ io = ocf_new_cache_io(cache, NULL, addr, PAGE_SIZE, dir, 0, 0);
+ if (!io) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_io;
+ }
+
+ data = ctx_data_alloc(cache->owner, 1);
+ if (!data) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_data;
+ }
+
+ if (dir == OCF_WRITE)
+ ctx_data_wr_check(cache->owner, data, buffer, PAGE_SIZE);
+
+ result = ocf_io_set_data(io, data, 0);
+ if (result)
+ goto err_set_data;
+
+ ocf_io_set_cmpl(io, context, NULL, ocf_submit_cache_page_end);
+
+ ocf_volume_submit_io(io);
+ return;
+
+err_set_data:
+ ctx_data_free(cache->owner, data);
+err_data:
+ ocf_io_put(io);
+err_io:
+ env_vfree(context);
+ cmpl(priv, result);
+}
+
+static void ocf_submit_volume_req_cmpl(struct ocf_io *io, int error)
+{
+ struct ocf_request *req = io->priv1;
+ ocf_req_end_t callback = io->priv2;
+
+ callback(req, error);
+
+ ocf_io_put(io);
+}
+
+void ocf_submit_cache_reqs(struct ocf_cache *cache,
+ struct ocf_request *req, int dir, uint64_t offset,
+ uint64_t size, unsigned int reqs, ocf_req_end_t callback)
+{
+ uint64_t flags = req->ioi.io.flags;
+ uint32_t io_class = req->ioi.io.io_class;
+ uint64_t addr, bytes, total_bytes = 0;
+ struct ocf_io *io;
+ int err;
+ uint32_t i;
+ uint32_t first_cl = ocf_bytes_2_lines(cache, req->byte_position +
+ offset) - ocf_bytes_2_lines(cache, req->byte_position);
+
+ ENV_BUG_ON(req->byte_length < offset + size);
+ ENV_BUG_ON(first_cl + reqs > req->core_line_count);
+
+ if (reqs == 1) {
+ addr = ocf_metadata_map_lg2phy(cache,
+ req->map[first_cl].coll_idx);
+ addr *= ocf_line_size(cache);
+ addr += cache->device->metadata_offset;
+ addr += ((req->byte_position + offset) % ocf_line_size(cache));
+ bytes = size;
+
+ io = ocf_new_cache_io(cache, req->io_queue,
+ addr, bytes, dir, io_class, flags);
+ if (!io) {
+ callback(req, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
+
+ err = ocf_io_set_data(io, req->data, offset);
+ if (err) {
+ ocf_io_put(io);
+ callback(req, err);
+ return;
+ }
+
+ ocf_core_stats_cache_block_update(req->core, io_class,
+ dir, bytes);
+
+ ocf_volume_submit_io(io);
+ return;
+ }
+
+ /* Issue requests to cache. */
+ for (i = 0; i < reqs; i++) {
+ addr = ocf_metadata_map_lg2phy(cache,
+ req->map[first_cl + i].coll_idx);
+ addr *= ocf_line_size(cache);
+ addr += cache->device->metadata_offset;
+ bytes = ocf_line_size(cache);
+
+ if (i == 0) {
+ uint64_t seek = ((req->byte_position + offset) %
+ ocf_line_size(cache));
+
+ addr += seek;
+ bytes -= seek;
+ } else if (i == (reqs - 1)) {
+ uint64_t skip = (ocf_line_size(cache) -
+ ((req->byte_position + offset + size) %
+ ocf_line_size(cache))) % ocf_line_size(cache);
+
+ bytes -= skip;
+ }
+
+ bytes = OCF_MIN(bytes, size - total_bytes);
+ ENV_BUG_ON(bytes == 0);
+
+ io = ocf_new_cache_io(cache, req->io_queue,
+ addr, bytes, dir, io_class, flags);
+ if (!io) {
+ /* Finish all remaining IOs with an error */
+ for (; i < reqs; i++)
+ callback(req, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
+
+ err = ocf_io_set_data(io, req->data, offset + total_bytes);
+ if (err) {
+ ocf_io_put(io);
+ /* Finish all remaining IOs with an error */
+ for (; i < reqs; i++)
+ callback(req, err);
+ return;
+ }
+ ocf_core_stats_cache_block_update(req->core, io_class,
+ dir, bytes);
+ ocf_volume_submit_io(io);
+ total_bytes += bytes;
+ }
+
+ ENV_BUG_ON(total_bytes != size);
+}
+
+void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
+ ocf_req_end_t callback)
+{
+ uint64_t flags = req->ioi.io.flags;
+ uint32_t io_class = req->ioi.io.io_class;
+ int dir = req->rw;
+ struct ocf_io *io;
+ int err;
+
+ ocf_core_stats_core_block_update(req->core, io_class, dir,
+ req->byte_length);
+
+ io = ocf_volume_new_io(volume, req->io_queue, req->byte_position,
+ req->byte_length, dir, io_class, flags);
+ if (!io) {
+ callback(req, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
+ err = ocf_io_set_data(io, req->data, 0);
+ if (err) {
+ ocf_io_put(io);
+ callback(req, err);
+ return;
+ }
+ ocf_volume_submit_io(io);
+}
diff --git a/src/spdk/ocf/src/utils/utils_io.h b/src/spdk/ocf/src/utils/utils_io.h
new file mode 100644
index 000000000..418491c1c
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_io.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef UTILS_IO_H_
+#define UTILS_IO_H_
+
+#include "../ocf_request.h"
+
+/**
+ * Checks if 2 IOs are overlapping.
+ * @param start1 start of first range (inclusive)
+ * @param end1 end of first range (inclusive)
+ * @param start2 start of second range (inclusive)
+ * @param end2 end of second range (inclusive)
+ * @return 0 in case overlap is not detected, otherwise 1
+ */
+static inline int ocf_io_range_overlaps(uint32_t start1, uint32_t end1,
+ uint32_t start2, uint32_t end2)
+{
+ if (start2 <= start1 && end2 >= start1)
+ return 1;
+
+ if (start2 >= start1 && end1 >= start2)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Checks if 2 IOs are overlapping.
+ * @param start1 start of first range (inclusive)
+ * @param count1 number of bytes, cache lines (etc.) in the first range
+ * @param start2 start of second range (inclusive)
+ * @param count2 number of bytes, cache lines (etc.) in the second range
+ * @return 0 in case overlap is not detected, otherwise 1
+ */
+static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1,
+ uint32_t start2, uint32_t count2)
+{
+ return ocf_io_range_overlaps(start1, start1 + count1 - 1, start2,
+ start2 + count2 - 1);
+}
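+
+/*
+ * Worked example: ocf_io_overlaps(0, 8, 4, 4) compares [0..7] with [4..7]
+ * and returns 1, while ocf_io_overlaps(0, 4, 4, 4) compares [0..3] with
+ * [4..7] and returns 0 - adjacent ranges do not overlap.
+ */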
+
+typedef void (*ocf_submit_end_t)(void *priv, int error);
+
+void ocf_submit_volume_flush(ocf_volume_t volume,
+ ocf_submit_end_t cmpl, void *priv);
+
+void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
+ uint64_t length, ocf_submit_end_t cmpl, void *priv);
+
+void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
+ uint64_t length, ocf_submit_end_t cmpl, void *priv);
+
+void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
+ void *buffer, ocf_submit_end_t cmpl, void *priv);
+
+void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
+ ocf_req_end_t callback);
+
+void ocf_submit_cache_reqs(struct ocf_cache *cache,
+ struct ocf_request *req, int dir, uint64_t offset,
+ uint64_t size, unsigned int reqs, ocf_req_end_t callback);
+
+static inline struct ocf_io *ocf_new_cache_io(ocf_cache_t cache,
+ ocf_queue_t queue, uint64_t addr, uint32_t bytes,
+ uint32_t dir, uint32_t io_class, uint64_t flags)
+
+{
+ return ocf_volume_new_io(ocf_cache_get_volume(cache), queue,
+ addr, bytes, dir, io_class, flags);
+}
+
+static inline struct ocf_io *ocf_new_core_io(ocf_core_t core,
+ ocf_queue_t queue, uint64_t addr, uint32_t bytes,
+ uint32_t dir, uint32_t io_class, uint64_t flags)
+{
+ return ocf_volume_new_io(ocf_core_get_volume(core), queue,
+ addr, bytes, dir, io_class, flags);
+}
+
+#endif /* UTILS_IO_H_ */
diff --git a/src/spdk/ocf/src/utils/utils_io_allocator.h b/src/spdk/ocf/src/utils/utils_io_allocator.h
new file mode 100644
index 000000000..2a7bae200
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_io_allocator.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __UTILS_IO_ALLOCATOR_H__
+#define __UTILS_IO_ALLOCATOR_H__
+
+#include "ocf/ocf_types.h"
+
+typedef struct ocf_io_allocator *ocf_io_allocator_t;
+
+struct ocf_io_allocator_ops {
+ int (*allocator_init)(ocf_io_allocator_t allocator,
+ uint32_t priv_size, const char *name);
+ void (*allocator_deinit)(ocf_io_allocator_t allocator);
+ void *(*allocator_new)(ocf_io_allocator_t allocator,
+ ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir);
+ void (*allocator_del)(ocf_io_allocator_t allocator, void *obj);
+};
+
+struct ocf_io_allocator_type {
+ struct ocf_io_allocator_ops ops;
+};
+
+typedef const struct ocf_io_allocator_type *ocf_io_allocator_type_t;
+
+struct ocf_io_allocator {
+ const struct ocf_io_allocator_type *type;
+ void *priv;
+};
+
+static inline void *ocf_io_allocator_new(ocf_io_allocator_t allocator,
+ ocf_volume_t volume, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir)
+{
+ return allocator->type->ops.allocator_new(allocator, volume, queue,
+ addr, bytes, dir);
+}
+
+static inline void ocf_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
+{
+ allocator->type->ops.allocator_del(allocator, obj);
+}
+
+static inline int ocf_io_allocator_init(ocf_io_allocator_t allocator,
+ ocf_io_allocator_type_t type, uint32_t size, const char *name)
+
+{
+ allocator->type = type;
+ return allocator->type->ops.allocator_init(allocator, size, name);
+}
+
+static inline void ocf_io_allocator_deinit(ocf_io_allocator_t allocator)
+{
+ allocator->type->ops.allocator_deinit(allocator);
+}
+
+ocf_io_allocator_type_t ocf_io_allocator_get_type_default(void);
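+
+/*
+ * Usage sketch (an assumption about the call site; volume, queue, addr,
+ * bytes and struct my_io_priv are hypothetical). The size argument is the
+ * per-IO private data size forwarded to the allocator's init op:
+ *
+ *   struct ocf_io_allocator alloc;
+ *
+ *   if (ocf_io_allocator_init(&alloc, ocf_io_allocator_get_type_default(),
+ *           sizeof(struct my_io_priv), "my_io"))
+ *       return -OCF_ERR_NO_MEM;
+ *
+ *   void *io = ocf_io_allocator_new(&alloc, volume, queue, addr, bytes,
+ *           OCF_READ);
+ *   ...
+ *   ocf_io_allocator_del(&alloc, io);
+ *   ocf_io_allocator_deinit(&alloc);
+ */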
+
+#endif /* __UTILS_IO_ALLOCATOR__ */
diff --git a/src/spdk/ocf/src/utils/utils_list.c b/src/spdk/ocf/src/utils/utils_list.c
new file mode 100644
index 000000000..a10337f4a
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_list.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "utils_list.h"
+
+void ocf_lst_sort(struct ocf_lst *lst)
+{
+ ocf_cache_line_t iter_idx;
+ ocf_cache_line_t next_idx;
+ struct ocf_lst_entry *iter;
+
+ if (!lst->cmp) {
+ /* No comparator, no need to sort */
+ return;
+ }
+
+ if (ocf_lst_empty(lst)) {
+ /* List is empty, nothing to do */
+ return;
+ }
+
+ /* Get iterator - first element on the list, and one after */
+ iter_idx = lst->head->next;
+ iter = lst->getter(lst->cache, iter_idx);
+ next_idx = iter->next;
+ lst->getter(lst->cache, iter->next);
+
+ /* Re-initialize the list to its empty state */
+ lst->head->next = lst->invalid;
+ lst->head->prev = lst->invalid;
+
+ while (iter_idx != lst->invalid) {
+ ocf_lst_init_entry(lst, iter);
+
+ if (ocf_lst_empty(lst)) {
+ /* Put the first element on the list */
+ ocf_lst_add(lst, iter_idx);
+ } else {
+ /* Search for the place where the element should be inserted */
+ struct ocf_lst_entry *pos;
+ ocf_cache_line_t pos_idx;
+
+ for_each_lst(lst, pos, pos_idx)
+ if (lst->cmp(lst->cache, pos, iter) > 0)
+ break;
+
+ if (lst->invalid == pos_idx) {
+ /* Put at the end of list */
+ ocf_lst_add_tail(lst, iter_idx);
+ } else {
+ /* Position is known, put it before */
+ ocf_lst_add_before(lst, pos_idx, iter_idx);
+ }
+ }
+
+ /* Switch to next */
+ iter_idx = next_idx;
+ iter = lst->getter(lst->cache, iter_idx);
+ next_idx = iter->next;
+ }
+}
diff --git a/src/spdk/ocf/src/utils/utils_list.h b/src/spdk/ocf/src/utils/utils_list.h
new file mode 100644
index 000000000..50115477a
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_list.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __UTILS_LIST_H__
+#define __UTILS_LIST_H__
+
+#include "ocf_env.h"
+#include "../ocf_ctx_priv.h"
+#include "ocf/ocf_cache.h"
+
+#define OCF_LST_DBG 1
+
+#if 1 == OCF_LST_DBG
+#define OCF_LST_DBG_ON(lst, cond) ({ \
+ if (cond) { \
+ ocf_log(ocf_cache_get_ctx(lst->cache), log_crit, \
+ "OCF list critical problem (%s:%u)\n", \
+ __func__, __LINE__); \
+ ocf_log_stack_trace(ocf_cache_get_ctx(lst->cache)); \
+ } \
+})
+#else
+#define OCF_LST_DBG_ON(lst, cond)
+#endif
+
+#define OCF_LST_ENTRY_OUT(lst) ((lst)->invalid + 1)
+
+struct ocf_lst_entry {
+ ocf_cache_line_t next;
+ ocf_cache_line_t prev;
+};
+
+typedef struct ocf_lst_entry *(*ocf_mlst_getter)(
+ struct ocf_cache *cache, ocf_cache_line_t idx);
+
+typedef int (*ocf_mlst_cmp)(struct ocf_cache *cache,
+ struct ocf_lst_entry *e1, struct ocf_lst_entry *e2);
+
+struct ocf_lst {
+ struct ocf_lst_entry *head;
+ ocf_cache_line_t invalid;
+ ocf_mlst_getter getter;
+ ocf_mlst_cmp cmp;
+ struct ocf_cache *cache;
+
+ struct {
+ uint32_t active : 1;
+ } flags;
+};
+
+static inline void ocf_lst_init_entry(struct ocf_lst *lst,
+ struct ocf_lst_entry *entry)
+{
+ entry->next = entry->prev = OCF_LST_ENTRY_OUT(lst);
+}
+
+static inline bool ocf_lst_is_entry(struct ocf_lst *lst,
+ struct ocf_lst_entry *entry)
+{
+ if (entry->next == OCF_LST_ENTRY_OUT(lst) &&
+ entry->prev == OCF_LST_ENTRY_OUT(lst))
+ return false;
+
+ if (entry->next < OCF_LST_ENTRY_OUT(lst) &&
+ entry->prev < OCF_LST_ENTRY_OUT(lst))
+ return true;
+
+ ENV_BUG();
+ return false;
+}
+
+static inline void ocf_lst_init(struct ocf_cache *cache,
+ struct ocf_lst *lst, ocf_cache_line_t invalid,
+ ocf_mlst_getter getter, ocf_mlst_cmp cmp)
+{
+ ocf_cache_line_t idx;
+
+ ENV_BUG_ON(env_memset(lst, sizeof(*lst), 0));
+
+ lst->head = getter(cache, invalid);
+ lst->head->next = invalid;
+ lst->head->prev = invalid;
+ lst->invalid = invalid;
+ lst->getter = getter;
+ lst->cmp = cmp;
+ lst->cache = cache;
+
+ for (idx = 0; idx < lst->invalid; idx++) {
+ struct ocf_lst_entry *entry = getter(cache, idx);
+
+ ocf_lst_init_entry(lst, entry);
+ }
+}
+
+static inline void ocf_lst_add_after(struct ocf_lst *lst,
+ ocf_cache_line_t at, ocf_cache_line_t idx)
+{
+ struct ocf_lst_entry *after = lst->getter(lst->cache, at);
+ struct ocf_lst_entry *next = lst->getter(lst->cache, after->next);
+ struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
+
+ OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, after));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
+
+ this->next = after->next;
+ this->prev = at;
+ after->next = idx;
+ next->prev = idx;
+}
+
+static inline void ocf_lst_add_before(struct ocf_lst *lst,
+ ocf_cache_line_t at, ocf_cache_line_t idx)
+{
+ struct ocf_lst_entry *before = lst->getter(lst->cache, at);
+ struct ocf_lst_entry *prev = lst->getter(lst->cache, before->prev);
+ struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
+
+ OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, before));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
+
+ this->next = at;
+ this->prev = before->prev;
+ before->prev = idx;
+ prev->next = idx;
+}
+
+static inline void ocf_lst_add(struct ocf_lst *lst, ocf_cache_line_t idx)
+{
+ struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
+ struct ocf_lst_entry *next = lst->getter(lst->cache, lst->head->next);
+
+ OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
+
+ this->next = lst->head->next;
+ next->prev = idx;
+ lst->head->next = idx;
+ this->prev = lst->invalid;
+}
+
+static inline void ocf_lst_add_tail(struct ocf_lst *lst, ocf_cache_line_t idx)
+{
+ struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
+ struct ocf_lst_entry *prev = lst->getter(lst->cache, lst->head->prev);
+
+ OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
+
+ this->next = lst->invalid;
+ this->prev = lst->head->prev;
+ prev->next = idx;
+ lst->head->prev = idx;
+}
+
+static inline void ocf_lst_del(struct ocf_lst *lst, ocf_cache_line_t idx)
+{
+ struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
+ struct ocf_lst_entry *next = lst->getter(lst->cache, this->next);
+ struct ocf_lst_entry *prev = lst->getter(lst->cache, this->prev);
+
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, this));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
+ OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
+
+ prev->next = this->next;
+ next->prev = this->prev;
+
+ ocf_lst_init_entry(lst, this);
+}
+
+static inline ocf_cache_line_t ocf_lst_head(struct ocf_lst *lst)
+{
+ return lst->head->next;
+}
+
+static inline ocf_cache_line_t ocf_lst_tail(struct ocf_lst *lst)
+{
+ return lst->head->prev;
+}
+
+static inline bool ocf_lst_empty(struct ocf_lst *lst)
+{
+ if (lst->head->next == lst->invalid)
+ return true;
+ else
+ return false;
+}
+
+void ocf_lst_sort(struct ocf_lst *lst);
+
+#define for_each_lst(lst, entry, id) \
+for (id = (lst)->head->next, entry = (lst)->getter((lst)->cache, id); \
+ entry != (lst)->head; id = entry->next, \
+ entry = (lst)->getter((lst)->cache, id))
+
+#define for_each_lst_entry(lst, entry, id, type, member) \
+for (id = (lst)->head->next, \
+ entry = container_of((lst)->getter((lst)->cache, id), type, member); \
+ entry != container_of((lst)->head, type, member); \
+ id = entry->member.next, \
+ entry = container_of((lst)->getter((lst)->cache, id), type, member))
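+
+/*
+ * Usage sketch (illustrative; the real in-tree user of this pattern is the
+ * partition list in utils_part.c). Entries embed struct ocf_lst_entry and
+ * are resolved by index through the list's getter:
+ *
+ *   struct ocf_user_part *part;
+ *   ocf_part_id_t part_id;
+ *
+ *   for_each_lst_entry(&cache->lst_part, part, part_id,
+ *           struct ocf_user_part, lst_valid) {
+ *       // visit valid partitions in list order
+ *   }
+ */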
+
+#endif /* __UTILS_LIST_H__ */
diff --git a/src/spdk/ocf/src/utils/utils_part.c b/src/spdk/ocf/src/utils/utils_part.c
new file mode 100644
index 000000000..9bc0d53b4
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_part.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_request.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../eviction/ops.h"
+#include "utils_part.h"
+
+static struct ocf_lst_entry *ocf_part_lst_getter_valid(
+ struct ocf_cache *cache, ocf_cache_line_t idx)
+{
+ ENV_BUG_ON(idx > OCF_IO_CLASS_MAX);
+ return &cache->user_parts[idx].lst_valid;
+}
+
+
+static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
+ struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
+{
+ struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
+ lst_valid);
+ struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
+ lst_valid);
+ size_t p1_size = ocf_cache_is_device_attached(cache) ?
+ p1->runtime->curr_size : 0;
+ size_t p2_size = ocf_cache_is_device_attached(cache) ?
+ p2->runtime->curr_size : 0;
+
+ int v1 = p1->config->priority;
+ int v2 = p2->config->priority;
+
+ /*
+ * If a partition is invalid, its priority depends on its current size:
+ * 1. Partition is empty - move to the end of list
+ * 2. Partition is not empty - move to the beginning of the list. This
+ * partition will be evicted first
+ */
+
+ if (p1->config->priority == OCF_IO_CLASS_PRIO_PINNED)
+ p1->config->flags.eviction = false;
+ else
+ p1->config->flags.eviction = true;
+
+ if (p2->config->priority == OCF_IO_CLASS_PRIO_PINNED)
+ p2->config->flags.eviction = false;
+ else
+ p2->config->flags.eviction = true;
+
+ if (!p1->config->flags.valid) {
+ if (p1_size) {
+ v1 = SHRT_MAX;
+ p1->config->flags.eviction = true;
+ } else {
+ v1 = SHRT_MIN;
+ p1->config->flags.eviction = false;
+ }
+ }
+
+ if (!p2->config->flags.valid) {
+ if (p2_size) {
+ v2 = SHRT_MAX;
+ p2->config->flags.eviction = true;
+ } else {
+ v2 = SHRT_MIN;
+ p2->config->flags.eviction = false;
+ }
+ }
+
+ if (v1 == v2) {
+ v1 = p1 - cache->user_parts;
+ v2 = p2 - cache->user_parts;
+ }
+
+ return v2 - v1;
+}
+
+void ocf_part_init(struct ocf_cache *cache)
+{
+ ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX,
+ ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid);
+}
+
+void ocf_part_move(struct ocf_request *req)
+{
+ struct ocf_cache *cache = req->cache;
+ struct ocf_map_info *entry;
+ ocf_cache_line_t line;
+ ocf_part_id_t id_old, id_new;
+ uint32_t i;
+ ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type;
+
+ ENV_BUG_ON(type >= ocf_cleaning_max);
+
+ entry = &req->map[0];
+ for (i = 0; i < req->core_line_count; i++, entry++) {
+ if (!entry->re_part) {
+ /* Changing partition not required */
+ continue;
+ }
+
+ if (entry->status != LOOKUP_HIT) {
+ /* No HIT */
+ continue;
+ }
+
+ line = entry->coll_idx;
+ id_old = ocf_metadata_get_partition_id(cache, line);
+ id_new = req->part_id;
+
+ ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
+ id_new >= OCF_IO_CLASS_MAX);
+
+ if (id_old == id_new) {
+ /* Partition of the request and cache line is the same,
+ * no need to change partition
+ */
+ continue;
+ }
+
+ /* Remove from old eviction */
+ ocf_eviction_purge_cache_line(cache, line);
+
+ if (metadata_test_dirty(cache, line)) {
+ /*
+ * Remove cline from cleaning - this is for io class
+ * oriented cleaning policies (e.g. ALRU).
+ * TODO: Consider adding update_cache_line() ops
+ * to cleaning policy to let policies handle this.
+ */
+ if (cleaning_policy_ops[type].purge_cache_block)
+ cleaning_policy_ops[type].
+ purge_cache_block(cache, line);
+ }
+
+ /* Let's change partition */
+ ocf_metadata_remove_from_partition(cache, id_old, line);
+ ocf_metadata_add_to_partition(cache, id_new, line);
+
+ /* Add to new eviction */
+ ocf_eviction_init_cache_line(cache, line, id_new);
+ ocf_eviction_set_hot_cache_line(cache, line);
+
+ /* Check if cache line is dirty. If yes then need to change
+ * cleaning policy and update partition dirty clines
+ * statistics.
+ */
+ if (metadata_test_dirty(cache, line)) {
+ /* Add cline back to cleaning policy */
+ if (cleaning_policy_ops[type].set_hot_cache_line)
+ cleaning_policy_ops[type].
+ set_hot_cache_line(cache, line);
+
+ env_atomic_inc(&req->core->runtime_meta->
+ part_counters[id_new].dirty_clines);
+ env_atomic_dec(&req->core->runtime_meta->
+ part_counters[id_old].dirty_clines);
+ }
+
+ env_atomic_inc(&req->core->runtime_meta->
+ part_counters[id_new].cached_clines);
+ env_atomic_dec(&req->core->runtime_meta->
+ part_counters[id_old].cached_clines);
+
+ /* DONE */
+ }
+}
+
+void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
+ bool valid)
+{
+ struct ocf_user_part *part = &cache->user_parts[id];
+
+ if (valid ^ part->config->flags.valid) {
+ if (valid) {
+ part->config->flags.valid = true;
+ cache->conf_meta->valid_parts_no++;
+ } else {
+ part->config->flags.valid = false;
+ cache->conf_meta->valid_parts_no--;
+ part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
+ part->config->min_size = 0;
+ part->config->max_size = PARTITION_SIZE_MAX;
+ ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name),
+ "Inactive", 9));
+ }
+ }
+}
diff --git a/src/spdk/ocf/src/utils/utils_part.h b/src/spdk/ocf/src/utils/utils_part.h
new file mode 100644
index 000000000..c37684c48
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_part.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __UTILS_PARTITION_H__
+#define __UTILS_PARTITION_H__
+
+#include "../ocf_request.h"
+#include "../engine/cache_engine.h"
+#include "../metadata/metadata_partition.h"
+
+void ocf_part_init(struct ocf_cache *cache);
+
+static inline bool ocf_part_is_valid(struct ocf_user_part *part)
+{
+ return !!part->config->flags.valid;
+}
+
+static inline void ocf_part_set_prio(struct ocf_cache *cache,
+ struct ocf_user_part *part, int16_t prio)
+{
+ if (part->config->priority != prio)
+ part->config->priority = prio;
+}
+
+static inline int16_t ocf_part_get_prio(struct ocf_cache *cache,
+ ocf_part_id_t part_id)
+{
+ if (part_id < OCF_IO_CLASS_MAX)
+ return cache->user_parts[part_id].config->priority;
+
+ return OCF_IO_CLASS_PRIO_LOWEST;
+}
+
+void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
+ bool valid);
+
+static inline bool ocf_part_is_added(struct ocf_user_part *part)
+{
+ return !!part->config->flags.added;
+}
+
+static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
+{
+ if (class < OCF_IO_CLASS_MAX)
+ if (cache->user_parts[class].config->flags.valid)
+ return class;
+
+ return PARTITION_DEFAULT;
+}
+
+void ocf_part_move(struct ocf_request *req);
+
+#define for_each_part(cache, part, id) \
+ for_each_lst_entry(&cache->lst_part, part, id, \
+ struct ocf_user_part, lst_valid)
+
+static inline void ocf_part_sort(struct ocf_cache *cache)
+{
+ ocf_lst_sort(&cache->lst_part);
+}
+
+static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache,
+ ocf_part_id_t part_id)
+{
+ if (part_id < OCF_IO_CLASS_MAX)
+ return cache->user_parts[part_id].config->cache_mode;
+ return ocf_cache_mode_none;
+}
+
+static inline bool ocf_part_is_prio_valid(int64_t prio)
+{
+ switch (prio) {
+ case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
+ case OCF_IO_CLASS_PRIO_PINNED:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/**
+ * Routine checks the validity of a partition name.
+ *
+ * The following conditions are checked:
+ * - string too long
+ * - string containing invalid characters (outside of low ASCII)
+ * The following condition is NOT checked:
+ * - empty string (an empty string is NOT a valid partition name, but
+ * this function nevertheless returns true for it).
+ *
+ * @return returns true if partition name is a valid name
+ */
+static inline bool ocf_part_is_name_valid(const char *name)
+{
+ uint32_t length = 0;
+
+ while (*name) {
+ if (*name < ' ' || *name > '~')
+ return false;
+
+ if (',' == *name || '"' == *name)
+ return false;
+
+ name++;
+ length++;
+
+ if (length >= OCF_IO_CLASS_NAME_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+#endif /* __UTILS_PARTITION_H__ */
diff --git a/src/spdk/ocf/src/utils/utils_pipeline.c b/src/spdk/ocf/src/utils/utils_pipeline.c
new file mode 100644
index 000000000..63514fff4
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_pipeline.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
+#include "../ocf_request.h"
+#include "utils_pipeline.h"
+
+struct ocf_pipeline {
+ struct ocf_pipeline_properties *properties;
+ struct ocf_request *req;
+ int next_step;
+ int next_arg;
+ bool finish;
+ int error;
+
+ void *priv;
+};
+
+static int _ocf_pipeline_run_step(struct ocf_request *req)
+{
+ ocf_pipeline_t pipeline = req->priv;
+ struct ocf_pipeline_step *step;
+ ocf_pipeline_arg_t arg;
+
+ if (pipeline->finish) {
+ pipeline->properties->finish(pipeline, pipeline->priv,
+ pipeline->error);
+ return 0;
+ }
+
+ while (true) {
+ step = &pipeline->properties->steps[pipeline->next_step];
+ switch (step->type) {
+ case ocf_pipeline_step_single:
+ pipeline->next_step++;
+ step->hndl(pipeline, pipeline->priv, &step->arg);
+ return 0;
+ case ocf_pipeline_step_foreach:
+ arg = &step->args[pipeline->next_arg++];
+ if (arg->type == ocf_pipeline_arg_terminator) {
+ pipeline->next_arg = 0;
+ pipeline->next_step++;
+ continue;
+ }
+ step->hndl(pipeline, pipeline->priv, arg);
+ return 0;
+ case ocf_pipeline_step_terminator:
+ pipeline->properties->finish(pipeline, pipeline->priv,
+ pipeline->error);
+ return 0;
+ default:
+ ENV_BUG();
+ }
+ }
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_pipeline = {
+ .read = _ocf_pipeline_run_step,
+ .write = _ocf_pipeline_run_step,
+};
+
+int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache,
+ struct ocf_pipeline_properties *properties)
+{
+ ocf_pipeline_t tmp_pipeline;
+ struct ocf_request *req;
+
+ tmp_pipeline = env_vzalloc(sizeof(*tmp_pipeline) +
+ properties->priv_size);
+ if (!tmp_pipeline)
+ return -OCF_ERR_NO_MEM;
+
+ if (properties->priv_size > 0) {
+ tmp_pipeline->priv = (void *)tmp_pipeline +
+ sizeof(*tmp_pipeline);
+ }
+
+ req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+ if (!req) {
+ env_vfree(tmp_pipeline);
+ return -OCF_ERR_NO_MEM;
+ }
+
+ tmp_pipeline->properties = properties;
+ tmp_pipeline->req = req;
+ tmp_pipeline->next_step = 0;
+ tmp_pipeline->finish = false;
+ tmp_pipeline->error = 0;
+
+ req->info.internal = true;
+ req->io_if = &_io_if_pipeline;
+ req->priv = tmp_pipeline;
+
+ *pipeline = tmp_pipeline;
+
+ return 0;
+}
+
+void ocf_pipeline_destroy(ocf_pipeline_t pipeline)
+{
+ ocf_req_put(pipeline->req);
+ env_vfree(pipeline);
+}
+
+void ocf_pipeline_set_priv(ocf_pipeline_t pipeline, void *priv)
+{
+ pipeline->priv = priv;
+}
+
+void *ocf_pipeline_get_priv(ocf_pipeline_t pipeline)
+{
+ return pipeline->priv;
+}
+
+void ocf_pipeline_next(ocf_pipeline_t pipeline)
+{
+ ocf_engine_push_req_front(pipeline->req, true);
+}
+
+void ocf_pipeline_finish(ocf_pipeline_t pipeline, int error)
+{
+ pipeline->finish = true;
+ pipeline->error = error;
+ ocf_engine_push_req_front(pipeline->req, true);
+}
diff --git a/src/spdk/ocf/src/utils/utils_pipeline.h b/src/spdk/ocf/src/utils/utils_pipeline.h
new file mode 100644
index 000000000..2ada241e3
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_pipeline.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __UTILS_PIPELINE_H__
+#define __UTILS_PIPELINE_H__
+
+#include "../ocf_cache_priv.h"
+
+enum ocf_pipeline_step_type {
+ ocf_pipeline_step_single,
+ ocf_pipeline_step_foreach,
+ ocf_pipeline_step_terminator,
+};
+
+enum ocf_pipeline_arg_type {
+ ocf_pipeline_arg_none,
+ ocf_pipeline_arg_int,
+ ocf_pipeline_arg_ptr,
+ ocf_pipeline_arg_terminator,
+};
+
+struct ocf_pipeline_arg {
+ enum ocf_pipeline_arg_type type;
+ union {
+ int i;
+ void *p;
+ } val;
+};
+
+typedef struct ocf_pipeline_arg *ocf_pipeline_arg_t;
+
+#define OCF_PL_ARG_NONE() \
+ { .type = ocf_pipeline_arg_none, }
+
+#define OCF_PL_ARG_INT(_int) \
+ { .type = ocf_pipeline_arg_int, .val.i = _int }
+
+#define OCF_PL_ARG_PTR(_ptr) \
+ { .type = ocf_pipeline_arg_ptr, .val.p = _ptr }
+
+#define OCF_PL_ARG_TERMINATOR() \
+ { .type = ocf_pipeline_arg_terminator, }
+
+static inline int ocf_pipeline_arg_get_int(ocf_pipeline_arg_t arg)
+{
+ ENV_BUG_ON(arg->type != ocf_pipeline_arg_int);
+
+ return arg->val.i;
+}
+
+static inline void *ocf_pipeline_arg_get_ptr(ocf_pipeline_arg_t arg)
+{
+ ENV_BUG_ON(arg->type != ocf_pipeline_arg_ptr);
+
+ return arg->val.p;
+}
+
+typedef struct ocf_pipeline *ocf_pipeline_t;
+
+typedef void (*ocf_pipeline_step_hndl_t)(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg);
+
+typedef void (*ocf_pipeline_finish_t)(ocf_pipeline_t pipeline,
+ void *priv, int error);
+
+struct ocf_pipeline_step {
+ enum ocf_pipeline_step_type type;
+ ocf_pipeline_step_hndl_t hndl;
+ union {
+ struct ocf_pipeline_arg arg;
+ struct ocf_pipeline_arg *args;
+ };
+};
+
+#define OCF_PL_STEP(_hndl) \
+ { \
+ .type = ocf_pipeline_step_single, \
+ .hndl = _hndl, \
+ }
+
+#define OCF_PL_STEP_ARG_INT(_hndl, _int) \
+ { \
+ .type = ocf_pipeline_step_single, \
+ .hndl = _hndl, \
+ .arg = { \
+ .type = ocf_pipeline_arg_int, \
+ .val.i = _int, \
+ } \
+ }
+
+#define OCF_PL_STEP_ARG_PTR(_hndl, _ptr) \
+ { \
+ .type = ocf_pipeline_step_single, \
+ .hndl = _hndl, \
+ .arg = { \
+ .type = ocf_pipeline_arg_ptr, \
+ .val.p = _ptr, \
+ } \
+ }
+
+#define OCF_PL_STEP_FOREACH(_hndl, _args) \
+ { \
+ .type = ocf_pipeline_step_foreach, \
+ .hndl = _hndl, \
+ .args = _args, \
+ }
+
+#define OCF_PL_STEP_TERMINATOR() \
+ { \
+ .type = ocf_pipeline_step_terminator, \
+ }
+
+struct ocf_pipeline_properties {
+ uint32_t priv_size;
+ ocf_pipeline_finish_t finish;
+ struct ocf_pipeline_step steps[];
+};
+
+int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache,
+ struct ocf_pipeline_properties *properties);
+
+void ocf_pipeline_set_priv(ocf_pipeline_t pipeline, void *priv);
+
+void *ocf_pipeline_get_priv(ocf_pipeline_t pipeline);
+
+void ocf_pipeline_destroy(ocf_pipeline_t pipeline);
+
+void ocf_pipeline_next(ocf_pipeline_t pipeline);
+
+void ocf_pipeline_finish(ocf_pipeline_t pipeline, int error);
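+
+/*
+ * Usage sketch (my_step, my_finish and my_props are hypothetical; only the
+ * OCF_PL_* macros and the functions declared above come from this header).
+ * Each step does one unit of work and explicitly advances the pipeline:
+ *
+ *   static void my_step(ocf_pipeline_t pipeline, void *priv,
+ *           ocf_pipeline_arg_t arg)
+ *   {
+ *       // ... do work, possibly asynchronous ...
+ *       ocf_pipeline_next(pipeline);
+ *   }
+ *
+ *   static void my_finish(ocf_pipeline_t pipeline, void *priv, int error)
+ *   {
+ *       ocf_pipeline_destroy(pipeline);
+ *   }
+ *
+ *   struct ocf_pipeline_properties my_props = {
+ *       .priv_size = 0,
+ *       .finish = my_finish,
+ *       .steps = {
+ *           OCF_PL_STEP(my_step),
+ *           OCF_PL_STEP_ARG_INT(my_step, 42),
+ *           OCF_PL_STEP_TERMINATOR(),
+ *       },
+ *   };
+ *
+ *   // ocf_pipeline_create(&pipeline, cache, &my_props);
+ *   // ocf_pipeline_next(pipeline);   // kicks off the first step
+ */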
+
+#define OCF_PL_NEXT_RET(pipeline) ({ \
+ ocf_pipeline_next(pipeline); \
+ return; \
+})
+
+#define OCF_PL_FINISH_RET(pipeline, error) ({ \
+ ocf_pipeline_finish(pipeline, error); \
+ return; \
+})
+
+#define OCF_PL_NEXT_ON_SUCCESS_RET(pipeline, error) ({ \
+ if (error) \
+ ocf_pipeline_finish(pipeline, error); \
+ else \
+ ocf_pipeline_next(pipeline); \
+ return; \
+})
+
+
+#endif /* __UTILS_PIPELINE_H__ */
diff --git a/src/spdk/ocf/src/utils/utils_realloc.c b/src/spdk/ocf/src/utils/utils_realloc.c
new file mode 100644
index 000000000..7c805fb42
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_realloc.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "ocf/ocf.h"
+#include "utils_realloc.h"
+#include "ocf_env.h"
+
+#define OCF_REALLOC_K_MAX (128 * KiB)
+
+static int _ocf_realloc_with_cp(void **mem, size_t size, size_t count,
+ size_t *limit, bool cp)
+{
+ size_t alloc_size = size * count;
+
+ ENV_BUG_ON(!mem);
+ ENV_BUG_ON(!limit);
+
+ if (size && count) {
+ /* Memory reallocation request */
+
+ if (alloc_size > *limit) {
+ /* The space is not enough, we need to allocate a new one */
+
+ void *new_mem;
+
+ if (alloc_size > OCF_REALLOC_K_MAX)
+ new_mem = env_vzalloc(alloc_size);
+ else
+ new_mem = env_zalloc(alloc_size, ENV_MEM_NOIO);
+
+ if (!new_mem) {
+ /* Allocation error */
+ return -1;
+ }
+
+ /* Copy old content (if requested) and free previous memory */
+ if (*mem) {
+ if (cp) {
+ /* copy previous content into new allocated
+ * memory
+ */
+ ENV_BUG_ON(env_memcpy(new_mem, alloc_size, *mem, *limit));
+
+ }
+
+ if (*limit > OCF_REALLOC_K_MAX)
+ env_vfree(*mem);
+ else
+ env_free(*mem);
+ }
+
+ /* Update limit */
+ *limit = alloc_size;
+
+ /* Update memory pointer */
+ *mem = new_mem;
+
+ return 0;
+ }
+
+ /*
+ * The memory space is sufficient, no reallocation required.
+ * The space beyond the requested size is set to '0'
+ */
+ if (cp)
+ ENV_BUG_ON(env_memset(*mem + alloc_size, *limit - alloc_size, 0));
+
+ return 0;
+
+ }
+
+ if ((size == 0) && (count == 0)) {
+
+ if ((*mem) && (*limit)) {
+ /* Need to free memory */
+ if (*limit > OCF_REALLOC_K_MAX)
+ env_vfree(*mem);
+ else
+ env_free(*mem);
+
+ /* Update limit */
+ *((size_t *)limit) = 0;
+ *mem = NULL;
+
+ return 0;
+ }
+
+ if ((!*mem) && (*limit == 0)) {
+ /* No previous allocation, nothing to do */
+ return 0;
+
+ }
+ }
+
+ ENV_BUG();
+ return -1;
+}
+
+int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit)
+{
+ return _ocf_realloc_with_cp(mem, size, count, limit, false);
+}
+
+int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit)
+{
+ return _ocf_realloc_with_cp(mem, size, count, limit, true);
+}
+
+void ocf_realloc_init(void **mem, size_t *limit)
+{
+ ENV_BUG_ON(!mem);
+ ENV_BUG_ON(!limit);
+
+ *mem = NULL;
+ *((size_t *)limit) = 0;
+}
diff --git a/src/spdk/ocf/src/utils/utils_realloc.h b/src/spdk/ocf/src/utils/utils_realloc.h
new file mode 100644
index 000000000..ee2fd6f51
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_realloc.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef UTILS_REALLOC_H_
+#define UTILS_REALLOC_H_
+
+/**
+ * @file utils_realloc.h
+ * @brief OCF realloc
+ */
+
+void ocf_realloc_init(void **mem, size_t *limit);
+
+int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit);
+
+int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit);
+
+/**
+ * @brief Initialize memory pointer and limit before reallocator usage
+ *
+ * @param[inout] mem - Pointer to the memory
+ * @param[inout] limit - Variable used internally by reallocator and indicates
+ * last allocation size
+ */
+#define OCF_REALLOC_INIT(mem, limit) \
+ ocf_realloc_init((void **)mem, limit)
+
+/**
+ * @brief De-Initialize memory pointer and limit, free memory
+ *
+ * @param[inout] mem - Pointer to the memory
+ * @param[inout] limit - Variable used internally by reallocator and indicates
+ * last allocation size
+ */
+#define OCF_REALLOC_DEINIT(mem, limit) \
+ ocf_realloc((void **)mem, 0, 0, limit)
+
+/**
+ * @brief Reallocate referenced memory if it is required.
+ *
+ * @param[inout] mem - Pointer to the memory
+ * @param[in] size - Size of particular element
+ * @param[in] count - Count of elements
+ * @param[inout] limit - Variable used internally by reallocator and indicates
+ * last allocation size
+ *
+ * @return 0 - Reallocation successful, non-zero - reallocation error
+ */
+#define OCF_REALLOC(mem, size, count, limit) \
+ ocf_realloc((void **)mem, size, count, limit)
+
+/**
+ * @brief Reallocate referenced memory if it is required and copy old content
+ * into new memory space, new memory space is set to '0'
+ *
+ * @param[inout] mem - Pointer to the memory
+ * @param[in] size - Size of particular element
+ * @param[in] count - Count of elements
+ * @param[inout] limit - Variable used internally by reallocator and indicates
+ * last allocation size
+ *
+ * @return 0 - Reallocation successful, non-zero - reallocation error
+ */
+#define OCF_REALLOC_CP(mem, size, count, limit) \
+ ocf_realloc_cp((void **)mem, size, count, limit)
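+
+/*
+ * Usage sketch (items and items_limit are hypothetical). The caller keeps
+ * the pointer and the limit together and passes both to every call:
+ *
+ *   uint32_t *items = NULL;
+ *   size_t items_limit;
+ *
+ *   OCF_REALLOC_INIT(&items, &items_limit);
+ *
+ *   if (OCF_REALLOC_CP(&items, sizeof(*items), 64, &items_limit))
+ *       return -OCF_ERR_NO_MEM;   // grow to 64 entries, keep old content
+ *
+ *   OCF_REALLOC_DEINIT(&items, &items_limit);   // free and reset
+ */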
+
+#endif /* UTILS_REALLOC_H_ */
diff --git a/src/spdk/ocf/src/utils/utils_refcnt.c b/src/spdk/ocf/src/utils/utils_refcnt.c
new file mode 100644
index 000000000..983a3f524
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_refcnt.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "../utils/utils_refcnt.h"
+
+void ocf_refcnt_init(struct ocf_refcnt *rc)
+{
+ env_atomic_set(&rc->counter, 0);
+ env_atomic_set(&rc->freeze, 0);
+ env_atomic_set(&rc->callback, 0);
+ rc->cb = NULL;
+}
+
+int ocf_refcnt_dec(struct ocf_refcnt *rc)
+{
+ int val = env_atomic_dec_return(&rc->counter);
+ ENV_BUG_ON(val < 0);
+
+ if (!val && env_atomic_cmpxchg(&rc->callback, 1, 0))
+ rc->cb(rc->priv);
+
+ return val;
+}
+
+int ocf_refcnt_inc(struct ocf_refcnt *rc)
+{
+ int val;
+
+ if (!env_atomic_read(&rc->freeze)) {
+ val = env_atomic_inc_return(&rc->counter);
+ if (!env_atomic_read(&rc->freeze))
+ return val;
+ else
+ ocf_refcnt_dec(rc);
+ }
+
+ return 0;
+}
+
+
+void ocf_refcnt_freeze(struct ocf_refcnt *rc)
+{
+ env_atomic_inc(&rc->freeze);
+}
+
+void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb,
+ void *priv)
+{
+ ENV_BUG_ON(!env_atomic_read(&rc->freeze));
+ ENV_BUG_ON(env_atomic_read(&rc->callback));
+
+ env_atomic_inc(&rc->counter);
+ rc->cb = cb;
+ rc->priv = priv;
+ env_atomic_set(&rc->callback, 1);
+ ocf_refcnt_dec(rc);
+}
+
+void ocf_refcnt_unfreeze(struct ocf_refcnt *rc)
+{
+ int val = env_atomic_dec_return(&rc->freeze);
+ ENV_BUG_ON(val < 0);
+}
+
+bool ocf_refcnt_frozen(struct ocf_refcnt *rc)
+{
+ return !!env_atomic_read(&rc->freeze);
+}
diff --git a/src/spdk/ocf/src/utils/utils_refcnt.h b/src/spdk/ocf/src/utils/utils_refcnt.h
new file mode 100644
index 000000000..556756031
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_refcnt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_REFCNT_H__
+#define __OCF_REFCNT_H__
+
+#include "ocf_env.h"
+
+typedef void (*ocf_refcnt_cb_t)(void *priv);
+
+struct ocf_refcnt
+{
+ env_atomic counter;
+ env_atomic freeze;
+ env_atomic callback;
+ ocf_refcnt_cb_t cb;
+ void *priv;
+};
+
+/* Initialize reference counter */
+void ocf_refcnt_init(struct ocf_refcnt *rc);
+
+/* Try to increment counter. Returns counter value (> 0) if successful, 0
+ * if counter is frozen */
+int ocf_refcnt_inc(struct ocf_refcnt *rc);
+
+/* Decrement reference counter and return post-decrement value */
+int ocf_refcnt_dec(struct ocf_refcnt *rc);
+
+/* Disallow incrementing of underlying counter - attempts to increment counter
+ * will fail until ocf_refcnt_unfreeze is called.
+ * It's ok to call freeze multiple times, in which case the counter stays
+ * frozen until all freeze calls are offset by a corresponding unfreeze. */
+void ocf_refcnt_freeze(struct ocf_refcnt *rc);
+
+/* Cancel the effect of single ocf_refcnt_freeze call */
+void ocf_refcnt_unfreeze(struct ocf_refcnt *rc);
+
+bool ocf_refcnt_frozen(struct ocf_refcnt *rc);
+
+/* Register callback to be called when reference counter drops to 0.
+ * Must be called after counter is frozen.
+ * Cannot be called until the previously registered callback has fired. */
+void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb,
+ void *priv);
+
+#endif // __OCF_REFCNT_H__
diff --git a/src/spdk/ocf/src/utils/utils_stats.h b/src/spdk/ocf/src/utils/utils_stats.h
new file mode 100644
index 000000000..d0c9faccf
--- /dev/null
+++ b/src/spdk/ocf/src/utils/utils_stats.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright(c) 2012-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef UTILS_STATS_H_
+#define UTILS_STATS_H_
+
+#define _ocf_stats_zero(stats) \
+ do { \
+ if (stats) { \
+ typeof(*stats) zero = { { 0 } }; \
+ *stats = zero; \
+ } \
+ } while (0)
+
+static inline uint64_t _fraction(uint64_t numerator, uint64_t denominator)
+{
+ uint64_t result;
+ if (denominator) {
+ result = 10000 * numerator / denominator;
+ } else {
+ result = 0;
+ }
+ return result;
+}
+
+static inline uint64_t _lines4k(uint64_t size,
+ ocf_cache_line_size_t cache_line_size)
+{
+ uint64_t result;
+
+ result = size * (cache_line_size / 4096);
+
+ return result;
+}
+
+static inline uint64_t _bytes4k(uint64_t bytes)
+{
+ return (bytes + 4095UL) >> 12;
+}
+
+static inline void _set(struct ocf_stat *stat, uint64_t value,
+ uint64_t denominator)
+{
+ stat->value = value;
+ stat->fraction = _fraction(value, denominator);
+}
+
+#endif
diff --git a/src/spdk/ocf/tests/build/Makefile b/src/spdk/ocf/tests/build/Makefile
new file mode 100644
index 000000000..133bc1004
--- /dev/null
+++ b/src/spdk/ocf/tests/build/Makefile
@@ -0,0 +1,40 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+#
+# This Makefile performs a basic build test of OCF with the posix environment.
+# It doesn't produce a final executable; it just compiles every *.c file
+# into a *.o object to check that compilation succeeds.
+#
+# It's intended to be used as part of the CI process.
+#
+
+OCFDIR=../../
+SRCDIR=src/
+INCDIR=include/
+
+SRC=$(shell find ${SRCDIR} -name \*.c)
+OBJS = $(patsubst %.c, %.o, $(SRC))
+CFLAGS = -Wall -Werror -I${INCDIR} -I${SRCDIR}/ocf/env/
+
+all: sync
+ $(MAKE) build
+
+build: $(OBJS)
+
+sync:
+ @$(MAKE) -C ${OCFDIR} inc O=$(PWD)
+ @$(MAKE) -C ${OCFDIR} src O=$(PWD)
+ @$(MAKE) -C ${OCFDIR} env O=$(PWD) OCF_ENV=posix
+
+clean:
+ @rm -rf $(OBJS)
+
+distclean:
+ @rm -rf $(OBJS)
+ @rm -rf src/ocf
+ @rm -rf include/ocf
+
+.PHONY: all build sync clean distclean
diff --git a/src/spdk/ocf/tests/functional/.gitignore b/src/spdk/ocf/tests/functional/.gitignore
new file mode 100644
index 000000000..76988e6da
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/.gitignore
@@ -0,0 +1,9 @@
+__pycache__
+pyocf/__pycache__
+pyocf/libocf.so
+*.o
+pyocf/ocf/*
+*.pyc
+*.gcov
+*.gcda
+*.gcno
diff --git a/src/spdk/ocf/tests/functional/Makefile b/src/spdk/ocf/tests/functional/Makefile
new file mode 100755
index 000000000..c074d23de
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/Makefile
@@ -0,0 +1,52 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+PWD=$(shell pwd)
+OCFDIR=$(PWD)/../../
+ADAPTERDIR=$(PWD)/pyocf
+SRCDIR=$(ADAPTERDIR)/ocf/src
+INCDIR=$(ADAPTERDIR)/ocf/include
+WRAPDIR=$(ADAPTERDIR)/wrappers
+
+CC=gcc
+CFLAGS=-g -Wall -I$(INCDIR) -I$(SRCDIR)/ocf/env
+LDFLAGS=-pthread -lz
+
+SRC=$(shell find $(SRCDIR) $(WRAPDIR) -name \*.c)
+OBJS=$(patsubst %.c, %.o, $(SRC))
+OCFLIB=$(ADAPTERDIR)/libocf.so
+
+all: | sync config_random
+ $(MAKE) $(OCFLIB)
+
+$(OCFLIB): $(OBJS)
+ @echo "Building $@"
+ @$(CC) -coverage -shared -o $@ $(CFLAGS) $^ -fPIC $(LDFLAGS)
+
+%.o: %.c
+ @echo "Compiling $@"
+ @$(CC) -coverage -c $(CFLAGS) -o $@ -fPIC $^ $(LDFLAGS)
+
+sync:
+ @echo "Syncing OCF sources"
+ @mkdir -p $(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) inc O=$(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) src O=$(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) env O=$(ADAPTERDIR)/ocf OCF_ENV=posix
+
+config_random:
+ @python3 utils/configure_random.py
+
+clean:
+ @rm -rf $(OCFLIB) $(OBJS)
+ @echo " CLEAN "
+
+distclean: clean
+ @rm -rf $(OCFLIB) $(OBJS)
+ @rm -rf $(SRCDIR)/ocf
+ @rm -rf $(INCDIR)/ocf
+ @echo " DISTCLEAN "
+
+.PHONY: all clean sync config_random distclean
diff --git a/src/spdk/ocf/tests/functional/__init__.py b/src/spdk/ocf/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/__init__.py
diff --git a/src/spdk/ocf/tests/functional/config/random.cfg b/src/spdk/ocf/tests/functional/config/random.cfg
new file mode 100644
index 000000000..f7ab21256
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/config/random.cfg
@@ -0,0 +1,2 @@
+# The content of this file is generated by utils/configure_random.py,
+# triggered from the Makefile.
diff --git a/src/spdk/ocf/tests/functional/pyocf/__init__.py b/src/spdk/ocf/tests/functional/pyocf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/ocf.py b/src/spdk/ocf/tests/functional/pyocf/ocf.py
new file mode 100644
index 000000000..b24d8265f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/ocf.py
@@ -0,0 +1,30 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+from ctypes import c_void_p, cdll
+import inspect
+import os
+
+
+class OcfLib:
+ __lib__ = None
+
+ @classmethod
+ def getInstance(cls):
+ if cls.__lib__ is None:
+ lib = cdll.LoadLibrary(
+ os.path.join(
+ os.path.dirname(inspect.getfile(inspect.currentframe())),
+ "libocf.so",
+ )
+ )
+ lib.ocf_volume_get_uuid.restype = c_void_p
+ lib.ocf_volume_get_uuid.argtypes = [c_void_p]
+
+ lib.ocf_core_get_front_volume.restype = c_void_p
+ lib.ocf_core_get_front_volume.argtypes = [c_void_p]
+
+ cls.__lib__ = lib
+
+ return cls.__lib__
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/__init__.py b/src/spdk/ocf/tests/functional/pyocf/types/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/cache.py b/src/spdk/ocf/tests/functional/pyocf/types/cache.py
new file mode 100644
index 000000000..1a74a05f3
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/cache.py
@@ -0,0 +1,593 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_int,
+ c_char,
+ c_char_p,
+ c_void_p,
+ c_bool,
+ c_uint8,
+ Structure,
+ byref,
+ cast,
+ create_string_buffer,
+)
+from enum import IntEnum
+from datetime import timedelta
+
+from ..ocf import OcfLib
+from .shared import (
+ Uuid,
+ OcfError,
+ CacheLineSize,
+ CacheLines,
+ OcfCompletion,
+ SeqCutOffPolicy,
+)
+from ..utils import Size, struct_to_dict
+from .core import Core
+from .queue import Queue
+from .stats.cache import CacheInfo
+from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
+
+
+class Backfill(Structure):
+ _fields_ = [("_max_queue_size", c_uint32), ("_queue_unblock_size", c_uint32)]
+
+
+class CacheConfig(Structure):
+ MAX_CACHE_NAME_SIZE = 32
+ _fields_ = [
+ ("_name", c_char * MAX_CACHE_NAME_SIZE),
+ ("_cache_mode", c_uint32),
+ ("_eviction_policy", c_uint32),
+ ("_promotion_policy", c_uint32),
+ ("_cache_line_size", c_uint64),
+ ("_metadata_layout", c_uint32),
+ ("_metadata_volatile", c_bool),
+ ("_backfill", Backfill),
+ ("_locked", c_bool),
+ ("_pt_unaligned_io", c_bool),
+ ("_use_submit_io_fast", c_bool),
+ ]
+
+
+class CacheDeviceConfig(Structure):
+ _fields_ = [
+ ("_uuid", Uuid),
+ ("_volume_type", c_uint8),
+ ("_cache_line_size", c_uint64),
+ ("_force", c_bool),
+ ("_min_free_ram", c_uint64),
+ ("_perform_test", c_bool),
+ ("_discard_on_start", c_bool),
+ ]
+
+
+class ConfValidValues:
+ promotion_nhit_insertion_threshold_range = range(2, 1000)
+ promotion_nhit_trigger_threshold_range = range(0, 100)
+
+
+class CacheMode(IntEnum):
+ WT = 0
+ WB = 1
+ WA = 2
+ PT = 3
+ WI = 4
+ WO = 5
+ DEFAULT = WT
+
+ def lazy_write(self):
+ return self.value in [CacheMode.WB, CacheMode.WO]
+
+ def write_insert(self):
+ return self.value not in [CacheMode.PT, CacheMode.WA, CacheMode.WI]
+
+ def read_insert(self):
+ return self.value not in [CacheMode.PT, CacheMode.WO]
+
+
+class EvictionPolicy(IntEnum):
+ LRU = 0
+ DEFAULT = LRU
+
+
+class PromotionPolicy(IntEnum):
+ ALWAYS = 0
+ NHIT = 1
+ DEFAULT = ALWAYS
+
+
+class NhitParams(IntEnum):
+ INSERTION_THRESHOLD = 0
+ TRIGGER_THRESHOLD = 1
+
+
+class CleaningPolicy(IntEnum):
+ NOP = 0
+ ALRU = 1
+ ACP = 2
+ DEFAULT = ALRU
+
+
+class AlruParams(IntEnum):
+ WAKE_UP_TIME = 0
+ STALE_BUFFER_TIME = 1
+ FLUSH_MAX_BUFFERS = 2
+ ACTIVITY_THRESHOLD = 3
+
+
+class AcpParams(IntEnum):
+ WAKE_UP_TIME = 0
+ FLUSH_MAX_BUFFERS = 1
+
+
+class MetadataLayout(IntEnum):
+ STRIPING = 0
+ SEQUENTIAL = 1
+ DEFAULT = STRIPING
+
+
+class Cache:
+ DEFAULT_BACKFILL_QUEUE_SIZE = 65536
+ DEFAULT_BACKFILL_UNBLOCK = 60000
+ DEFAULT_PT_UNALIGNED_IO = False
+ DEFAULT_USE_SUBMIT_FAST = False
+
+ def __init__(
+ self,
+ owner,
+ name: str = "cache",
+ cache_mode: CacheMode = CacheMode.DEFAULT,
+ eviction_policy: EvictionPolicy = EvictionPolicy.DEFAULT,
+ promotion_policy: PromotionPolicy = PromotionPolicy.DEFAULT,
+ cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
+ metadata_layout: MetadataLayout = MetadataLayout.DEFAULT,
+ metadata_volatile: bool = False,
+ max_queue_size: int = DEFAULT_BACKFILL_QUEUE_SIZE,
+ queue_unblock_size: int = DEFAULT_BACKFILL_UNBLOCK,
+ locked: bool = False,
+ pt_unaligned_io: bool = DEFAULT_PT_UNALIGNED_IO,
+ use_submit_fast: bool = DEFAULT_USE_SUBMIT_FAST,
+ ):
+ self.device = None
+ self.started = False
+ self.owner = owner
+ self.cache_line_size = cache_line_size
+
+ self.cfg = CacheConfig(
+ _name=name.encode("ascii"),
+ _cache_mode=cache_mode,
+ _eviction_policy=eviction_policy,
+ _promotion_policy=promotion_policy,
+ _cache_line_size=cache_line_size,
+ _metadata_layout=metadata_layout,
+ _metadata_volatile=metadata_volatile,
+ _backfill=Backfill(
+ _max_queue_size=max_queue_size, _queue_unblock_size=queue_unblock_size
+ ),
+ _locked=locked,
+ _pt_unaligned_io=pt_unaligned_io,
+ _use_submit_io_fast=use_submit_fast,
+ )
+ self.cache_handle = c_void_p()
+ self._as_parameter_ = self.cache_handle
+ self.io_queues = []
+ self.cores = []
+
+ def start_cache(self, default_io_queue: Queue = None, mngt_queue: Queue = None):
+ status = self.owner.lib.ocf_mngt_cache_start(
+ self.owner.ctx_handle, byref(self.cache_handle), byref(self.cfg)
+ )
+ if status:
+ raise OcfError("Creating cache instance failed", status)
+ self.owner.caches.append(self)
+
+ self.mngt_queue = mngt_queue or Queue(self, "mgmt-{}".format(self.get_name()))
+
+ if default_io_queue:
+ self.io_queues += [default_io_queue]
+ else:
+ self.io_queues += [Queue(self, "default-io-{}".format(self.get_name()))]
+
+ status = self.owner.lib.ocf_mngt_cache_set_mngt_queue(self, self.mngt_queue)
+ if status:
+ raise OcfError("Error setting management queue", status)
+
+ self.started = True
+
+ def change_cache_mode(self, cache_mode: CacheMode):
+ self.write_lock()
+ status = self.owner.lib.ocf_mngt_cache_set_mode(self.cache_handle, cache_mode)
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error changing cache mode", status)
+
+ def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
+ self.cache_handle, cleaning_policy
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error changing cleaning policy", status)
+
+ def set_cleaning_policy_param(
+ self, cleaning_policy: CleaningPolicy, param_id, param_value
+ ):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
+ self.cache_handle, cleaning_policy, param_id, param_value
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error setting cleaning policy param", status)
+
+ def set_promotion_policy(self, promotion_policy: PromotionPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_set_policy(
+ self.cache_handle, promotion_policy
+ )
+
+ self.write_unlock()
+ if status:
+ raise OcfError("Error setting promotion policy", status)
+
+ def get_promotion_policy_param(self, promotion_type, param_id):
+ self.read_lock()
+
+ param_value = c_uint64()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_get_param(
+ self.cache_handle, promotion_type, param_id, byref(param_value)
+ )
+
+ self.read_unlock()
+ if status:
+ raise OcfError("Error getting promotion policy parameter", status)
+
+ return param_value
+
+ def set_promotion_policy_param(self, promotion_type, param_id, param_value):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_set_param(
+ self.cache_handle, promotion_type, param_id, param_value
+ )
+
+ self.write_unlock()
+ if status:
+ raise OcfError("Error setting promotion policy parameter", status)
+
+ def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
+ self.cache_handle, policy
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error setting cache seq cut off policy", status)
+
+ def configure_device(
+ self, device, force=False, perform_test=True, cache_line_size=None
+ ):
+ self.device = device
+ self.device_name = device.uuid
+ self.dev_cfg = CacheDeviceConfig(
+ _uuid=Uuid(
+ _data=cast(
+ create_string_buffer(self.device_name.encode("ascii")), c_char_p
+ ),
+ _size=len(self.device_name) + 1,
+ ),
+ _volume_type=device.type_id,
+ _cache_line_size=cache_line_size
+ if cache_line_size
+ else self.cache_line_size,
+ _force=force,
+ _min_free_ram=0,
+ _perform_test=perform_test,
+ _discard_on_start=False,
+ )
+
+ def attach_device(
+ self, device, force=False, perform_test=False, cache_line_size=None
+ ):
+ self.configure_device(device, force, perform_test, cache_line_size)
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+ device.owner.lib.ocf_mngt_cache_attach(
+ self.cache_handle, byref(self.dev_cfg), c, None
+ )
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Attaching cache device failed", c.results["error"])
+
+ def load_cache(self, device):
+ self.configure_device(device)
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ device.owner.lib.ocf_mngt_cache_load(
+ self.cache_handle, byref(self.dev_cfg), c, None
+ )
+
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Loading cache device failed", c.results["error"])
+
+ @classmethod
+ def load_from_device(cls, device, name="cache"):
+ c = cls(name=name, owner=device.owner)
+
+ c.start_cache()
+ try:
+ c.load_cache(device)
+ except: # noqa E722
+ c.stop()
+ raise
+
+ return c
+
+ @classmethod
+ def start_on_device(cls, device, **kwargs):
+ c = cls(owner=device.owner, **kwargs)
+
+ c.start_cache()
+ try:
+ c.attach_device(device, force=True)
+ except: # noqa E722
+ c.stop()
+ raise
+
+ return c
+
+ def put(self):
+ self.owner.lib.ocf_mngt_cache_put(self.cache_handle)
+
+ def get(self):
+ status = self.owner.lib.ocf_mngt_cache_get(self.cache_handle)
+ if status:
+ raise OcfError("Couldn't get cache instance", status)
+
+ def read_lock(self):
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_read_lock(self.cache_handle, c, None)
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Couldn't lock cache instance", c.results["error"])
+
+ def write_lock(self):
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_lock(self.cache_handle, c, None)
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Couldn't lock cache instance", c.results["error"])
+
+ def read_unlock(self):
+ self.owner.lib.ocf_mngt_cache_read_unlock(self.cache_handle)
+
+ def write_unlock(self):
+ self.owner.lib.ocf_mngt_cache_unlock(self.cache_handle)
+
+ def add_core(self, core: Core):
+ self.write_lock()
+
+ c = OcfCompletion(
+ [
+ ("cache", c_void_p),
+ ("core", c_void_p),
+ ("priv", c_void_p),
+ ("error", c_int),
+ ]
+ )
+
+ self.owner.lib.ocf_mngt_cache_add_core(
+ self.cache_handle, byref(core.get_cfg()), c, None
+ )
+
+ c.wait()
+ if c.results["error"]:
+ self.write_unlock()
+ raise OcfError("Failed adding core", c.results["error"])
+
+ core.cache = self
+ core.handle = c.results["core"]
+ self.cores.append(core)
+
+ self.write_unlock()
+
+ def remove_core(self, core: Core):
+ self.write_lock()
+
+ c = OcfCompletion([("priv", c_void_p), ("error", c_int)])
+
+ self.owner.lib.ocf_mngt_cache_remove_core(core.handle, c, None)
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Failed removing core", c.results["error"])
+
+ self.cores.remove(core)
+
+ def get_stats(self):
+ cache_info = CacheInfo()
+ usage = UsageStats()
+ req = RequestsStats()
+ block = BlocksStats()
+ errors = ErrorsStats()
+
+ self.read_lock()
+
+ status = self.owner.lib.ocf_cache_get_info(self.cache_handle, byref(cache_info))
+ if status:
+ self.read_unlock()
+ raise OcfError("Failed getting cache info", status)
+
+ status = self.owner.lib.ocf_stats_collect_cache(
+ self.cache_handle, byref(usage), byref(req), byref(block), byref(errors)
+ )
+ if status:
+ self.read_unlock()
+ raise OcfError("Failed getting stats", status)
+
+ line_size = CacheLineSize(cache_info.cache_line_size)
+ cache_name = self.owner.lib.ocf_cache_get_name(self).decode("ascii")
+
+ self.read_unlock()
+ return {
+ "conf": {
+ "attached": cache_info.attached,
+ "volume_type": self.owner.volume_types[cache_info.volume_type],
+ "size": CacheLines(cache_info.size, line_size),
+ "inactive": {
+ "occupancy": CacheLines(
+ cache_info.inactive.occupancy.value, line_size
+ ),
+ "dirty": CacheLines(cache_info.inactive.dirty.value, line_size),
+ "clean": CacheLines(cache_info.inactive.clean.value, line_size),
+ },
+ "occupancy": CacheLines(cache_info.occupancy, line_size),
+ "dirty": CacheLines(cache_info.dirty, line_size),
+ "dirty_initial": CacheLines(cache_info.dirty_initial, line_size),
+ "dirty_for": timedelta(seconds=cache_info.dirty_for),
+ "cache_mode": CacheMode(cache_info.cache_mode),
+ "fallback_pt": {
+ "error_counter": cache_info.fallback_pt.error_counter,
+ "status": cache_info.fallback_pt.status,
+ },
+ "state": cache_info.state,
+ "eviction_policy": EvictionPolicy(cache_info.eviction_policy),
+ "cleaning_policy": CleaningPolicy(cache_info.cleaning_policy),
+ "promotion_policy": PromotionPolicy(cache_info.promotion_policy),
+ "cache_line_size": line_size,
+ "flushed": CacheLines(cache_info.flushed, line_size),
+ "core_count": cache_info.core_count,
+ "metadata_footprint": Size(cache_info.metadata_footprint),
+ "metadata_end_offset": Size(cache_info.metadata_end_offset),
+ "cache_name": cache_name,
+ },
+ "block": struct_to_dict(block),
+ "req": struct_to_dict(req),
+ "usage": struct_to_dict(usage),
+ "errors": struct_to_dict(errors),
+ }
+
+ def reset_stats(self):
+ self.owner.lib.ocf_core_stats_initialize_all(self.cache_handle)
+
+ def get_default_queue(self):
+ if not self.io_queues:
+ raise Exception("No queues added for cache")
+
+ return self.io_queues[0]
+
+ def save(self):
+ if not self.started:
+ raise Exception("Not started!")
+
+ self.write_lock()
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_save(self.cache_handle, c, None)
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Failed saving cache", c.results["error"])
+
+ def stop(self):
+ if not self.started:
+ raise Exception("Already stopped!")
+
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+ self.owner.lib.ocf_mngt_cache_stop(self.cache_handle, c, None)
+
+ c.wait()
+ if c.results["error"]:
+ self.write_unlock()
+ raise OcfError("Failed stopping cache", c.results["error"])
+
+ self.mngt_queue.put()
+ del self.io_queues[:]
+ self.started = False
+
+ self.write_unlock()
+
+ self.owner.caches.remove(self)
+
+ def flush(self):
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, c, None)
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Couldn't flush cache", c.results["error"])
+
+ def get_name(self):
+ self.read_lock()
+
+ try:
+ return str(self.owner.lib.ocf_cache_get_name(self), encoding="ascii")
+ except: # noqa E722
+ raise OcfError("Couldn't get cache name")
+ finally:
+ self.read_unlock()
+
+
+lib = OcfLib.getInstance()
+lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
+lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]
+lib.ocf_cache_get_name.argtypes = [c_void_p]
+lib.ocf_cache_get_name.restype = c_char_p
+lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_cache_cleaning_set_policy.restype = c_int
+lib.ocf_mngt_core_set_seq_cutoff_policy_all.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_core_set_seq_cutoff_policy_all.restype = c_int
+lib.ocf_stats_collect_cache.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_void_p,
+ c_void_p,
+ c_void_p,
+]
+lib.ocf_stats_collect_cache.restype = c_int
+lib.ocf_cache_get_info.argtypes = [c_void_p, c_void_p]
+lib.ocf_cache_get_info.restype = c_int
+lib.ocf_mngt_cache_cleaning_set_param.argtypes = [
+ c_void_p,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+]
+lib.ocf_mngt_cache_cleaning_set_param.restype = c_int
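
The Cache wrapper above exposes most of the OCF management API to the functional tests. A minimal runtime-tuning sketch follows; it assumes `cache` is an already started pyocf Cache instance and that struct_to_dict() keys the usage section by the UsageStats field names (both assumptions, not shown in this diff):

    # Sketch only - runtime tuning of an already started pyocf Cache.
    from pyocf.types.cache import CacheMode, CleaningPolicy
    from pyocf.types.shared import SeqCutOffPolicy

    cache.change_cache_mode(CacheMode.WB)                # switch to write-back
    cache.set_cleaning_policy(CleaningPolicy.NOP)        # disable background cleaning
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)  # never bypass sequential I/O

    stats = cache.get_stats()
    print(stats["conf"]["cache_mode"], stats["usage"]["occupancy"])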
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py b/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py
new file mode 100644
index 000000000..df28290aa
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py
@@ -0,0 +1,43 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, CFUNCTYPE, Structure, c_int
+from .shared import SharedOcfObject
+
+
+class CleanerOps(Structure):
+ INIT = CFUNCTYPE(c_int, c_void_p)
+ KICK = CFUNCTYPE(None, c_void_p)
+ STOP = CFUNCTYPE(None, c_void_p)
+
+ _fields_ = [("init", INIT), ("kick", KICK), ("stop", STOP)]
+
+
+class Cleaner(SharedOcfObject):
+ _instances_ = {}
+ _fields_ = [("cleaner", c_void_p)]
+
+ def __init__(self):
+ self._as_parameter_ = self.cleaner
+ super().__init__()
+
+ @classmethod
+ def get_ops(cls):
+ return CleanerOps(init=cls._init, kick=cls._kick, stop=cls._stop)
+
+ @staticmethod
+ @CleanerOps.INIT
+ def _init(cleaner):
+ return 0
+
+ @staticmethod
+ @CleanerOps.KICK
+ def _kick(cleaner):
+ pass
+
+ @staticmethod
+ @CleanerOps.STOP
+ def _stop(cleaner):
+ pass
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/core.py b/src/spdk/ocf/tests/functional/pyocf/types/core.py
new file mode 100644
index 000000000..0003c0daf
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/core.py
@@ -0,0 +1,227 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import (
+ c_size_t,
+ c_void_p,
+ Structure,
+ c_int,
+ c_uint8,
+ c_uint16,
+ c_uint32,
+ c_uint64,
+ c_char,
+ c_char_p,
+ c_bool,
+ cast,
+ byref,
+ create_string_buffer,
+)
+from datetime import timedelta
+
+from .data import Data
+from .io import Io, IoDir
+from .queue import Queue
+from .shared import Uuid, OcfCompletion, OcfError, SeqCutOffPolicy
+from .stats.core import CoreInfo
+from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
+from .volume import Volume
+from ..ocf import OcfLib
+from ..utils import Size, struct_to_dict
+
+
+class UserMetadata(Structure):
+ _fields_ = [("data", c_void_p), ("size", c_size_t)]
+
+
+class CoreConfig(Structure):
+ MAX_CORE_NAME_SIZE = 32
+ _fields_ = [
+ ("_name", c_char * MAX_CORE_NAME_SIZE),
+ ("_uuid", Uuid),
+ ("_volume_type", c_uint8),
+ ("_try_add", c_bool),
+ ("_seq_cutoff_threshold", c_uint32),
+ ("_user_metadata", UserMetadata),
+ ]
+
+
+class Core:
+ DEFAULT_ID = 4096
+ DEFAULT_SEQ_CUTOFF_THRESHOLD = 1024 * 1024
+
+ def __init__(
+ self,
+ device: Volume,
+ try_add: bool,
+ name: str = "core",
+ seq_cutoff_threshold: int = DEFAULT_SEQ_CUTOFF_THRESHOLD,
+ ):
+ self.cache = None
+ self.device = device
+ self.device_name = device.uuid
+ self.handle = c_void_p()
+ self.cfg = CoreConfig(
+ _uuid=Uuid(
+ _data=cast(
+ create_string_buffer(self.device_name.encode("ascii")),
+ c_char_p,
+ ),
+ _size=len(self.device_name) + 1,
+ ),
+ _name=name.encode("ascii"),
+ _volume_type=self.device.type_id,
+ _try_add=try_add,
+ _seq_cutoff_threshold=seq_cutoff_threshold,
+ _user_metadata=UserMetadata(_data=None, _size=0),
+ )
+
+ @classmethod
+ def using_device(cls, device, **kwargs):
+ c = cls(device=device, try_add=False, **kwargs)
+
+ return c
+
+ def get_cfg(self):
+ return self.cfg
+
+ def get_handle(self):
+ return self.handle
+
+ def new_io(
+ self, queue: Queue, addr: int, length: int, direction: IoDir,
+ io_class: int, flags: int
+ ):
+ if not self.cache:
+ raise Exception("Core isn't attached to any cache")
+
+ io = OcfLib.getInstance().ocf_core_new_io_wrapper(
+ self.handle, queue.handle, addr, length, direction, io_class, flags)
+
+ if io is None:
+ raise Exception("Failed to create io!")
+
+ return Io.from_pointer(io)
+
+ def new_core_io(
+ self, queue: Queue, addr: int, length: int, direction: IoDir,
+ io_class: int, flags: int
+ ):
+ lib = OcfLib.getInstance()
+ volume = lib.ocf_core_get_volume(self.handle)
+ io = lib.ocf_volume_new_io(
+ volume, queue.handle, addr, length, direction, io_class, flags)
+ return Io.from_pointer(io)
+
+ def get_stats(self):
+ core_info = CoreInfo()
+ usage = UsageStats()
+ req = RequestsStats()
+ blocks = BlocksStats()
+ errors = ErrorsStats()
+
+ self.cache.read_lock()
+ status = self.cache.owner.lib.ocf_stats_collect_core(
+ self.handle, byref(usage), byref(req), byref(blocks), byref(errors)
+ )
+ if status:
+ self.cache.read_unlock()
+ raise OcfError("Failed collecting core stats", status)
+
+ status = self.cache.owner.lib.ocf_core_get_info(
+ self.handle, byref(core_info)
+ )
+ if status:
+ self.cache.read_unlock()
+ raise OcfError("Failed getting core stats", status)
+
+ self.cache.read_unlock()
+ return {
+ "size": Size(core_info.core_size_bytes),
+ "dirty_for": timedelta(seconds=core_info.dirty_for),
+ "seq_cutoff_policy": SeqCutOffPolicy(core_info.seq_cutoff_policy),
+ "seq_cutoff_threshold": core_info.seq_cutoff_threshold,
+ "usage": struct_to_dict(usage),
+ "req": struct_to_dict(req),
+ "blocks": struct_to_dict(blocks),
+ "errors": struct_to_dict(errors),
+ }
+
+ def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
+ self.cache.write_lock()
+
+ status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
+ self.handle, policy
+ )
+ if status:
+ self.cache.write_unlock()
+ raise OcfError("Error setting core seq cut off policy", status)
+
+ self.cache.write_unlock()
+
+ def reset_stats(self):
+ self.cache.owner.lib.ocf_core_stats_initialize(self.handle)
+
+ def exp_obj_md5(self):
+ logging.getLogger("pyocf").warning(
+ "Reading whole exported object! This disturbs statistics values"
+ )
+
+ cache_line_size = int(self.cache.get_stats()['conf']['cache_line_size'])
+ read_buffer_all = Data(self.device.size)
+
+ read_buffer = Data(cache_line_size)
+
+ position = 0
+ while position < read_buffer_all.size:
+ io = self.new_io(self.cache.get_default_queue(), position,
+ cache_line_size, IoDir.READ, 0, 0)
+ io.set_data(read_buffer)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ if cmpl.results["err"]:
+ raise Exception("Error reading whole exported object")
+
+ read_buffer_all.copy(read_buffer, position, 0, cache_line_size)
+ position += cache_line_size
+
+ return read_buffer_all.md5()
+
+
+lib = OcfLib.getInstance()
+lib.ocf_core_get_volume.restype = c_void_p
+lib.ocf_volume_new_io.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_uint64,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+ c_uint64,
+]
+lib.ocf_volume_new_io.restype = c_void_p
+lib.ocf_core_get_volume.argtypes = [c_void_p]
+lib.ocf_core_get_volume.restype = c_void_p
+lib.ocf_mngt_core_set_seq_cutoff_policy.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_core_set_seq_cutoff_policy.restype = c_int
+lib.ocf_stats_collect_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
+lib.ocf_stats_collect_core.restype = c_int
+lib.ocf_core_get_info.argtypes = [c_void_p, c_void_p]
+lib.ocf_core_get_info.restype = c_int
+lib.ocf_core_new_io_wrapper.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_uint64,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+ c_uint64,
+]
+lib.ocf_core_new_io_wrapper.restype = c_void_p
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/ctx.py b/src/spdk/ocf/tests/functional/pyocf/types/ctx.py
new file mode 100644
index 000000000..14c4b5757
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/ctx.py
@@ -0,0 +1,122 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, Structure, c_char_p, cast, pointer, byref, c_int
+
+from .logger import LoggerOps, Logger
+from .data import DataOps, Data
+from .cleaner import CleanerOps, Cleaner
+from .metadata_updater import MetadataUpdaterOps, MetadataUpdater
+from .shared import OcfError
+from ..ocf import OcfLib
+from .queue import Queue
+from .volume import Volume
+
+
+class OcfCtxOps(Structure):
+ _fields_ = [
+ ("data", DataOps),
+ ("cleaner", CleanerOps),
+ ("metadata_updater", MetadataUpdaterOps),
+ ("logger", LoggerOps),
+ ]
+
+
+class OcfCtxCfg(Structure):
+ _fields_ = [("name", c_char_p), ("ops", OcfCtxOps), ("logger_priv", c_void_p)]
+
+
+class OcfCtx:
+ def __init__(self, lib, name, logger, data, mu, cleaner):
+ self.logger = logger
+ self.data = data
+ self.mu = mu
+ self.cleaner = cleaner
+ self.ctx_handle = c_void_p()
+ self.lib = lib
+ self.volume_types_count = 1
+ self.volume_types = {}
+ self.caches = []
+
+ self.cfg = OcfCtxCfg(
+ name=name,
+ ops=OcfCtxOps(
+ data=self.data.get_ops(),
+ cleaner=self.cleaner.get_ops(),
+ metadata_updater=self.mu.get_ops(),
+ logger=logger.get_ops(),
+ ),
+ logger_priv=cast(pointer(logger.get_priv()), c_void_p),
+ )
+
+ result = self.lib.ocf_ctx_create(byref(self.ctx_handle), byref(self.cfg))
+ if result != 0:
+ raise OcfError("Context initialization failed", result)
+
+ def register_volume_type(self, volume_type):
+ self.volume_types[self.volume_types_count] = volume_type
+ volume_type.type_id = self.volume_types_count
+ volume_type.owner = self
+
+ result = self.lib.ocf_ctx_register_volume_type(
+ self.ctx_handle,
+ self.volume_types_count,
+ byref(self.volume_types[self.volume_types_count].get_props()),
+ )
+ if result != 0:
+ raise OcfError("Volume type registration failed", result)
+
+ self.volume_types_count += 1
+
+ def unregister_volume_type(self, vol_type):
+ if not vol_type.type_id:
+ raise Exception("Already unregistered")
+
+ self.lib.ocf_ctx_unregister_volume_type(
+ self.ctx_handle, vol_type.type_id
+ )
+
+ del self.volume_types[vol_type.type_id]
+
+ def cleanup_volume_types(self):
+ for k, vol_type in list(self.volume_types.items()):
+ if vol_type:
+ self.unregister_volume_type(vol_type)
+
+ def stop_caches(self):
+ for cache in self.caches[:]:
+ cache.stop()
+
+ def exit(self):
+ self.stop_caches()
+ self.cleanup_volume_types()
+
+ self.lib.ocf_ctx_put(self.ctx_handle)
+
+ self.cfg = None
+ self.logger = None
+ self.data = None
+ self.mu = None
+ self.cleaner = None
+ Queue._instances_ = {}
+ Volume._instances_ = {}
+ Data._instances_ = {}
+ Logger._instances_ = {}
+
+
+def get_default_ctx(logger):
+ return OcfCtx(
+ OcfLib.getInstance(),
+ b"PyOCF default ctx",
+ logger,
+ Data,
+ MetadataUpdater,
+ Cleaner,
+ )
+
+
+lib = OcfLib.getInstance()
+lib.ocf_mngt_cache_get_by_name.argtypes = [c_void_p, c_void_p, c_void_p]
+lib.ocf_mngt_cache_get_by_name.restype = c_int
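
get_default_ctx() above is the usual entry point for the functional tests. A minimal bring-up sketch using only types defined in this tree (the device sizes are arbitrary; Size.from_KiB comes from pyocf.utils):

    # Sketch only - minimal pyocf bring-up: context, volumes, cache, core.
    from pyocf.types.ctx import get_default_ctx
    from pyocf.types.logger import DefaultLogger
    from pyocf.types.volume import Volume
    from pyocf.types.cache import Cache
    from pyocf.types.core import Core
    from pyocf.utils import Size

    ctx = get_default_ctx(DefaultLogger())
    ctx.register_volume_type(Volume)

    cache_dev = Volume(Size.from_KiB(50 * 1024))    # RAM-backed cache device
    core_dev = Volume(Size.from_KiB(100 * 1024))    # RAM-backed core device

    cache = Cache.start_on_device(cache_dev)
    core = Core.using_device(core_dev)
    cache.add_core(core)

    # ... submit I/O against the core here ...

    ctx.exit()    # stops caches and unregisters volume types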
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/data.py b/src/spdk/ocf/tests/functional/pyocf/types/data.py
new file mode 100644
index 000000000..b032cf3ce
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/data.py
@@ -0,0 +1,225 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ c_uint32,
+ CFUNCTYPE,
+ c_uint64,
+ create_string_buffer,
+ cast,
+ memset,
+ string_at,
+ Structure,
+ c_int,
+ memmove,
+ byref,
+)
+from enum import IntEnum
+from hashlib import md5
+import weakref
+
+from ..utils import print_buffer, Size as S
+
+
+class DataSeek(IntEnum):
+ BEGIN = 0
+ CURRENT = 1
+
+
+class DataOps(Structure):
+ ALLOC = CFUNCTYPE(c_void_p, c_uint32)
+ FREE = CFUNCTYPE(None, c_void_p)
+ MLOCK = CFUNCTYPE(c_int, c_void_p)
+ MUNLOCK = CFUNCTYPE(None, c_void_p)
+ READ = CFUNCTYPE(c_uint32, c_void_p, c_void_p, c_uint32)
+ WRITE = CFUNCTYPE(c_uint32, c_void_p, c_void_p, c_uint32)
+ ZERO = CFUNCTYPE(c_uint32, c_void_p, c_uint32)
+ SEEK = CFUNCTYPE(c_uint32, c_void_p, c_uint32, c_uint32)
+ COPY = CFUNCTYPE(c_uint64, c_void_p, c_void_p, c_uint64, c_uint64, c_uint64)
+ SECURE_ERASE = CFUNCTYPE(None, c_void_p)
+
+ _fields_ = [
+ ("_alloc", ALLOC),
+ ("_free", FREE),
+ ("_mlock", MLOCK),
+ ("_munlock", MUNLOCK),
+ ("_read", READ),
+ ("_write", WRITE),
+ ("_zero", ZERO),
+ ("_seek", SEEK),
+ ("_copy", COPY),
+ ("_secure_erase", SECURE_ERASE),
+ ]
+
+
+class Data:
+ DATA_POISON = 0xA5
+ PAGE_SIZE = 4096
+
+ _instances_ = {}
+ _ocf_instances_ = []
+
+ def __init__(self, byte_count: int):
+ self.size = int(byte_count)
+ self.position = 0
+ self.buffer = create_string_buffer(int(self.size))
+ self.handle = cast(byref(self.buffer), c_void_p)
+
+ memset(self.handle, self.DATA_POISON, self.size)
+ type(self)._instances_[self.handle.value] = weakref.ref(self)
+ self._as_parameter_ = self.handle
+
+ @classmethod
+ def get_instance(cls, ref):
+ return cls._instances_[ref]()
+
+ @classmethod
+ def get_ops(cls):
+ return DataOps(
+ _alloc=cls._alloc,
+ _free=cls._free,
+ _mlock=cls._mlock,
+ _munlock=cls._munlock,
+ _read=cls._read,
+ _write=cls._write,
+ _zero=cls._zero,
+ _seek=cls._seek,
+ _copy=cls._copy,
+ _secure_erase=cls._secure_erase,
+ )
+
+ @classmethod
+ def pages(cls, pages: int):
+ return cls(pages * Data.PAGE_SIZE)
+
+ @classmethod
+ def from_bytes(cls, source: bytes, offset: int = 0, size: int = 0):
+ if size == 0:
+ size = len(source) - offset
+ d = cls(size)
+
+ memmove(d.handle, cast(source, c_void_p).value + offset, size)
+
+ return d
+
+ @classmethod
+ def from_string(cls, source: str, encoding: str = "ascii"):
+ b = bytes(source, encoding)
+ # duplicate string to fill space up to sector boundary
+ padding_len = S.from_B(len(b), sector_aligned=True).B - len(b)
+ padding = b * (padding_len // len(b) + 1)
+ padding = padding[:padding_len]
+ b = b + padding
+ return cls.from_bytes(b)
+
+ @staticmethod
+ @DataOps.ALLOC
+ def _alloc(pages):
+ data = Data.pages(pages)
+ Data._ocf_instances_.append(data)
+
+ return data.handle.value
+
+ @staticmethod
+ @DataOps.FREE
+ def _free(ref):
+ Data._ocf_instances_.remove(Data.get_instance(ref))
+
+ @staticmethod
+ @DataOps.MLOCK
+ def _mlock(ref):
+ return Data.get_instance(ref).mlock()
+
+ @staticmethod
+ @DataOps.MUNLOCK
+ def _munlock(ref):
+ Data.get_instance(ref).munlock()
+
+ @staticmethod
+ @DataOps.READ
+ def _read(dst, src, size):
+ return Data.get_instance(src).read(dst, size)
+
+ @staticmethod
+ @DataOps.WRITE
+ def _write(dst, src, size):
+ return Data.get_instance(dst).write(src, size)
+
+ @staticmethod
+ @DataOps.ZERO
+ def _zero(dst, size):
+ return Data.get_instance(dst).zero(size)
+
+ @staticmethod
+ @DataOps.SEEK
+ def _seek(dst, seek, size):
+ return Data.get_instance(dst).seek(DataSeek(seek), size)
+
+ @staticmethod
+ @DataOps.COPY
+ def _copy(dst, src, skip, seek, size):
+ return Data.get_instance(dst).copy(
+ Data.get_instance(src), skip, seek, size
+ )
+
+ @staticmethod
+ @DataOps.SECURE_ERASE
+ def _secure_erase(dst):
+ Data.get_instance(dst).secure_erase()
+
+ def read(self, dst, size):
+ to_read = min(self.size - self.position, size)
+ memmove(dst, self.handle.value + self.position, to_read)
+
+ self.position += to_read
+ return to_read
+
+ def write(self, src, size):
+ to_write = min(self.size - self.position, size)
+ memmove(self.handle.value + self.position, src, to_write)
+
+ self.position += to_write
+ return to_write
+
+ def mlock(self):
+ return 0
+
+ def munlock(self):
+ pass
+
+ def zero(self, size):
+ to_zero = min(self.size - self.position, size)
+ memset(self.handle.value + self.position, 0, to_zero)
+
+ self.position += to_zero
+ return to_zero
+
+ def seek(self, seek, size):
+ if seek == DataSeek.CURRENT:
+ to_move = min(self.size - self.position, size)
+ self.position += to_move
+ else:
+ to_move = min(self.size, size)
+ self.position = to_move
+
+ return to_move
+
+ def copy(self, src, skip, seek, size):
+ to_write = min(self.size - skip, size, src.size - seek)
+
+ memmove(self.handle.value + skip, src.handle.value + seek, to_write)
+ return to_write
+
+ def secure_erase(self):
+ pass
+
+ def dump(self, ignore=DATA_POISON, **kwargs):
+ print_buffer(self.buffer, self.size, ignore=ignore, **kwargs)
+
+ def md5(self):
+ m = md5()
+ m.update(string_at(self.handle, self.size))
+ return m.hexdigest()
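
The Data wrapper above behaves like a positioned byte buffer with explicit copy semantics. A short sketch of the from_string/copy/md5 helpers defined in this file:

    # Sketch only - Data buffers and the copy(src, skip, seek, size) helper.
    from pyocf.types.data import Data

    d1 = Data.from_string("some test data")   # padded up to a sector boundary
    d2 = Data(d1.size)
    d2.copy(d1, 0, 0, d1.size)                # dst offset 0, src offset 0
    assert d1.md5() == d2.md5()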
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/io.py b/src/spdk/ocf/tests/functional/pyocf/types/io.py
new file mode 100644
index 000000000..7e3671c5b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/io.py
@@ -0,0 +1,118 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ c_int,
+ c_uint32,
+ c_uint64,
+ CFUNCTYPE,
+ Structure,
+ POINTER,
+ byref,
+ cast,
+)
+from enum import IntEnum
+
+from ..ocf import OcfLib
+from .data import Data
+
+
+class IoDir(IntEnum):
+ READ = 0
+ WRITE = 1
+
+
+class IoOps(Structure):
+ pass
+
+
+class Io(Structure):
+ START = CFUNCTYPE(None, c_void_p)
+ HANDLE = CFUNCTYPE(None, c_void_p, c_void_p)
+ END = CFUNCTYPE(None, c_void_p, c_int)
+
+ _instances_ = {}
+ _fields_ = [
+ ("_addr", c_uint64),
+ ("_flags", c_uint64),
+ ("_bytes", c_uint32),
+ ("_class", c_uint32),
+ ("_dir", c_uint32),
+ ("_io_queue", c_void_p),
+ ("_start", START),
+ ("_handle", HANDLE),
+ ("_end", END),
+ ("_priv1", c_void_p),
+ ("_priv2", c_void_p),
+ ]
+
+ @classmethod
+ def from_pointer(cls, ref):
+ c = cls.from_address(ref)
+ cls._instances_[ref] = c
+ OcfLib.getInstance().ocf_io_set_cmpl_wrapper(
+ byref(c), None, None, c.c_end
+ )
+ return c
+
+ @classmethod
+ def get_instance(cls, ref):
+ return cls._instances_[cast(ref, c_void_p).value]
+
+ def del_object(self):
+ del type(self)._instances_[cast(byref(self), c_void_p).value]
+
+ def put(self):
+ OcfLib.getInstance().ocf_io_put(byref(self))
+
+ def get(self):
+ OcfLib.getInstance().ocf_io_get(byref(self))
+
+ @staticmethod
+ @END
+ def c_end(io, err):
+ Io.get_instance(io).end(err)
+
+ @staticmethod
+ @START
+ def c_start(io):
+ Io.get_instance(io).start()
+
+ @staticmethod
+ @HANDLE
+ def c_handle(io, opaque):
+ Io.get_instance(io).handle(opaque)
+
+ def end(self, err):
+ try:
+ self.callback(err)
+ except: # noqa E722
+ pass
+
+ self.put()
+ self.del_object()
+
+ def submit(self):
+ return OcfLib.getInstance().ocf_core_submit_io_wrapper(byref(self))
+
+ def set_data(self, data: Data, offset: int = 0):
+ self.data = data
+ OcfLib.getInstance().ocf_io_set_data(byref(self), data, offset)
+
+
+IoOps.SET_DATA = CFUNCTYPE(c_int, POINTER(Io), c_void_p, c_uint32)
+IoOps.GET_DATA = CFUNCTYPE(c_void_p, POINTER(Io))
+
+IoOps._fields_ = [("_set_data", IoOps.SET_DATA), ("_get_data", IoOps.GET_DATA)]
+
+lib = OcfLib.getInstance()
+lib.ocf_io_set_cmpl_wrapper.argtypes = [POINTER(Io), c_void_p, c_void_p, Io.END]
+
+lib.ocf_core_new_io_wrapper.argtypes = [c_void_p]
+lib.ocf_core_new_io_wrapper.restype = c_void_p
+
+lib.ocf_io_set_data.argtypes = [POINTER(Io), c_void_p, c_uint32]
+lib.ocf_io_set_data.restype = c_int
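
The Io wrapper above is how the tests push data through an exported object. A minimal single-read sketch, mirroring the pattern used by Core.exp_obj_md5() (it assumes `core` is a Core already attached to a started cache):

    # Sketch only - one 4 KiB read through the cache, completed via OcfCompletion.
    from ctypes import c_int
    from pyocf.types.data import Data
    from pyocf.types.io import IoDir
    from pyocf.types.shared import OcfCompletion

    buf = Data(4096)
    io = core.new_io(core.cache.get_default_queue(), 0, 4096, IoDir.READ, 0, 0)
    io.set_data(buf)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert cmpl.results["err"] == 0
    print(buf.md5())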
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/logger.py b/src/spdk/ocf/tests/functional/pyocf/types/logger.py
new file mode 100644
index 000000000..3d85b3a7f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/logger.py
@@ -0,0 +1,182 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ Structure,
+ c_char_p,
+ c_uint,
+ c_int,
+ cast,
+ CFUNCTYPE,
+ pointer,
+)
+from enum import IntEnum
+import logging
+from io import StringIO
+import weakref
+
+from ..ocf import OcfLib
+
+logger = logging.getLogger("pyocf")
+logger.setLevel(logging.DEBUG)
+
+
+class LogLevel(IntEnum):
+ EMERG = 0
+ ALERT = 1
+ CRIT = 2
+ ERR = 3
+ WARN = 4
+ NOTICE = 5
+ INFO = 6
+ DEBUG = 7
+
+
+LevelMapping = {
+ LogLevel.EMERG: logging.CRITICAL,
+ LogLevel.ALERT: logging.CRITICAL,
+ LogLevel.CRIT: logging.CRITICAL,
+ LogLevel.ERR: logging.ERROR,
+ LogLevel.WARN: logging.WARNING,
+ LogLevel.NOTICE: logging.INFO,
+ LogLevel.INFO: logging.INFO,
+ LogLevel.DEBUG: logging.DEBUG,
+}
+
+
+class LoggerOps(Structure):
+ OPEN = CFUNCTYPE(c_int, c_void_p)
+ CLOSE = CFUNCTYPE(None, c_void_p)
+ # PRINTF omitted - we cannot make a variadic function call in ctypes
+ LOG = CFUNCTYPE(c_int, c_void_p, c_uint, c_char_p)
+ PRINT_RL = CFUNCTYPE(c_int, c_void_p, c_char_p)
+ DUMP_STACK = CFUNCTYPE(c_int, c_void_p)
+
+ _fields_ = [
+ ("_open", OPEN),
+ ("_close", CLOSE),
+ ("_print", c_void_p),
+ ("_print_rl", PRINT_RL),
+ ("_dump_stack", DUMP_STACK),
+ ]
+
+
+class LoggerPriv(Structure):
+ _fields_ = [("_log", LoggerOps.LOG)]
+
+
+class Logger(Structure):
+ _instances_ = {}
+
+ _fields_ = [("logger", c_void_p)]
+
+ def __init__(self):
+ self.ops = LoggerOps(
+ _open=self._open,
+ _print=cast(OcfLib.getInstance().pyocf_printf_helper, c_void_p),
+ _close=self._close,
+ )
+ self.priv = LoggerPriv(_log=self._log)
+ self._as_parameter_ = cast(pointer(self.priv), c_void_p).value
+ self._instances_[self._as_parameter_] = weakref.ref(self)
+
+ def get_ops(self):
+ return self.ops
+
+ def get_priv(self):
+ return self.priv
+
+ @classmethod
+ def get_instance(cls, ctx: int):
+ priv = OcfLib.getInstance().ocf_logger_get_priv(ctx)
+ return cls._instances_[priv]()
+
+ @staticmethod
+ @LoggerOps.LOG
+ def _log(ref, lvl, msg):
+ Logger.get_instance(ref).log(lvl, str(msg, "ascii").strip())
+ return 0
+
+ @staticmethod
+ @LoggerOps.OPEN
+ def _open(ref):
+ if hasattr(Logger.get_instance(ref), "open"):
+ return Logger.get_instance(ref).open()
+ else:
+ return 0
+
+ @staticmethod
+ @LoggerOps.CLOSE
+ def _close(ref):
+ if hasattr(Logger.get_instance(ref), "close"):
+ return Logger.get_instance(ref).close()
+ else:
+ return 0
+
+
+class DefaultLogger(Logger):
+ def __init__(self, level: LogLevel = LogLevel.WARN):
+ super().__init__()
+ self.level = level
+
+ ch = logging.StreamHandler()
+ fmt = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+ ch.setFormatter(fmt)
+ ch.setLevel(LevelMapping[level])
+ logger.addHandler(ch)
+
+ def log(self, lvl: int, msg: str):
+ logger.log(LevelMapping[lvl], msg)
+
+ def close(self):
+ logger.handlers = []
+
+
+class FileLogger(Logger):
+ def __init__(self, f, console_level=None):
+ super().__init__()
+ fmt = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+
+ fh = logging.FileHandler(f)
+ fh.setLevel(logging.DEBUG)
+ fh.setFormatter(fmt)
+
+ logger.addHandler(fh)
+
+ if console_level:
+ sh = logging.StreamHandler()
+ sh.setLevel(LevelMapping[console_level])
+ sh.setFormatter(fmt)
+ logger.addHandler(sh)
+
+ def log(self, lvl, msg):
+ logger.log(LevelMapping[lvl], msg)
+
+ def close(self):
+ logger.handlers = []
+
+
+class BufferLogger(Logger):
+ def __init__(self, level: LogLevel):
+ super().__init__()
+ self.level = level
+ self.buffer = StringIO()
+
+ def log(self, lvl, msg):
+ if lvl < self.level:
+ self.buffer.write(msg + "\n")
+
+ def get_lines(self):
+ return self.buffer.getvalue().split("\n")
+
+
+lib = OcfLib.getInstance()
+lib.ocf_logger_get_priv.restype = c_void_p
+lib.ocf_logger_get_priv.argtypes = [c_void_p]
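
Besides DefaultLogger and FileLogger, the BufferLogger above lets a test capture OCF log output in memory. A short sketch:

    # Sketch only - collect OCF log lines in memory for later inspection.
    from pyocf.types.ctx import get_default_ctx
    from pyocf.types.logger import BufferLogger, LogLevel

    log = BufferLogger(LogLevel.DEBUG)
    ctx = get_default_ctx(log)
    # ... exercise the cache ...
    print("\n".join(log.get_lines()))
    ctx.exit()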
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py b/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py
new file mode 100644
index 000000000..592d2a14c
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py
@@ -0,0 +1,102 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, c_int, c_uint32, Structure, CFUNCTYPE
+from threading import Thread, Event
+
+from ..ocf import OcfLib
+
+
+class MetadataUpdaterOps(Structure):
+ INIT = CFUNCTYPE(c_int, c_void_p)
+ KICK = CFUNCTYPE(None, c_void_p)
+ STOP = CFUNCTYPE(None, c_void_p)
+
+ _fields_ = [("_init", INIT), ("_kick", KICK), ("_stop", STOP)]
+
+
+class MetadataUpdater:
+ pass
+
+
+def mu_run(*, mu: MetadataUpdater, kick: Event, stop: Event):
+ while True:
+ kick.clear()
+
+ if OcfLib.getInstance().ocf_metadata_updater_run(mu):
+ continue
+
+ kick.wait()
+ if stop.is_set():
+ break
+
+
+class MetadataUpdater:
+ _instances_ = {}
+ ops = None
+
+ def __init__(self, ref):
+ self._as_parameter_ = ref
+ MetadataUpdater._instances_[ref] = self
+ self.kick_event = Event()
+ self.stop_event = Event()
+
+ lib = OcfLib.getInstance()
+ self.thread = Thread(
+ group=None,
+ target=mu_run,
+ name="mu-{}".format(
+ lib.ocf_cache_get_name(lib.ocf_metadata_updater_get_cache(self))
+ ),
+ kwargs={"mu": self, "kick": self.kick_event, "stop": self.stop_event},
+ )
+ self.thread.start()
+
+ @classmethod
+ def get_ops(cls):
+ if not cls.ops:
+ cls.ops = MetadataUpdaterOps(
+ _init=cls._init, _kick=cls._kick, _stop=cls._stop
+ )
+ return cls.ops
+
+ @classmethod
+ def get_instance(cls, ref):
+ return cls._instances_[ref]
+
+ @classmethod
+ def del_instance(cls, ref):
+ del cls._instances_[ref]
+
+ @staticmethod
+ @MetadataUpdaterOps.INIT
+ def _init(ref):
+ m = MetadataUpdater(ref)
+ return 0
+
+ @staticmethod
+ @MetadataUpdaterOps.KICK
+ def _kick(ref):
+ MetadataUpdater.get_instance(ref).kick()
+
+ @staticmethod
+ @MetadataUpdaterOps.STOP
+ def _stop(ref):
+ MetadataUpdater.get_instance(ref).stop()
+ del MetadataUpdater._instances_[ref]
+
+ def kick(self):
+ self.kick_event.set()
+
+ def stop(self):
+ self.stop_event.set()
+ self.kick_event.set()
+
+
+lib = OcfLib.getInstance()
+lib.ocf_metadata_updater_get_cache.argtypes = [c_void_p]
+lib.ocf_metadata_updater_get_cache.restype = c_void_p
+lib.ocf_metadata_updater_run.argtypes = [c_void_p]
+lib.ocf_metadata_updater_run.restype = c_uint32
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/queue.py b/src/spdk/ocf/tests/functional/pyocf/types/queue.py
new file mode 100644
index 000000000..da0963907
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/queue.py
@@ -0,0 +1,105 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, CFUNCTYPE, Structure, byref
+from threading import Thread, Condition, Event
+import weakref
+
+from ..ocf import OcfLib
+from .shared import OcfError
+
+
+class QueueOps(Structure):
+ KICK = CFUNCTYPE(None, c_void_p)
+ KICK_SYNC = CFUNCTYPE(None, c_void_p)
+ STOP = CFUNCTYPE(None, c_void_p)
+
+ _fields_ = [("kick", KICK), ("kick_sync", KICK_SYNC), ("stop", STOP)]
+
+
+class Queue:
+ pass
+
+
+def io_queue_run(*, queue: Queue, kick: Condition, stop: Event):
+ def wait_predicate():
+ return stop.is_set() or OcfLib.getInstance().ocf_queue_pending_io(queue)
+
+ while True:
+ with kick:
+ kick.wait_for(wait_predicate)
+
+ OcfLib.getInstance().ocf_queue_run(queue)
+
+ if stop.is_set() and not OcfLib.getInstance().ocf_queue_pending_io(queue):
+ break
+
+
+class Queue:
+ _instances_ = {}
+
+ def __init__(self, cache, name):
+
+ self.ops = QueueOps(kick=type(self)._kick, stop=type(self)._stop)
+
+ self.handle = c_void_p()
+ status = OcfLib.getInstance().ocf_queue_create(
+ cache.cache_handle, byref(self.handle), byref(self.ops)
+ )
+ if status:
+ raise OcfError("Couldn't create queue object", status)
+
+ Queue._instances_[self.handle.value] = weakref.ref(self)
+ self._as_parameter_ = self.handle
+
+ self.stop_event = Event()
+ self.kick_condition = Condition()
+ self.thread = Thread(
+ group=None,
+ target=io_queue_run,
+ name=name,
+ kwargs={
+ "queue": self,
+ "kick": self.kick_condition,
+ "stop": self.stop_event,
+ },
+ )
+ self.thread.start()
+
+ @classmethod
+ def get_instance(cls, ref):
+ return cls._instances_[ref]()
+
+ @staticmethod
+ @QueueOps.KICK_SYNC
+ def _kick_sync(ref):
+ Queue.get_instance(ref).kick_sync()
+
+ @staticmethod
+ @QueueOps.KICK
+ def _kick(ref):
+ Queue.get_instance(ref).kick()
+
+ @staticmethod
+ @QueueOps.STOP
+ def _stop(ref):
+ Queue.get_instance(ref).stop()
+
+ def kick_sync(self):
+ OcfLib.getInstance().ocf_queue_run(self.handle)
+
+ def kick(self):
+ with self.kick_condition:
+ self.kick_condition.notify_all()
+
+ def put(self):
+ OcfLib.getInstance().ocf_queue_put(self)
+
+ def stop(self):
+ with self.kick_condition:
+ self.stop_event.set()
+ self.kick_condition.notify_all()
+
+ self.thread.join()
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/shared.py b/src/spdk/ocf/tests/functional/pyocf/types/shared.py
new file mode 100644
index 000000000..5244b4d36
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/shared.py
@@ -0,0 +1,160 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import CFUNCTYPE, c_size_t, c_char_p, Structure, c_void_p
+from enum import IntEnum, auto
+from threading import Event
+
+from ..utils import Size as S
+
+
+class OcfErrorCode(IntEnum):
+ OCF_ERR_INVAL = 1000000
+ OCF_ERR_AGAIN = auto()
+ OCF_ERR_INTR = auto()
+ OCF_ERR_NOT_SUPP = auto()
+ OCF_ERR_NO_MEM = auto()
+ OCF_ERR_NO_LOCK = auto()
+ OCF_ERR_METADATA_VER = auto()
+ OCF_ERR_NO_METADATA = auto()
+ OCF_ERR_METADATA_FOUND = auto()
+ OCF_ERR_INVAL_VOLUME_TYPE = auto()
+ OCF_ERR_UNKNOWN = auto()
+ OCF_ERR_TOO_MANY_CACHES = auto()
+ OCF_ERR_NO_FREE_RAM = auto()
+ OCF_ERR_START_CACHE_FAIL = auto()
+ OCF_ERR_CACHE_NOT_EXIST = auto()
+ OCF_ERR_CORE_NOT_EXIST = auto()
+ OCF_ERR_CACHE_EXIST = auto()
+ OCF_ERR_CORE_EXIST = auto()
+ OCF_ERR_TOO_MANY_CORES = auto()
+ OCF_ERR_CORE_NOT_AVAIL = auto()
+ OCF_ERR_NOT_OPEN_EXC = auto()
+ OCF_ERR_CACHE_NOT_AVAIL = auto()
+ OCF_ERR_IO_CLASS_NOT_EXIST = auto()
+ OCF_ERR_IO = auto()
+ OCF_ERR_WRITE_CACHE = auto()
+ OCF_ERR_WRITE_CORE = auto()
+ OCF_ERR_DIRTY_SHUTDOWN = auto()
+ OCF_ERR_DIRTY_EXISTS = auto()
+ OCF_ERR_FLUSHING_INTERRUPTED = auto()
+ OCF_ERR_FLUSH_IN_PROGRESS = auto()
+ OCF_ERR_CANNOT_ADD_CORE_TO_POOL = auto()
+ OCF_ERR_CACHE_IN_INCOMPLETE_STATE = auto()
+ OCF_ERR_CORE_IN_INACTIVE_STATE = auto()
+ OCF_ERR_INVALID_CACHE_MODE = auto()
+ OCF_ERR_INVALID_CACHE_LINE_SIZE = auto()
+ OCF_ERR_CACHE_NAME_MISMATCH = auto()
+ OCF_ERR_INVAL_CACHE_DEV = auto()
+
+
+class OcfCompletion:
+ """
+ This class provides a completion mechanism for interacting with the OCF
+ asynchronous management API.
+ """
+
+ class CompletionResult:
+ def __init__(self, completion_args):
+ self.completion_args = {
+ x[0]: i for i, x in enumerate(completion_args)
+ }
+ self.results = None
+ self.arg_types = [x[1] for x in completion_args]
+
+ def __getitem__(self, key):
+ try:
+ position = self.completion_args[key]
+ return self.results[position]
+ except KeyError:
+ raise KeyError(f"No completion argument {key} specified")
+
+ def __init__(self, completion_args: list):
+ """
+ Provide the ctypes argument list for the OCF completion callback this
+ object will wait on.
+
+ :param completion_args: list of (parameter name, parameter type) tuples
+ describing the OCF completion function arguments
+ """
+ self.e = Event()
+ self.results = OcfCompletion.CompletionResult(completion_args)
+ self._as_parameter_ = self.callback
+
+ @property
+ def callback(self):
+ @CFUNCTYPE(c_void_p, *self.results.arg_types)
+ def complete(*args):
+ self.results.results = args
+ self.e.set()
+
+ return complete
+
+ def wait(self):
+ self.e.wait()
+
+
+class OcfError(BaseException):
+ def __init__(self, msg, error_code):
+ super().__init__(self, msg)
+ self.error_code = OcfErrorCode(abs(error_code))
+ self.msg = msg
+
+ def __str__(self):
+ return "{} ({})".format(self.msg, repr(self.error_code))
+
+
+class SharedOcfObject(Structure):
+ _instances_ = {}
+
+ def __init__(self):
+ super().__init__()
+ type(self)._instances_[self._as_parameter_] = self
+
+ @classmethod
+ def get_instance(cls, ref: int):
+ try:
+ return cls._instances_[ref]
+ except: # noqa E722
+ logging.getLogger("pyocf").error(
+ "OcfSharedObject corruption. wanted: {} instances: {}".format(
+ ref, cls._instances_
+ )
+ )
+ return None
+
+ @classmethod
+ def del_object(cls, ref: int):
+ del cls._instances_[ref]
+
+
+class Uuid(Structure):
+ _fields_ = [("_size", c_size_t), ("_data", c_char_p)]
+
+
+class CacheLineSize(IntEnum):
+ LINE_4KiB = S.from_KiB(4)
+ LINE_8KiB = S.from_KiB(8)
+ LINE_16KiB = S.from_KiB(16)
+ LINE_32KiB = S.from_KiB(32)
+ LINE_64KiB = S.from_KiB(64)
+ DEFAULT = LINE_4KiB
+
+
+class SeqCutOffPolicy(IntEnum):
+ ALWAYS = 0
+ FULL = 1
+ NEVER = 2
+ DEFAULT = FULL
+
+
+class CacheLines(S):
+ def __init__(self, count: int, line_size: CacheLineSize):
+ self.bytes = count * line_size
+ self.line_size = line_size
+
+ def __int__(self):
+ return int(self.bytes / self.line_size)
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py
new file mode 100644
index 000000000..59a4bdfa0
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py
@@ -0,0 +1,39 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint8, c_uint32, c_uint64, c_bool, c_int, Structure
+from pyocf.types.stats.shared import _Stat
+
+
+class _Inactive(Structure):
+ _fields_ = [("occupancy", _Stat), ("clean", _Stat), ("dirty", _Stat)]
+
+
+class _FallbackPt(Structure):
+ _fields_ = [("error_counter", c_int), ("status", c_bool)]
+
+
+class CacheInfo(Structure):
+ _fields_ = [
+ ("attached", c_bool),
+ ("volume_type", c_uint8),
+ ("size", c_uint32),
+ ("inactive", _Inactive),
+ ("occupancy", c_uint32),
+ ("dirty", c_uint32),
+ ("dirty_initial", c_uint32),
+ ("dirty_for", c_uint32),
+ ("cache_mode", c_uint32),
+ ("fallback_pt", _FallbackPt),
+ ("state", c_uint8),
+ ("eviction_policy", c_uint32),
+ ("cleaning_policy", c_uint32),
+ ("promotion_policy", c_uint32),
+ ("cache_line_size", c_uint64),
+ ("flushed", c_uint32),
+ ("core_count", c_uint32),
+ ("metadata_footprint", c_uint64),
+ ("metadata_end_offset", c_uint32),
+ ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py
new file mode 100644
index 000000000..dd2d06689
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py
@@ -0,0 +1,21 @@
+
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint32, c_uint64, Structure
+
+from .shared import OcfStatsReq, OcfStatsBlock, OcfStatsDebug, OcfStatsError
+
+
+class CoreInfo(Structure):
+ _fields_ = [
+ ("core_size", c_uint64),
+ ("core_size_bytes", c_uint64),
+ ("dirty", c_uint32),
+ ("flushed", c_uint32),
+ ("dirty_for", c_uint32),
+ ("seq_cutoff_threshold", c_uint32),
+ ("seq_cutoff_policy", c_uint32),
+ ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py
new file mode 100644
index 000000000..e6719d985
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py
@@ -0,0 +1,88 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint64, c_uint32, Structure
+
+
+class _Stat(Structure):
+ _fields_ = [("value", c_uint64), ("fraction", c_uint64)]
+
+
+class OcfStatsReq(Structure):
+ _fields_ = [
+ ("partial_miss", c_uint64),
+ ("full_miss", c_uint64),
+ ("total", c_uint64),
+ ("pass_through", c_uint64),
+ ]
+
+
+class OcfStatsBlock(Structure):
+ _fields_ = [("read", c_uint64), ("write", c_uint64)]
+
+
+class OcfStatsError(Structure):
+ _fields_ = [("read", c_uint32), ("write", c_uint32)]
+
+
+class OcfStatsDebug(Structure):
+ _fields_ = [
+ ("read_size", c_uint64 * 12),
+ ("write_size", c_uint64 * 12),
+ ("read_align", c_uint64 * 4),
+ ("write_align", c_uint64 * 4),
+ ]
+
+
+class UsageStats(Structure):
+ _fields_ = [
+ ("occupancy", _Stat),
+ ("free", _Stat),
+ ("clean", _Stat),
+ ("dirty", _Stat),
+ ]
+
+
+class RequestsStats(Structure):
+ _fields_ = [
+ ("rd_hits", _Stat),
+ ("rd_partial_misses", _Stat),
+ ("rd_full_misses", _Stat),
+ ("rd_total", _Stat),
+ ("wr_hits", _Stat),
+ ("wr_partial_misses", _Stat),
+ ("wr_full_misses", _Stat),
+ ("wr_total", _Stat),
+ ("rd_pt", _Stat),
+ ("wr_pt", _Stat),
+ ("serviced", _Stat),
+ ("total", _Stat),
+ ]
+
+
+class BlocksStats(Structure):
+ _fields_ = [
+ ("core_volume_rd", _Stat),
+ ("core_volume_wr", _Stat),
+ ("core_volume_total", _Stat),
+ ("cache_volume_rd", _Stat),
+ ("cache_volume_wr", _Stat),
+ ("cache_volume_total", _Stat),
+ ("volume_rd", _Stat),
+ ("volume_wr", _Stat),
+ ("volume_total", _Stat),
+ ]
+
+
+class ErrorsStats(Structure):
+ _fields_ = [
+ ("core_volume_rd", _Stat),
+ ("core_volume_wr", _Stat),
+ ("core_volume_total", _Stat),
+ ("cache_volume_rd", _Stat),
+ ("cache_volume_wr", _Stat),
+ ("cache_volume_total", _Stat),
+ ("total", _Stat),
+ ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/volume.py b/src/spdk/ocf/tests/functional/pyocf/types/volume.py
new file mode 100644
index 000000000..4bca10bd1
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/volume.py
@@ -0,0 +1,361 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ POINTER,
+ c_void_p,
+ c_uint32,
+ c_char_p,
+ create_string_buffer,
+ memmove,
+ memset,
+ Structure,
+ CFUNCTYPE,
+ c_int,
+ c_uint,
+ c_uint64,
+ sizeof,
+ cast,
+ string_at,
+)
+from hashlib import md5
+import weakref
+
+from .io import Io, IoOps, IoDir
+from .shared import OcfErrorCode, Uuid
+from ..ocf import OcfLib
+from ..utils import print_buffer, Size as S
+from .data import Data
+
+
+class VolumeCaps(Structure):
+ _fields_ = [("_atomic_writes", c_uint32, 1)]
+
+
+class VolumeOps(Structure):
+ SUBMIT_IO = CFUNCTYPE(None, POINTER(Io))
+ SUBMIT_FLUSH = CFUNCTYPE(None, c_void_p)
+ SUBMIT_METADATA = CFUNCTYPE(None, c_void_p)
+ SUBMIT_DISCARD = CFUNCTYPE(None, c_void_p)
+ SUBMIT_WRITE_ZEROES = CFUNCTYPE(None, c_void_p)
+ OPEN = CFUNCTYPE(c_int, c_void_p)
+ CLOSE = CFUNCTYPE(None, c_void_p)
+ GET_MAX_IO_SIZE = CFUNCTYPE(c_uint, c_void_p)
+ GET_LENGTH = CFUNCTYPE(c_uint64, c_void_p)
+
+ _fields_ = [
+ ("_submit_io", SUBMIT_IO),
+ ("_submit_flush", SUBMIT_FLUSH),
+ ("_submit_metadata", SUBMIT_METADATA),
+ ("_submit_discard", SUBMIT_DISCARD),
+ ("_submit_write_zeroes", SUBMIT_WRITE_ZEROES),
+ ("_open", OPEN),
+ ("_close", CLOSE),
+ ("_get_max_io_size", GET_MAX_IO_SIZE),
+ ("_get_length", GET_LENGTH),
+ ]
+
+
+class VolumeProperties(Structure):
+ _fields_ = [
+ ("_name", c_char_p),
+ ("_io_priv_size", c_uint32),
+ ("_volume_priv_size", c_uint32),
+ ("_caps", VolumeCaps),
+ ("_ops", VolumeOps),
+ ("_io_ops", IoOps),
+ ("_deinit", c_char_p),
+ ]
+
+
+class VolumeIoPriv(Structure):
+ _fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
+
+
+class Volume(Structure):
+ VOLUME_POISON = 0x13
+
+ _fields_ = [("_storage", c_void_p)]
+ _instances_ = {}
+ _uuid_ = {}
+
+ props = None
+
+ def __init__(self, size: S, uuid=None):
+ super().__init__()
+ self.size = size
+ if uuid:
+ if uuid in type(self)._uuid_:
+ raise Exception(
+ "Volume with uuid {} already created".format(uuid)
+ )
+ self.uuid = uuid
+ else:
+ self.uuid = str(id(self))
+
+ type(self)._uuid_[self.uuid] = weakref.ref(self)
+
+ self.data = create_string_buffer(int(self.size))
+ memset(self.data, self.VOLUME_POISON, self.size)
+ self._storage = cast(self.data, c_void_p)
+
+ self.reset_stats()
+ self.opened = False
+
+ def get_copy(self):
+ new_volume = Volume(self.size)
+ memmove(new_volume.data, self.data, self.size)
+ return new_volume
+
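+ # Lazily build and cache the VolumeProperties structure describing this
+ # volume type; the static C-callable trampolines defined below are wired
+ # into the volume and IO ops tables, and the struct is cached on the class
+ # so all instances share a single copy.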
+ @classmethod
+ def get_props(cls):
+ if not cls.props:
+ cls.props = VolumeProperties(
+ _name=str(cls.__name__).encode("ascii"),
+ _io_priv_size=sizeof(VolumeIoPriv),
+ _volume_priv_size=0,
+ _caps=VolumeCaps(_atomic_writes=0),
+ _ops=VolumeOps(
+ _submit_io=cls._submit_io,
+ _submit_flush=cls._submit_flush,
+ _submit_metadata=cls._submit_metadata,
+ _submit_discard=cls._submit_discard,
+ _submit_write_zeroes=cls._submit_write_zeroes,
+ _open=cls._open,
+ _close=cls._close,
+ _get_max_io_size=cls._get_max_io_size,
+ _get_length=cls._get_length,
+ ),
+ _io_ops=IoOps(
+ _set_data=cls._io_set_data, _get_data=cls._io_get_data
+ ),
+ _deinit=0,
+ )
+
+ return cls.props
+
+ @classmethod
+ def get_instance(cls, ref):
+ instance = cls._instances_[ref]()
+ if instance is None:
+ print("tried to access {} but it's gone".format(ref))
+
+ return instance
+
+ @classmethod
+ def get_by_uuid(cls, uuid):
+ return cls._uuid_[uuid]()
+
+ @staticmethod
+ @VolumeOps.SUBMIT_IO
+ def _submit_io(io):
+ io_structure = cast(io, POINTER(Io))
+ volume = Volume.get_instance(
+ OcfLib.getInstance().ocf_io_get_volume(io_structure)
+ )
+
+ volume.submit_io(io_structure)
+
+ @staticmethod
+ @VolumeOps.SUBMIT_FLUSH
+ def _submit_flush(flush):
+ io_structure = cast(flush, POINTER(Io))
+ volume = Volume.get_instance(
+ OcfLib.getInstance().ocf_io_get_volume(io_structure)
+ )
+
+ volume.submit_flush(io_structure)
+
+ @staticmethod
+ @VolumeOps.SUBMIT_METADATA
+ def _submit_metadata(meta):
+ pass
+
+ @staticmethod
+ @VolumeOps.SUBMIT_DISCARD
+ def _submit_discard(discard):
+ io_structure = cast(discard, POINTER(Io))
+ volume = Volume.get_instance(
+ OcfLib.getInstance().ocf_io_get_volume(io_structure)
+ )
+
+ volume.submit_discard(io_structure)
+
+ @staticmethod
+ @VolumeOps.SUBMIT_WRITE_ZEROES
+ def _submit_write_zeroes(write_zeroes):
+ pass
+
+ @staticmethod
+ @CFUNCTYPE(c_int, c_void_p)
+ def _open(ref):
+ uuid_ptr = cast(
+ OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid)
+ )
+ uuid = str(uuid_ptr.contents._data, encoding="ascii")
+ try:
+ volume = Volume.get_by_uuid(uuid)
+ except: # noqa E722 TODO: Investigate whether this really should be so broad
+ print("Tried to access unallocated volume {}".format(uuid))
+ print("{}".format(Volume._uuid_))
+ return -1
+
+ if volume.opened:
+ return OcfErrorCode.OCF_ERR_NOT_OPEN_EXC
+
+ Volume._instances_[ref] = weakref.ref(volume)
+
+ return volume.open()
+
+ @staticmethod
+ @VolumeOps.CLOSE
+ def _close(ref):
+ volume = Volume.get_instance(ref)
+ volume.close()
+ volume.opened = False
+
+ @staticmethod
+ @VolumeOps.GET_MAX_IO_SIZE
+ def _get_max_io_size(ref):
+ return Volume.get_instance(ref).get_max_io_size()
+
+ @staticmethod
+ @VolumeOps.GET_LENGTH
+ def _get_length(ref):
+ return Volume.get_instance(ref).get_length()
+
+ @staticmethod
+ @IoOps.SET_DATA
+ def _io_set_data(io, data, offset):
+ io_priv = cast(
+ OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
+ )
+ data = Data.get_instance(data)
+ io_priv.contents._offset = offset
+ io_priv.contents._data = data.handle
+
+ return 0
+
+ @staticmethod
+ @IoOps.GET_DATA
+ def _io_get_data(io):
+ io_priv = cast(
+ OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
+ )
+ return io_priv.contents._data
+
+ def open(self):
+ self.opened = True
+ return 0
+
+ def close(self):
+ pass
+
+ def get_length(self):
+ return self.size
+
+ def get_max_io_size(self):
+ return S.from_KiB(128)
+
+ def submit_flush(self, flush):
+ flush.contents._end(flush, 0)
+
+ def submit_discard(self, discard):
+ try:
+ dst = self._storage + discard.contents._addr
+ memset(dst, 0, discard.contents._bytes)
+
+ discard.contents._end(discard, 0)
+ except: # noqa E722
+ discard.contents._end(discard, -5)
+
+ def get_stats(self):
+ return self.stats
+
+ def reset_stats(self):
+ self.stats = {IoDir.WRITE: 0, IoDir.READ: 0}
+
+ def submit_io(self, io):
+ try:
+ self.stats[IoDir(io.contents._dir)] += 1
+
+ io_priv = cast(
+ OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
+ offset = io_priv.contents._offset
+
+ if io.contents._dir == IoDir.WRITE:
+ src_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
+ src = Data.get_instance(src_ptr.value).handle.value + offset
+ dst = self._storage + io.contents._addr
+ elif io.contents._dir == IoDir.READ:
+ dst_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
+ dst = Data.get_instance(dst_ptr.value).handle.value + offset
+ src = self._storage + io.contents._addr
+
+ memmove(dst, src, io.contents._bytes)
+ io_priv.contents._offset += io.contents._bytes
+
+ io.contents._end(io, 0)
+ except: # noqa E722
+ io.contents._end(io, -5)
+
+ def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
+ if size == 0:
+ size = int(self.size) - int(offset)
+
+ print_buffer(
+ self._storage,
+ size,
+ ignore=ignore,
+ **kwargs
+ )
+
+ def md5(self):
+ m = md5()
+ m.update(string_at(self._storage, self.size))
+ return m.hexdigest()
+
+
+class ErrorDevice(Volume):
+ def __init__(self, size, error_sectors: set = None, uuid=None):
+ super().__init__(size, uuid)
+ self.error_sectors = error_sectors or set()
+
+ def set_mapping(self, error_sectors: set):
+ self.error_sectors = error_sectors
+
+ def submit_io(self, io):
+ if io.contents._addr in self.error_sectors:
+ io.contents._end(io, -5)
+ self.stats["errors"][io.contents._dir] += 1
+ else:
+ super().submit_io(io)
+
+ def reset_stats(self):
+ super().reset_stats()
+ self.stats["errors"] = {IoDir.WRITE: 0, IoDir.READ: 0}
+
+
+class TraceDevice(Volume):
+ def __init__(self, size, trace_fcn=None, uuid=None):
+ super().__init__(size, uuid)
+ self.trace_fcn = trace_fcn
+
+ def submit_io(self, io):
+ submit = True
+
+ if self.trace_fcn:
+ submit = self.trace_fcn(self, io)
+
+ if submit:
+ super().submit_io(io)
+
+
+lib = OcfLib.getInstance()
+lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
+lib.ocf_io_get_volume.argtypes = [c_void_p]
+lib.ocf_io_get_volume.restype = c_void_p
+lib.ocf_io_get_data.argtypes = [c_void_p]
+lib.ocf_io_get_data.restype = c_void_p
diff --git a/src/spdk/ocf/tests/functional/pyocf/utils.py b/src/spdk/ocf/tests/functional/pyocf/utils.py
new file mode 100644
index 000000000..d4ef42300
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/utils.py
@@ -0,0 +1,173 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import string_at
+
+
+def print_buffer(
+ buf,
+ length,
+ offset=0,
+ width=16,
+ ignore=0,
+ stop_after_count_ignored=0,
+ print_fcn=print,
+):
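+ """Hex/ASCII-dump `length` bytes starting at `buf`, eliding and summarizing
+ lines that consist entirely of the `ignore` byte (Volume.dump() uses this to
+ skip VOLUME_POISON filler)."""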
+ end = int(offset) + int(length)
+ offset = int(offset)
+ ignored_lines = 0
+ buf = string_at(buf, length)
+ whole_buffer_ignored = True
+ stop_after_count_ignored = int(stop_after_count_ignored / width)
+
+ for addr in range(offset, end, width):
+ cur_line = buf[addr : min(end, addr + width)]
+ byteline = ""
+ asciiline = ""
+ if not any(x != ignore for x in cur_line):
+ if (
+ stop_after_count_ignored
+ and ignored_lines > stop_after_count_ignored
+ ):
+ print_fcn(
+ "<{} bytes of '0x{:02X}' encountered, stopping>".format(
+ stop_after_count_ignored * width, ignore
+ )
+ )
+ return
+ ignored_lines += 1
+ continue
+
+ if ignored_lines:
+ print_fcn(
+ "<{} of '0x{:02X}' bytes omitted>".format(
+ ignored_lines * width, ignore
+ )
+ )
+ ignored_lines = 0
+
+ for byte in cur_line:
+ byte = int(byte)
+ byteline += "{:02X} ".format(byte)
+ if 31 < byte < 126:
+ char = chr(byte)
+ else:
+ char = "."
+ asciiline += char
+
+ print_fcn("0x{:08X}\t{}\t{}".format(addr, byteline, asciiline))
+ whole_buffer_ignored = False
+
+ if whole_buffer_ignored:
+ print_fcn("<whole buffer ignored>")
+ elif ignored_lines:
+ print_fcn("<'0x{:02X}' until end>".format(ignore))
+
+
+class Size:
+ _KiB = 1024
+ _MiB = _KiB * 1024
+ _GiB = _MiB * 1024
+ _TiB = _GiB * 1024
+ _SECTOR_SIZE = 512
+
+ def __init__(self, b: int, sector_aligned: bool = False):
+ if sector_aligned:
+ self.bytes = int(
+ ((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE)
+ * self._SECTOR_SIZE
+ )
+ else:
+ self.bytes = int(b)
+
+ def __int__(self):
+ return self.bytes
+
+ def __index__(self):
+ return self.bytes
+
+ @classmethod
+ def from_B(cls, value, sector_aligned=False):
+ return cls(value, sector_aligned)
+
+ @classmethod
+ def from_KiB(cls, value, sector_aligned=False):
+ return cls(value * cls._KiB, sector_aligned)
+
+ @classmethod
+ def from_MiB(cls, value, sector_aligned=False):
+ return cls(value * cls._MiB, sector_aligned)
+
+ @classmethod
+ def from_GiB(cls, value, sector_aligned=False):
+ return cls(value * cls._GiB, sector_aligned)
+
+ @classmethod
+ def from_TiB(cls, value, sector_aligned=False):
+ return cls(value * cls._TiB, sector_aligned)
+
+ @classmethod
+ def from_sector(cls, value):
+ return cls(value * cls._SECTOR_SIZE)
+
+ @property
+ def B(self):
+ return self.bytes
+
+ @property
+ def KiB(self):
+ return self.bytes / self._KiB
+
+ @property
+ def MiB(self):
+ return self.bytes / self._MiB
+
+ @property
+ def GiB(self):
+ return self.bytes / self._GiB
+
+ @property
+ def TiB(self):
+ return self.bytes / self._TiB
+
+ @property
+ def sectors(self):
+ return self.bytes // self._SECTOR_SIZE
+
+ def __str__(self):
+ if self.bytes < self._KiB:
+ return "{} B".format(self.B)
+ elif self.bytes < self._MiB:
+ return "{} KiB".format(self.KiB)
+ elif self.bytes < self._GiB:
+ return "{} MiB".format(self.MiB)
+ elif self.bytes < self._TiB:
+ return "{} GiB".format(self.GiB)
+ else:
+ return "{} TiB".format(self.TiB)
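+
+# Illustrative usage (values follow from the definitions above):
+#   Size.from_KiB(4).sectors == 8
+#   Size(1000, sector_aligned=True).B == 1024   (rounded up to a 512 B multiple)
+#   str(Size.from_MiB(30)) == "30.0 MiB"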
+
+
+def print_structure(struct, indent=0):
+ print(struct)
+ for field, field_type in struct._fields_:
+ value = getattr(struct, field)
+ if hasattr(value, "_fields_"):
+ print("{}{: <20} :".format(" " * indent, field))
+ print_structure(value, indent=indent + 1)
+ continue
+
+ print("{}{: <20} : {}".format(" " * indent, field, value))
+
+
+def struct_to_dict(struct):
+ d = {}
+ for field, field_type in struct._fields_:
+ value = getattr(struct, field)
+ if hasattr(value, "_fields_"):
+ d[field] = struct_to_dict(value)
+ continue
+ d[field] = value
+
+ return d
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c
new file mode 100644
index 000000000..79b9331e0
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf_io.h"
+#include "ocf/ocf_core.h"
+
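+/*
+ * Plain exported wrappers so that pyocf can reach these calls through ctypes;
+ * several of the wrapped routines are static inline helpers in the public OCF
+ * headers and therefore have no linkable symbol of their own.
+ */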
+struct ocf_io *ocf_core_new_io_wrapper(ocf_core_t core, ocf_queue_t queue,
+ uint64_t addr, uint32_t bytes, uint32_t dir,
+ uint32_t io_class, uint64_t flags)
+{
+ return ocf_core_new_io(core, queue, addr, bytes, dir, io_class, flags);
+}
+
+void ocf_io_set_cmpl_wrapper(struct ocf_io *io, void *context,
+ void *context2, ocf_end_io_t fn)
+{
+ ocf_io_set_cmpl(io, context, context2, fn);
+}
+
+void ocf_io_set_start_wrapper(struct ocf_io *io, ocf_start_io_t fn)
+{
+ ocf_io_set_start(io, fn);
+}
+
+void ocf_io_set_handle_wrapper(struct ocf_io *io, ocf_handle_io_t fn)
+{
+ ocf_io_set_handle(io, fn);
+}
+
+void ocf_core_submit_io_wrapper(struct ocf_io *io)
+{
+ ocf_core_submit_io(io);
+}
+
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c
new file mode 100644
index 000000000..60ded8dad
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include <ocf/ocf_types.h>
+#include <ocf/ocf_logger.h>
+#include <stdarg.h>
+#include "ocf_env.h"
+
+#define LOG_BUFFER_SIZE 4096
+
+struct pyocf_logger_priv {
+ int (*pyocf_log)(void *pyocf_logger, ocf_logger_lvl_t lvl, char *msg);
+};
+
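+/*
+ * Render the printf-style message into a temporary buffer and forward it to
+ * the Python-side log callback stored in the logger's private data.
+ */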
+int pyocf_printf_helper(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, va_list args)
+{
+ char *buffer = env_zalloc(LOG_BUFFER_SIZE, ENV_MEM_NORMAL);
+ struct pyocf_logger_priv *priv = ocf_logger_get_priv(logger);
+ int ret;
+
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = vsnprintf(buffer, LOG_BUFFER_SIZE, fmt, args);
+ if (ret < 0) {
+ env_free(buffer);
+ goto out;
+ }
+
+ ret = priv->pyocf_log(logger, lvl, buffer);
+
+ env_free(buffer);
+
+out:
+ return ret;
+}
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c
new file mode 100644
index 000000000..cb3787761
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c
@@ -0,0 +1,12 @@
+
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf_io.h"
+#include "ocf/ocf_volume.h"
+
+const char *ocf_uuid_to_str_wrapper(const struct ocf_volume_uuid *uuid) {
+ return ocf_uuid_to_str(uuid);
+}
diff --git a/src/spdk/ocf/tests/functional/pytest.ini b/src/spdk/ocf/tests/functional/pytest.ini
new file mode 100644
index 000000000..10796150b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = --ignore=tests/security -m "not long"
diff --git a/src/spdk/ocf/tests/functional/tests/__init__.py b/src/spdk/ocf/tests/functional/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/basic/__init__.py b/src/spdk/ocf/tests/functional/tests/basic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/basic/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py b/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py
new file mode 100644
index 000000000..b881abdc6
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py
@@ -0,0 +1,86 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from pyocf.types.cache import Cache
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume, ErrorDevice
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size as S
+from pyocf.types.shared import OcfError, OcfCompletion
+
+
+def test_ctx_fixture(pyocf_ctx):
+ pass
+
+
+def test_simple_wt_write(pyocf_ctx):
+ cache_device = Volume(S.from_MiB(30))
+ core_device = Volume(S.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device)
+ core = Core.using_device(core_device)
+
+ cache.add_core(core)
+
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ write_data = Data.from_string("This is test data")
+ io = core.new_io(cache.get_default_queue(), S.from_sector(1).B,
+ write_data.size, IoDir.WRITE, 0, 0)
+ io.set_data(write_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ assert cmpl.results["err"] == 0
+ assert cache_device.get_stats()[IoDir.WRITE] == 1
+ stats = cache.get_stats()
+ assert stats["req"]["wr_full_misses"]["value"] == 1
+ assert stats["usage"]["occupancy"]["value"] == 1
+
+ assert core.exp_obj_md5() == core_device.md5()
+ cache.stop()
+
+
+def test_start_corrupted_metadata_lba(pyocf_ctx):
+ cache_device = ErrorDevice(S.from_MiB(30), error_sectors=set([0]))
+
+ with pytest.raises(OcfError, match="OCF_ERR_WRITE_CACHE"):
+ cache = Cache.start_on_device(cache_device)
+
+
+def test_load_cache_no_preexisting_data(pyocf_ctx):
+ cache_device = Volume(S.from_MiB(30))
+
+ with pytest.raises(OcfError, match="OCF_ERR_NO_METADATA"):
+ cache = Cache.load_from_device(cache_device)
+
+
+def test_load_cache(pyocf_ctx):
+ cache_device = Volume(S.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device)
+ cache.stop()
+
+ cache = Cache.load_from_device(cache_device)
+
+
+def test_load_cache_recovery(pyocf_ctx):
+ cache_device = Volume(S.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device)
+
+ device_copy = cache_device.get_copy()
+
+ cache.stop()
+
+ cache = Cache.load_from_device(device_copy)
diff --git a/src/spdk/ocf/tests/functional/tests/conftest.py b/src/spdk/ocf/tests/functional/tests/conftest.py
new file mode 100644
index 000000000..943c1c07b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/conftest.py
@@ -0,0 +1,39 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import os
+import sys
+import pytest
+import gc
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+from pyocf.types.logger import LogLevel, DefaultLogger, BufferLogger
+from pyocf.types.volume import Volume, ErrorDevice
+from pyocf.types.ctx import get_default_ctx
+
+
+def pytest_configure(config):
+ sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+
+@pytest.fixture()
+def pyocf_ctx():
+ c = get_default_ctx(DefaultLogger(LogLevel.WARN))
+ c.register_volume_type(Volume)
+ c.register_volume_type(ErrorDevice)
+ yield c
+ c.exit()
+ gc.collect()
+
+
+@pytest.fixture()
+def pyocf_ctx_log_buffer():
+ logger = BufferLogger(LogLevel.DEBUG)
+ c = get_default_ctx(logger)
+ c.register_volume_type(Volume)
+ c.register_volume_type(ErrorDevice)
+ yield logger
+ c.exit()
+ gc.collect()
diff --git a/src/spdk/ocf/tests/functional/tests/engine/__init__.py b/src/spdk/ocf/tests/functional/tests/engine/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/engine/test_pp.py b/src/spdk/ocf/tests/functional/tests/engine/test_pp.py
new file mode 100644
index 000000000..e45377559
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/test_pp.py
@@ -0,0 +1,305 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int
+import pytest
+import math
+
+from pyocf.types.cache import Cache, PromotionPolicy, NhitParams
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfCompletion
+
+
+@pytest.mark.parametrize("promotion_policy", PromotionPolicy)
+def test_init_nhit(pyocf_ctx, promotion_policy):
+ """
+ Check that the promotion policy a cache is started with is reflected in its stats
+
+ 1. Create core/cache pair with parametrized promotion policy
+ 2. Get cache statistics
+ * verify that promotion policy type is properly reflected in stats
+ """
+
+ cache_device = Volume(Size.from_MiB(30))
+ core_device = Volume(Size.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device, promotion_policy=promotion_policy)
+ core = Core.using_device(core_device)
+
+ cache.add_core(core)
+
+ assert cache.get_stats()["conf"]["promotion_policy"] == promotion_policy
+
+
+def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
+ """
+ Try switching the promotion policy during IO; no IOs should fail
+
+ 1. Create core/cache pair with promotion policy ALWAYS
+ 2. Issue IOs without waiting for completion
+ 3. Change promotion policy to NHIT
+ 4. Wait for IO completions
+ * no IOs should fail
+ 5. Issue IOs without waiting for completion
+ 6. Change promotion policy to ALWAYS
+ 7. Wait for IO completions
+ * no IOs should fail
+ """
+
+ # Step 1
+ cache_device = Volume(Size.from_MiB(30))
+ core_device = Volume(Size.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device)
+ core = Core.using_device(core_device)
+
+ cache.add_core(core)
+
+ # Step 2
+ completions = []
+ for i in range(2000):
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(4096)
+ io = core.new_io(
+ cache.get_default_queue(), i * 4096, write_data.size, IoDir.WRITE, 0, 0
+ )
+ completions += [comp]
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+
+ # Step 3
+ cache.set_promotion_policy(PromotionPolicy.NHIT)
+
+ # Step 4
+ for c in completions:
+ c.wait()
+ assert not c.results["error"], "No IOs should fail when turning NHIT policy on"
+
+ # Step 5
+ completions = []
+ for i in range(2000):
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(4096)
+ io = core.new_io(
+ cache.get_default_queue(), i * 4096, write_data.size, IoDir.WRITE, 0, 0
+ )
+ completions += [comp]
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+
+ # Step 6
+ cache.set_promotion_policy(PromotionPolicy.ALWAYS)
+
+ # Step 7
+ for c in completions:
+ c.wait()
+ assert not c.results[
+ "error"
+ ], "No IO's should fail when turning NHIT policy off"
+
+
+def fill_cache(cache, fill_ratio):
+ """
+ Helper to fill cache from LBA 0.
+ TODO:
+ * make it generic and share across all tests
+ * reasonable error handling
+ """
+
+ cache_lines = cache.get_stats()["conf"]["size"]
+
+ bytes_to_fill = cache_lines.bytes * fill_ratio
+ max_io_size = cache.device.get_max_io_size().bytes
+
+ ios_to_issue = math.floor(bytes_to_fill / max_io_size)
+
+ core = cache.cores[0]
+ completions = []
+ for i in range(ios_to_issue):
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(max_io_size)
+ io = core.new_io(
+ cache.get_default_queue(),
+ i * max_io_size,
+ write_data.size,
+ IoDir.WRITE,
+ 0,
+ 0,
+ )
+ io.set_data(write_data)
+ io.callback = comp.callback
+ completions += [comp]
+ io.submit()
+
+ if bytes_to_fill % max_io_size:
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(Size.from_B(bytes_to_fill % max_io_size, sector_aligned=True))
+ io = core.new_io(
+ cache.get_default_queue(),
+ ios_to_issue * max_io_size,
+ write_data.size,
+ IoDir.WRITE,
+ 0,
+ 0,
+ )
+ io.set_data(write_data)
+ io.callback = comp.callback
+ completions += [comp]
+ io.submit()
+
+ for c in completions:
+ c.wait()
+
+
+@pytest.mark.parametrize("fill_percentage", [0, 1, 50, 99])
+@pytest.mark.parametrize("insertion_threshold", [2, 8])
+def test_promoted_after_hits_various_thresholds(
+ pyocf_ctx, insertion_threshold, fill_percentage
+):
+ """
+ Check promotion policy behavior with various set thresholds
+
+ 1. Create core/cache pair with promotion policy NHIT
+ 2. Set TRIGGER_THRESHOLD/INSERTION_THRESHOLD to predefined values
+ 3. Fill cache from the beginning until occupancy reaches TRIGGER_THRESHOLD%
+ 4. Issue INSERTION_THRESHOLD - 1 requests to a core line not inserted into the cache
+ * occupancy should not change
+ 5. Issue one request to LBA from step 4
+ * occupancy should rise by one cache line
+ """
+
+ # Step 1
+ cache_device = Volume(Size.from_MiB(30))
+ core_device = Volume(Size.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device, promotion_policy=PromotionPolicy.NHIT)
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ # Step 2
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, fill_percentage
+ )
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, insertion_threshold
+ )
+ # Step 3
+ fill_cache(cache, fill_percentage / 100)
+
+ stats = cache.get_stats()
+ cache_lines = stats["conf"]["size"]
+ assert stats["usage"]["occupancy"]["fraction"] // 10 == fill_percentage * 10
+ filled_occupancy = stats["usage"]["occupancy"]["value"]
+
+ # Step 4
+ last_core_line = int(core_device.size) - cache_lines.line_size
+ completions = []
+ for i in range(insertion_threshold - 1):
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(cache_lines.line_size)
+ io = core.new_io(
+ cache.get_default_queue(),
+ last_core_line,
+ write_data.size,
+ IoDir.WRITE,
+ 0,
+ 0,
+ )
+ completions += [comp]
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+
+ for c in completions:
+ c.wait()
+
+ stats = cache.get_stats()
+ threshold_reached_occupancy = stats["usage"]["occupancy"]["value"]
+ assert threshold_reached_occupancy == filled_occupancy, (
+ "No insertion should occur while NHIT is triggered and core line ",
+ "didn't reach INSERTION_THRESHOLD",
+ )
+
+ # Step 5
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(cache_lines.line_size)
+ io = core.new_io(
+ cache.get_default_queue(), last_core_line, write_data.size, IoDir.WRITE, 0, 0
+ )
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+
+ comp.wait()
+
+ assert (
+ threshold_reached_occupancy
+ == cache.get_stats()["usage"]["occupancy"]["value"] - 1
+ ), "Previous request should be promoted and occupancy should rise"
+
+
+def test_partial_hit_promotion(pyocf_ctx):
+ """
+ Check if NHIT promotion policy doesn't prevent partial hits from getting
+ promoted to cache
+
+ 1. Create core/cache pair with promotion policy ALWAYS
+ 2. Issue one-sector IO to cache to insert partially valid cache line
+ 3. Set NHIT promotion policy with trigger=0 (always triggered) and high
+ insertion threshold
+ 4. Issue a request containing partially valid cache line and next cache line
+ * occupancy should rise - partially hit request should bypass nhit criteria
+ """
+
+ # Step 1
+ cache_device = Volume(Size.from_MiB(30))
+ core_device = Volume(Size.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device)
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ # Step 2
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(Size.from_sector(1))
+ io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.READ, 0, 0)
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+
+ comp.wait()
+
+ stats = cache.get_stats()
+ cache_lines = stats["conf"]["size"]
+ assert stats["usage"]["occupancy"]["value"] == 1
+
+ # Step 3
+ cache.set_promotion_policy(PromotionPolicy.NHIT)
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
+ )
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
+ )
+
+ # Step 4
+ comp = OcfCompletion([("error", c_int)])
+ write_data = Data(2 * cache_lines.line_size)
+ io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.WRITE, 0, 0)
+ io.set_data(write_data)
+ io.callback = comp.callback
+ io.submit()
+ comp.wait()
+
+ stats = cache.get_stats()
+ assert (
+ stats["usage"]["occupancy"]["value"] == 2
+ ), "Second cache line should be mapped"
diff --git a/src/spdk/ocf/tests/functional/tests/engine/test_wo.py b/src/spdk/ocf/tests/functional/tests/engine/test_wo.py
new file mode 100644
index 000000000..e0cd10fdd
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/test_wo.py
@@ -0,0 +1,213 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int, memmove, cast, c_void_p
+from enum import IntEnum
+from itertools import product
+import random
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfCompletion
+
+
+def __io(io, queue, address, size, data, direction):
+ io.set_data(data, 0)
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+ return int(completion.results['err'])
+
+
+def _io(new_io, queue, address, size, data, offset, direction):
+ io = new_io(queue, address, size, direction, 0, 0)
+ if direction == IoDir.READ:
+ _data = Data.from_bytes(bytes(size))
+ else:
+ _data = Data.from_bytes(data, offset, size)
+ ret = __io(io, queue, address, size, _data, direction)
+ if not ret and direction == IoDir.READ:
+ memmove(cast(data, c_void_p).value + offset, _data.handle, size)
+ return ret
+
+
+def io_to_core(core, address, size, data, offset, direction):
+ return _io(core.new_core_io, core.cache.get_default_queue(), address, size,
+ data, offset, direction)
+
+
+def io_to_exp_obj(core, address, size, data, offset, direction):
+ return _io(core.new_io, core.cache.get_default_queue(), address, size, data,
+ offset, direction)
+
+
+def sector_to_region(sector, region_start):
+ i = 0
+ while i < len(region_start) - 1 and sector >= region_start[i + 1]:
+ i += 1
+ return i
+
+
+class SectorStatus(IntEnum):
+ DIRTY = 0
+ CLEAN = 1
+ INVALID = 2
+
+
+I = SectorStatus.INVALID
+D = SectorStatus.DIRTY
+C = SectorStatus.CLEAN
+
+# Test reads with a 4k cacheline and different combinations of sector status and
+# IO range. Three consecutive core lines are targeted, with the middle one (no 1)
+# having the status of each sector (clean, dirty, invalid) set independently. The
+# other two lines are either fully dirty/clean/invalid or have the single sector
+# neighbouring the middle core line set to a different status. This gives a total
+# of 12 regions with independent state, listed in the diagram below.
+#
+# cache line | CL 0 | CL 1 | CL 2 |
+# sector no |01234567|89ABCDEF|(ctd..) |
+# |........|........|........|
+# region no |00000001|23456789|ABBBBBBB|
+# io start possible | | | |
+# values @START |> >>|>>>>>>>>| |
+# io end possible | | | |
+# values @END | |<<<<<<<<|<< <|
+#
+# Each test iteration is described by region states and IO start/end sectors,
+# giving a total of 14 parameters.
+#
+# In order to determine data consistency, the cache is filled with data so that:
+# - core sector no @n is filled with @n
+# - if clean, exported object sector no @n is filled with 100 + @n
+# - if dirty, exported object sector no @n is filled with 200 + @n
+#
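+# A quick worked example of the mapping (using sector_to_region() above and the
+# region_start list defined in the test below): sector 7 -> region 1, sector 8
+# (first sector of CL 1) -> region 2, and sector 0x10 (first sector of CL 2) ->
+# region 0xA, matching the diagram.
+#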
+
+
+def test_wo_read_data_consistency(pyocf_ctx):
+ # start sector for each region
+ region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
+ # possible start sectors for test iteration
+ start_sec = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
+ # possible end sectors for test iteration
+ end_sec = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23]
+
+ CACHELINE_COUNT = 3
+ CACHELINE_SIZE = 4096
+ SECTOR_SIZE = Size.from_sector(1).B
+ CLS = CACHELINE_SIZE // SECTOR_SIZE
+ WORKSET_SIZE = CACHELINE_COUNT * CACHELINE_SIZE
+ WORKSET_OFFSET = 1024 * CACHELINE_SIZE
+ SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
+ ITERATION_COUNT = 200
+
+ # fixed test cases
+ fixed_combinations = [
+ [I, I, D, D, D, D, D, D, D, D, I, I],
+ [I, I, C, C, C, C, C, C, C, C, I, I],
+ [I, I, D, D, D, I, D, D, D, D, I, I],
+ [I, I, D, D, D, I, I, D, D, D, I, I],
+ [I, I, I, I, D, I, I, D, C, D, I, I],
+ [I, D, D, D, D, D, D, D, D, D, D, I],
+ [C, C, I, D, D, I, D, D, D, D, D, I],
+ [D, D, D, D, D, D, D, D, D, D, D, I],
+ ]
+
+ data = {}
+ # memset n-th sector of core data with n
+ data[SectorStatus.INVALID] = bytes([x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+ # memset n-th sector of clean data with n + 100
+ data[SectorStatus.CLEAN] = bytes([100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+ # memset n-th sector of dirty data with n + 200
+ data[SectorStatus.DIRTY] = bytes([200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+
+ result_b = bytes(WORKSET_SIZE)
+
+ cache_device = Volume(Size.from_MiB(30))
+ core_device = Volume(Size.from_MiB(30))
+
+ cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WO)
+ core = Core.using_device(core_device)
+
+ cache.add_core(core)
+
+ insert_order = [x for x in range(CACHELINE_COUNT)]
+
+ # generate regions status combinations and shuffle it
+ combinations = []
+ state_combinations = product(SectorStatus, repeat=len(region_start))
+ for S in state_combinations:
+ combinations.append(S)
+ random.shuffle(combinations)
+
+ # add fixed test cases at the beginning
+ combinations = fixed_combinations + combinations
+
+ for S in combinations[:ITERATION_COUNT]:
+ # write data to core and invalidate all CL
+ cache.change_cache_mode(cache_mode=CacheMode.PT)
+ io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
+ data[SectorStatus.INVALID], 0, IoDir.WRITE)
+
+ # randomize cacheline insertion order to exercise different
+ # paths with regard to the contiguity of cache I/O physical addresses
+ random.shuffle(insert_order)
+ sectors = [insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)]
+
+ # insert clean sectors - iterate over cachelines in @insert_order order
+ cache.change_cache_mode(cache_mode=CacheMode.WT)
+ for sec in sectors:
+ region = sector_to_region(sec, region_start)
+ if S[region] != SectorStatus.INVALID:
+ io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
+ data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE)
+
+ # write dirty sectors
+ cache.change_cache_mode(cache_mode=CacheMode.WO)
+ for sec in sectors:
+ region = sector_to_region(sec, region_start)
+ if S[region] == SectorStatus.DIRTY:
+ io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
+ data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE)
+
+ core_device.reset_stats()
+
+ for s in start_sec:
+ for e in end_sec:
+ if s > e:
+ continue
+
+ # issue WO read
+ START = s * SECTOR_SIZE
+ END = e * SECTOR_SIZE
+ size = (e - s + 1) * SECTOR_SIZE
+ assert 0 == io_to_exp_obj(
+ core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
+ ), "error reading in WO mode: S={}, start={}, end={}, insert_order={}".format(
+ S, s, e, insert_order
+ )
+
+ # verify read data
+ for sec in range(s, e + 1):
+ # just check the first byte of sector
+ region = sector_to_region(sec, region_start)
+ check_byte = sec * SECTOR_SIZE
+ assert (
+ result_b[check_byte] == data[S[region]][check_byte]
+ ), "unexpected data in sector {}, S={}, s={}, e={}, insert_order={}\n".format(
+ sec, S, s, e, insert_order
+ )
+
+ # WO is not supposed to clean dirty data
+ assert (
+ core_device.get_stats()[IoDir.WRITE] == 0
+ ), "unexpected write to core device, S={}, s={}, e={}, insert_order = {}\n".format(
+ S, s, e, insert_order
+ )
diff --git a/src/spdk/ocf/tests/functional/tests/eviction/__init__.py b/src/spdk/ocf/tests/functional/tests/eviction/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/eviction/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py b/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py
new file mode 100644
index 000000000..d17bbdb55
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py
@@ -0,0 +1,80 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion, CacheLineSize, SeqCutOffPolicy
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", [CacheMode.WT, CacheMode.WB, CacheMode.WO])
+@pytest.mark.xfail # TODO: remove when fixed
+def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+ """Test if eviction does not occur when IO greater than cache size is submitted.
+ """
+ cache_device = Volume(Size.from_MiB(20)) # this gives about 1.375 MiB actual caching space
+
+ core_device = Volume(Size.from_MiB(5))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode,
+ cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+ cache.add_core(core_exported)
+ cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
+
+ valid_io_size = Size.from_KiB(512)
+ test_data = Data(valid_io_size)
+ send_io(core_exported, test_data)
+
+ stats = core_exported.cache.get_stats()
+ assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
+ "Occupancy after first IO"
+ prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]
+
+ # Anything below 5 MiB is a valid size (less than core device size)
+ # Writing over 1.375 MiB in this case should go directly to core and shouldn't trigger eviction
+ io_size_bigger_than_cache = Size.from_MiB(2)
+ test_data = Data(io_size_bigger_than_cache)
+ send_io(core_exported, test_data)
+
+ stats = core_exported.cache.get_stats()
+
+ # Writes from IO greater than cache size should go directly to core
+ # Writes to core should equal the following:
+ # Previous writes to core + size written + size cleaned (reads from cache)
+ assert stats["block"]["core_volume_wr"]["value"] == \
+ stats["block"]["cache_volume_rd"]["value"] + \
+ prev_writes_to_core + io_size_bigger_than_cache.B / Size.from_KiB(4).B, \
+ "Writes to core after second IO"
+
+ # Occupancy shouldn't change (no eviction)
+ assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
+ "Occupancy after second IO"
+
+
+def send_io(exported_obj: Core, data: Data):
+ io = exported_obj.new_io(
+ exported_obj.cache.get_default_queue(),
+ 0, data.size, IoDir.WRITE, 0, 0
+ )
+
+ io.set_data(data)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+
+ assert completion.results["err"] == 0, "IO to exported object completion"
diff --git a/src/spdk/ocf/tests/functional/tests/flush/__init__.py b/src/spdk/ocf/tests/functional/tests/flush/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/flush/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/management/__init__.py b/src/spdk/ocf/tests/functional/tests/management/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py b/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py
new file mode 100644
index 000000000..2397be753
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py
@@ -0,0 +1,278 @@
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from random import randint
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size as S
+from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Check statistics before adding core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+ # Add core to cache
+ cache.add_core(core)
+
+ # Check statistics after adding core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_removing_core(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core to cache
+ cache.add_core(core)
+
+ # Remove core from cache
+ cache.remove_core(core)
+
+ # Check statistics after removing core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_30add_remove(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add and remove core device in a loop 30 times
+ # Check statistics after every operation
+ for i in range(0, 30):
+ cache.add_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ cache.remove_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_10add_remove_with_io(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add and remove core 10 times in a loop with io in between
+ for i in range(0, 10):
+ cache.add_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ write_data = Data.from_string("Test data")
+ io = core.new_io(
+ cache.get_default_queue(), S.from_sector(1).B, write_data.size,
+ IoDir.WRITE, 0, 0
+ )
+ io.set_data(write_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ cache.remove_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_add_remove_30core(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+ core_devices = []
+ core_amount = 30
+
+ # Add 30 cores and check stats after each addition
+ for i in range(0, core_amount):
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == i
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices.append(core)
+ cache.add_core(core)
+
+ # Remove 30 cores and check stats before each removal
+ for i in range(0, core_amount):
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - i
+ cache.remove_core(core_devices[i])
+
+ # Check statistics
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_adding_to_random_cache(pyocf_ctx):
+ cache_devices = []
+ core_devices = {}
+ cache_amount = 5
+ core_amount = 30
+
+ # Create 5 cache devices
+ for i in range(0, cache_amount):
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, name=f"cache{i}")
+ cache_devices.append(cache)
+
+ # Create 30 core devices and add each to a random cache
+ for i in range(0, core_amount):
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices[core] = randint(0, cache_amount - 1)
+ cache_devices[core_devices[core]].add_core(core)
+
+ # Count expected number of cores per cache
+ count_dict = {}
+ for i in range(0, cache_amount):
+ count_dict[i] = sum(k == i for k in core_devices.values())
+
+ # Check if cache statistics are as expected
+ for i in range(0, cache_amount):
+ stats = cache_devices[i].get_stats()
+ assert stats["conf"]["core_count"] == count_dict[i]
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core
+ cache.add_core(core)
+
+ # Check that it is not possible to add the same core again
+ with pytest.raises(OcfError):
+ cache.add_core(core)
+
+ # Check that core count is still equal to one
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
+ # Start first cache device
+ cache_device1 = Volume(S.from_MiB(30))
+ cache1 = Cache.start_on_device(
+ cache_device1, cache_mode=cache_mode, cache_line_size=cls, name="cache1"
+ )
+
+ # Start second cache device
+ cache_device2 = Volume(S.from_MiB(30))
+ cache2 = Cache.start_on_device(
+ cache_device2, cache_mode=cache_mode, cache_line_size=cls, name="cache2"
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core to first cache
+ cache1.add_core(core)
+
+ # Check that it is not possible to add core to second cache
+ with pytest.raises(OcfError):
+ cache2.add_core(core)
+
+ # Check that core count is as expected
+ stats = cache1.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ stats = cache2.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+ core_devices = []
+ core_amount = 5
+
+ # Create 5 core devices and add to cache
+ for i in range(0, core_amount):
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices.append(core)
+ cache.add_core(core)
+
+ # Check that core count is as expected
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount
+
+ # Remove 3 cores
+ cache.remove_core(core_devices[0])
+ cache.remove_core(core_devices[1])
+ cache.remove_core(core_devices[2])
+
+ # Add 2 cores and check if core count is as expected
+ cache.add_core(core_devices[0])
+ cache.add_core(core_devices[1])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - 1
+
+ # Remove 1 core and check if core count is as expected
+ cache.remove_core(core_devices[1])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - 2
+
+ # Add 2 cores and check if core count is as expected
+ cache.add_core(core_devices[1])
+ cache.add_core(core_devices[2])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_change_params.py b/src/spdk/ocf/tests/functional/tests/management/test_change_params.py
new file mode 100644
index 000000000..69b25e436
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_change_params.py
@@ -0,0 +1,135 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode, CleaningPolicy, SeqCutOffPolicy
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from pyocf.types.shared import CacheLineSize
+
+
+@pytest.mark.parametrize("from_cm", CacheMode)
+@pytest.mark.parametrize("to_cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=from_cm, cache_line_size=cls
+ )
+
+ # Change cache mode and check if stats are as expected
+ cache.change_cache_mode(to_cm)
+ stats_after = cache.get_stats()
+ assert stats_after["conf"]["cache_mode"] == to_cm
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_change_cleaning_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Check all possible cleaning policy switches
+ for cp_from in CleaningPolicy:
+ for cp_to in CleaningPolicy:
+ cache.set_cleaning_policy(cp_from.value)
+
+ # Check if cleaning policy is correct
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == cp_from.value
+
+ cache.set_cleaning_policy(cp_to.value)
+
+ # Check if cleaning policy is correct
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == cp_to.value
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Create 2 core devices
+ core_device1 = Volume(S.from_MiB(10))
+ core1 = Core.using_device(core_device1, name="core1")
+ core_device2 = Volume(S.from_MiB(10))
+ core2 = Core.using_device(core_device2, name="core2")
+
+ # Add cores
+ cache.add_core(core1)
+ cache.add_core(core2)
+
+ # Check all possible seq cut off policy switches
+ for seq_from in SeqCutOffPolicy:
+ for seq_to in SeqCutOffPolicy:
+ cache.set_seq_cut_off_policy(seq_from.value)
+
+ # Check if seq cut off policy is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+
+ cache.set_seq_cut_off_policy(seq_to.value)
+
+ # Check if seq cut off policy is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Create 2 core devices
+ core_device1 = Volume(S.from_MiB(10))
+ core1 = Core.using_device(core_device1, name="core1")
+ core_device2 = Volume(S.from_MiB(10))
+ core2 = Core.using_device(core_device2, name="core2")
+
+ # Add cores
+ cache.add_core(core1)
+ cache.add_core(core2)
+
+ # Check all possible seq cut off policy switches for first core
+ for seq_from in SeqCutOffPolicy:
+ for seq_to in SeqCutOffPolicy:
+ core1.set_seq_cut_off_policy(seq_from.value)
+
+ # Check if seq cut off policy of the first core is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+
+ # Check if seq cut off policy of the second core did not change
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
+
+ core1.set_seq_cut_off_policy(seq_to.value)
+
+ # Check if seq cut off policy of the first core is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+
+ # Check if seq cut off policy of the second core did not change
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py b/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py
new file mode 100644
index 000000000..f455ea1e1
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py
@@ -0,0 +1,545 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int, c_void_p, byref, c_uint32
+from random import randrange
+from itertools import count
+
+import pytest
+
+from pyocf.ocf import OcfLib
+from pyocf.types.cache import Cache, CacheMode, MetadataLayout, EvictionPolicy, CleaningPolicy
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize, SeqCutOffPolicy
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+def test_start_check_default(pyocf_ctx):
+ """Test if default values are correct after start.
+ """
+
+ cache_device = Volume(Size.from_MiB(40))
+ core_device = Volume(Size.from_MiB(10))
+ cache = Cache.start_on_device(cache_device)
+
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ # Check if values are default
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == CleaningPolicy.DEFAULT
+ assert stats["conf"]["cache_mode"] == CacheMode.DEFAULT
+ assert stats["conf"]["cache_line_size"] == CacheLineSize.DEFAULT
+ assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT
+
+ core_stats = core.get_stats()
+ assert core_stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+ """Test starting cache in different modes with different cache line sizes.
+ After start check proper cache mode behaviour, starting with write operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(40))
+ core_device = Volume(Size.from_MiB(10))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+
+ cache.add_core(core_exported)
+
+ logger.info("[STAGE] Initial write to exported object")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("This is test data")
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+ check_stats_write_empty(core_exported, mode, cls)
+
+ logger.info("[STAGE] Read from exported object after initial write")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_after_write(core_exported, mode, cls, True)
+
+ logger.info("[STAGE] Write to exported object after read")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("Changed test data")
+
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+ check_stats_write_after_read(core_exported, mode, cls)
+
+ check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+ """Starting cache in different modes with different cache line sizes.
+    After start, check for proper cache mode behaviour, starting with a read operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ core_device = Volume(Size.from_MiB(5))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+
+ cache.add_core(core_exported)
+
+ logger.info("[STAGE] Initial write to core device")
+ test_data = Data.from_string("This is test data")
+ io_to_core(core_exported, test_data, Size.from_sector(1).B, True)
+
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ logger.info("[STAGE] Initial read from exported object")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_empty(core_exported, mode, cls)
+
+ logger.info("[STAGE] Write to exported object after initial read")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("Changed test data")
+
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+
+ check_stats_write_after_read(core_exported, mode, cls, True)
+
+ logger.info("[STAGE] Read from exported object after write")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_after_write(core_exported, mode, cls)
+
+ check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("layout", MetadataLayout)
+def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: MetadataLayout):
+ """Starting cache with different parameters.
+ Check if cache starts without errors.
+ If possible check whether cache reports properly set parameters.
+ """
+ cache_device = Volume(Size.from_MiB(20))
+ queue_size = randrange(60000, 2**32)
+ unblock_size = randrange(1, queue_size)
+ volatile_metadata = randrange(2) == 1
+ unaligned_io = randrange(2) == 1
+ submit_fast = randrange(2) == 1
+ name = "test"
+
+ logger.info("[STAGE] Start cache")
+ cache = Cache.start_on_device(
+ cache_device,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=name,
+        metadata_layout=layout,
+ metadata_volatile=volatile_metadata,
+ max_queue_size=queue_size,
+ queue_unblock_size=unblock_size,
+ pt_unaligned_io=unaligned_io,
+ use_submit_fast=submit_fast)
+
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
+ assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
+ assert cache.get_name() == name, "Cache name"
+ # TODO: metadata_layout, metadata_volatile, max_queue_size,
+ # queue_unblock_size, pt_unaligned_io, use_submit_fast
+ # TODO: test in functional tests
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("with_flush", {True, False})
+def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
+ """Stopping cache.
+ Check if cache is stopped properly in different modes with or without preceding flush operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ core_device = Volume(Size.from_MiB(5))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+ cache.add_core(core_exported)
+ cls_no = 10
+
+ run_io_and_cache_data_if_possible(core_exported, mode, cls, cls_no)
+
+ stats = cache.get_stats()
+ assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
+ "Dirty data before MD5"
+
+ md5_exported_core = core_exported.exp_obj_md5()
+
+ if with_flush:
+ cache.flush()
+ cache.stop()
+
+ if mode.lazy_write() and not with_flush:
+ assert core_device.md5() != md5_exported_core, \
+ "MD5 check: core device vs exported object with dirty data"
+ else:
+ assert core_device.md5() == md5_exported_core, \
+ "MD5 check: core device vs exported object with clean data"
+
+
+def test_start_stop_multiple(pyocf_ctx):
+ """Starting/stopping multiple caches.
+ Check whether OCF allows for starting multiple caches and stopping them in random order
+ """
+
+ caches = []
+ caches_no = randrange(6, 11)
+ for i in range(1, caches_no):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{i}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ caches.append(cache)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+
+ caches.sort(key=lambda e: randrange(1000))
+ for cache in caches:
+ logger.info("Getting stats before stopping cache")
+ stats = cache.get_stats()
+ cache_name = stats["conf"]["cache_name"]
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, cache_name) != 0, "Try getting cache after stopping it"
+
+
+def test_100_start_stop(pyocf_ctx):
+ """Starting/stopping stress test.
+ Check OCF behaviour when cache is started and stopped continuously
+ """
+
+ for i in range(1, 101):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{i}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, "cache1") != 0, "Try getting cache after stopping it"
+
+
+def test_start_stop_incrementally(pyocf_ctx):
+ """Starting/stopping multiple caches incrementally.
+    Check whether OCF behaves correctly when a few caches at a time are
+    alternately added and removed (more added than removed) until their number reaches a limit,
+    then the proportions are reversed and the number of caches gradually falls to 0.
+ """
+
+ counter = count()
+ caches = []
+ caches_limit = 10
+ add = True
+ run = True
+ increase = True
+ while run:
+ if add:
+ for i in range(0, randrange(3, 5) if increase else randrange(1, 3)):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{next(counter)}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ caches.append(cache)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+ if len(caches) == caches_limit:
+ increase = False
+ else:
+ for i in range(0, randrange(1, 3) if increase else randrange(3, 5)):
+ if len(caches) == 0:
+ run = False
+ break
+ cache = caches.pop()
+ logger.info("Getting stats before stopping cache")
+ stats = cache.get_stats()
+ cache_name = stats["conf"]["cache_name"]
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
+ "Try getting cache after stopping it"
+ add = not add
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_id(pyocf_ctx, mode, cls):
+ """Adding two caches with the same name
+ Check that OCF does not allow for 2 caches to be started with the same cache_name
+ """
+
+ cache_device1 = Volume(Size.from_MiB(20))
+ cache_device2 = Volume(Size.from_MiB(20))
+ cache_name = "cache"
+ cache = Cache.start_on_device(cache_device1,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=cache_name)
+ cache.get_stats()
+
+ with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
+ cache = Cache.start_on_device(cache_device2,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=cache_name)
+ cache.get_stats()
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
+ """
+ Test whether we can start cache which would overflow ocf_cache_line_t type.
+ pass_criteria:
+ - Starting cache on device too big to handle should fail
+ """
+ class HugeDevice(Volume):
+ def get_length(self):
+ return Size.from_B((cls * c_uint32(-1).value))
+
+ def submit_io(self, io):
+ io.contents._end(io, 0)
+
+ cache_device = HugeDevice(Size.from_MiB(20))
+
+ with pytest.raises(OcfError, match="OCF_ERR_START_CACHE_FAIL"):
+ cache = Cache.start_on_device(cache_device, cache_line_size=cls, metadata_volatile=True)
+
+ assert any(
+ [line.find("exceeds maximum") > 0 for line in pyocf_ctx_log_buffer.get_lines()]
+ ), "Expected to find log notifying that max size was exceeded"
+
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_device(pyocf_ctx, mode, cls):
+ """Adding two caches using the same cache device
+ Check that OCF does not allow for 2 caches using the same cache device to be started
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=mode, cache_line_size=cls, name="cache1"
+ )
+ cache.get_stats()
+
+ with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=mode, cache_line_size=cls, name="cache2"
+ )
+ cache.get_stats()
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_too_small_device(pyocf_ctx, mode, cls):
+ """Starting cache with device below 100MiB
+ Check if starting cache with device below minimum size is blocked
+ """
+
+ cache_device = Volume(Size.from_B(20 * 1024 * 1024 - 1))
+
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL_CACHE_DEV"):
+ Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+
+
+def test_start_stop_noqueue(pyocf_ctx):
+ # cache object just to construct cfg conveniently
+ _cache = Cache(pyocf_ctx.ctx_handle)
+
+ cache_handle = c_void_p()
+ status = pyocf_ctx.lib.ocf_mngt_cache_start(
+ pyocf_ctx.ctx_handle, byref(cache_handle), byref(_cache.cfg)
+ )
+ assert not status, "Failed to start cache: {}".format(status)
+
+ # stop without creating mngmt queue
+ c = OcfCompletion(
+ [("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
+ )
+ pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
+ c.wait()
+ assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
+
+
+def run_io_and_cache_data_if_possible(exported_obj, mode, cls, cls_no):
+ test_data = Data(cls_no * cls)
+
+ if mode in {CacheMode.WI, CacheMode.WA}:
+ logger.info("[STAGE] Write to core device")
+ io_to_core(exported_obj, test_data, 0, True)
+ logger.info("[STAGE] Read from exported object")
+ io_from_exported_object(exported_obj, test_data.size, 0)
+ else:
+ logger.info("[STAGE] Write to exported object")
+ io_to_core(exported_obj, test_data, 0)
+
+ stats = exported_obj.cache.get_stats()
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"
+
+
+def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
+ new_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
+ io = new_io(exported_obj.cache.get_default_queue(), offset, data.size,
+ IoDir.WRITE, 0, 0)
+ io.set_data(data)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+
+ assert completion.results["err"] == 0, "IO to exported object completion"
+
+
+def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
+ read_buffer = Data(buffer_size)
+ io = exported_obj.new_io(exported_obj.cache.get_default_queue(), offset,
+ read_buffer.size, IoDir.READ, 0, 0)
+ io.set_data(read_buffer)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+
+ assert completion.results["err"] == 0, "IO from exported object completion"
+ return read_buffer
+
+
+def check_stats_read_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+ stats = exported_obj.cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
+ assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
+ "Read full misses"
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"
+
+
+def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+ stats = exported_obj.cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ # TODO(ajrutkow): why 1 for WT ??
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
+ "Writes to core device"
+ assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
+ "Write full misses"
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0), \
+ "Occupancy"
+
+
+def check_stats_write_after_read(exported_obj: Core,
+ mode: CacheMode,
+ cls: CacheLineSize,
+ read_from_empty=False):
+ stats = exported_obj.cache.get_stats()
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (0 if mode in {CacheMode.WI, CacheMode.PT} else
+ (2 if read_from_empty and mode.lazy_write() else 1)), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
+ "Writes to core device"
+ assert stats["req"]["wr_hits"]["value"] == \
+ (1 if (mode.read_insert() and mode != CacheMode.WI)
+ or (mode.write_insert() and not read_from_empty) else 0), \
+ "Write hits"
+ assert stats["usage"]["occupancy"]["value"] == \
+ (0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
+ "Occupancy"
+
+
+def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
+ stats = exported_obj.cache.get_stats()
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
+ "Writes to cache device"
+ assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
+ (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
+ or (mode == CacheMode.WA and not write_to_empty) else 0), \
+ "Reads from cache device"
+ assert exported_obj.device.get_stats()[IoDir.READ] == \
+ (0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
+ or (mode == CacheMode.WA and not write_to_empty) else 1), \
+ "Reads from core device"
+ assert stats["req"]["rd_full_misses"]["value"] == \
+ (1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
+ + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
+ "Read full misses"
+ assert stats["req"]["rd_hits"]["value"] == \
+ (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
+ or (mode == CacheMode.WA and not write_to_empty) else 0), \
+ "Read hits"
+ assert stats["usage"]["occupancy"]["value"] == \
+ (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
+
+
+def check_md5_sums(exported_obj: Core, mode: CacheMode):
+ if mode.lazy_write():
+ assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object without flush"
+ exported_obj.cache.flush()
+ assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object after flush"
+ else:
+ assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object"
+
+
+def get_cache_by_name(ctx, cache_name):
+ cache_pointer = c_void_p()
+ return OcfLib.getInstance().ocf_mngt_cache_get_by_name(
+ ctx.ctx_handle, cache_name, byref(cache_pointer)
+ )
diff --git a/src/spdk/ocf/tests/functional/tests/security/__init__.py b/src/spdk/ocf/tests/functional/tests/security/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/security/conftest.py b/src/spdk/ocf/tests/functional/tests/security/conftest.py
new file mode 100644
index 000000000..7d9ca3bbb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/conftest.py
@@ -0,0 +1,98 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import os
+import sys
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_int
+)
+from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
+
+from pyocf.types.cache import CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
+from pyocf.types.shared import CacheLineSize
+
+import pytest
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+
+def enum_min(enum):
+ return list(enum)[0].value
+
+
+def enum_max(enum):
+ return list(enum)[-1].value
+
+
+def enum_range(enum):
+ return Range(enum_min(enum), enum_max(enum))
+
+
+@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT16))
+def c_uint16_randomize(request):
+ return request.param
+
+
+@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32))
+def c_uint32_randomize(request):
+ return request.param
+
+
+@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT64))
+def c_uint64_randomize(request):
+ return request.param
+
+
+@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
+def c_int_randomize(request):
+ return request.param
+
+
+@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
+def c_int_sector_randomize(request):
+ return request.param // 512 * 512
+
+
+@pytest.fixture(params=RandomStringGenerator())
+def string_randomize(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
+)
+def not_cache_mode_randomize(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheLineSize))
+)
+def not_cache_line_size_randomize(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(EvictionPolicy))
+)
+def not_eviction_policy_randomize(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(PromotionPolicy))
+)
+def not_promotion_policy_randomize(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(MetadataLayout))
+)
+def not_metadata_layout_randomize(request):
+ return request.param
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py b/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py
new file mode 100644
index 000000000..4369d49de
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py
@@ -0,0 +1,315 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+
+from pyocf.types.cache import (
+ Cache,
+ CacheMode,
+ CleaningPolicy,
+ AlruParams,
+ AcpParams,
+ PromotionPolicy,
+ NhitParams,
+ ConfValidValues,
+)
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from tests.utils.random import RandomGenerator, DefaultRanges
+from pyocf.types.shared import OcfError, CacheLineSize, SeqCutOffPolicy
+from ctypes import c_uint64, c_uint32, c_uint8
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_change_cache_mode(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to change cache mode to invalid value.
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Change cache mode to invalid one and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in CacheMode]:
+ continue
+ with pytest.raises(OcfError, match="Error changing cache mode"):
+ cache.change_cache_mode(i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_cleaning_policy(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to change cleaning policy to invalid value
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Set cleaning policy to invalid one and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in CleaningPolicy]:
+ continue
+ with pytest.raises(OcfError, match="Error changing cleaning policy"):
+ cache.set_cleaning_policy(i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_attach_cls(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to change cache line size to
+ invalid value while attaching cache device
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache(owner=cache_device.owner, cache_mode=cm, cache_line_size=cls)
+ cache.start_cache()
+
+ # Check whether it is possible to attach cache device with invalid cache line size
+ for i in RandomGenerator(DefaultRanges.UINT64):
+ if i in [item.value for item in CacheLineSize]:
+ continue
+ with pytest.raises(OcfError, match="Attaching cache device failed"):
+ cache.attach_device(cache_device, cache_line_size=i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_cache_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to change cache seq cut-off policy to invalid value
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Create 2 core devices
+ core_device1 = Volume(S.from_MiB(10))
+ core1 = Core.using_device(core_device1, name="core1")
+ core_device2 = Volume(S.from_MiB(10))
+ core2 = Core.using_device(core_device2, name="core2")
+
+ # Add cores
+ cache.add_core(core1)
+ cache.add_core(core2)
+
+ # Change cache seq cut off policy to invalid one and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in SeqCutOffPolicy]:
+ continue
+ with pytest.raises(OcfError, match="Error setting cache seq cut off policy"):
+ cache.set_seq_cut_off_policy(i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_core_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to change core seq cut-off policy to invalid value
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core
+ cache.add_core(core)
+
+ # Change core seq cut off policy to invalid one and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in SeqCutOffPolicy]:
+ continue
+ with pytest.raises(OcfError, match="Error setting core seq cut off policy"):
+ core.set_seq_cut_off_policy(i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_alru_param(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid param for alru cleaning policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Change invalid alru param and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in AlruParams]:
+ continue
+ with pytest.raises(OcfError, match="Error setting cleaning policy param"):
+ cache.set_cleaning_policy_param(CleaningPolicy.ALRU, i, 1)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_acp_param(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid param for acp cleaning policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Change invalid acp param and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in AcpParams]:
+ continue
+ with pytest.raises(OcfError, match="Error setting cleaning policy param"):
+            cache.set_cleaning_policy_param(CleaningPolicy.ACP, i, 1)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_promotion_policy(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid param for promotion policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
+
+ # Change to invalid promotion policy and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in [item.value for item in PromotionPolicy]:
+ continue
+ with pytest.raises(OcfError, match="Error setting promotion policy"):
+ cache.set_promotion_policy(i)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid promotion policy param id for nhit promotion policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device,
+ cache_mode=cm,
+ cache_line_size=cls,
+ promotion_policy=PromotionPolicy.NHIT,
+ )
+
+ # Set invalid promotion policy param id and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT8):
+ if i in [item.value for item in NhitParams]:
+ continue
+ with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
+ cache.set_promotion_policy_param(PromotionPolicy.NHIT, i, 1)
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid promotion policy param TRIGGER_THRESHOLD for
+ nhit promotion policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device,
+ cache_mode=cm,
+ cache_line_size=cls,
+ promotion_policy=PromotionPolicy.NHIT,
+ )
+
+ # Set to invalid promotion policy trigger threshold and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in ConfValidValues.promotion_nhit_trigger_threshold_range:
+ continue
+ with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i
+ )
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.security
+def test_neg_set_nhit_promotion_policy_param_threshold(pyocf_ctx, cm, cls):
+ """
+ Test whether it is possible to set invalid promotion policy param INSERTION_THRESHOLD for
+ nhit promotion policy
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cm: cache mode we start with
+ :param cls: cache line size we start with
+ :return:
+ """
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device,
+ cache_mode=cm,
+ cache_line_size=cls,
+ promotion_policy=PromotionPolicy.NHIT,
+ )
+
+ # Set to invalid promotion policy insertion threshold and check if failed
+ for i in RandomGenerator(DefaultRanges.UINT32):
+ if i in ConfValidValues.promotion_nhit_insertion_threshold_range:
+ continue
+ with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
+ cache.set_promotion_policy_param(
+ PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, i
+ )
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py b/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py
new file mode 100644
index 000000000..ee399a9c3
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py
@@ -0,0 +1,155 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
+from pyocf.types.shared import OcfError, CacheLineSize
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+from tests.utils.random import RandomGenerator, DefaultRanges, Range
+
+logger = logging.getLogger(__name__)
+
+
+def try_start_cache(**config):
+ cache_device = Volume(Size.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, **config)
+ cache.stop()
+
+@pytest.mark.security
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_fuzzy_start_cache_mode(pyocf_ctx, cls, not_cache_mode_randomize):
+ """
+ Test whether it is impossible to start cache with invalid cache mode value.
+ :param pyocf_ctx: basic pyocf context fixture
+ :param cls: cache line size value to start cache with
+    :param not_cache_mode_randomize: fuzzed cache mode enum value to start cache with
+ """
+ with pytest.raises(OcfError, match="OCF_ERR_INVALID_CACHE_MODE"):
+ try_start_cache(cache_mode=not_cache_mode_randomize, cache_line_size=cls)
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cm", CacheMode)
+def test_fuzzy_start_cache_line_size(pyocf_ctx, not_cache_line_size_randomize, cm):
+ """
+ Test whether it is impossible to start cache with invalid cache line size value.
+ :param pyocf_ctx: basic pyocf context fixture
+    :param not_cache_line_size_randomize: fuzzed cache line size enum value to start cache with
+ :param cm: cache mode value to start cache with
+ """
+ with pytest.raises(OcfError, match="OCF_ERR_INVALID_CACHE_LINE_SIZE"):
+ try_start_cache(cache_mode=cm, cache_line_size=not_cache_line_size_randomize)
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
+ """
+    Test whether it is possible to start cache with various cache name values.
+ :param pyocf_ctx: basic pyocf context fixture
+ :param string_randomize: fuzzed cache name value to start cache with
+ :param cm: cache mode value to start cache with
+ :param cls: cache line size value to start cache with
+ """
+ cache_device = Volume(Size.from_MiB(30))
+ incorrect_values = ['']
+ try:
+ cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm,
+ cache_line_size=cls)
+ except OcfError:
+ if string_randomize not in incorrect_values:
+ logger.error(
+ f"Cache did not start properly with correct name value: '{string_randomize}'")
+ return
+ if string_randomize in incorrect_values:
+ logger.error(f"Cache started with incorrect name value: '{string_randomize}'")
+ cache.stop()
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_fuzzy_start_eviction_policy(pyocf_ctx, not_eviction_policy_randomize, cm, cls):
+ """
+ Test whether it is impossible to start cache with invalid eviction policy value.
+ :param pyocf_ctx: basic pyocf context fixture
+    :param not_eviction_policy_randomize: fuzzed eviction policy enum value to start cache with
+ :param cm: cache mode value to start cache with
+ :param cls: cache line size value to start cache with
+ """
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
+ try_start_cache(
+ eviction_policy=not_eviction_policy_randomize,
+ cache_mode=cm,
+ cache_line_size=cls
+ )
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_fuzzy_start_metadata_layout(pyocf_ctx, not_metadata_layout_randomize, cm, cls):
+ """
+ Test whether it is impossible to start cache with invalid metadata layout value.
+ :param pyocf_ctx: basic pyocf context fixture
+    :param not_metadata_layout_randomize: fuzzed metadata layout enum value to start cache with
+ :param cm: cache mode value to start cache with
+ :param cls: cache line size value to start cache with
+ """
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
+ try_start_cache(
+ metadata_layout=not_metadata_layout_randomize,
+ cache_mode=cm,
+ cache_line_size=cls
+ )
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize('max_wb_queue_size', RandomGenerator(DefaultRanges.UINT32, 10))
+def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
+ """
+    Test whether it is impossible to start cache with an invalid relationship between
+    max queue size and queue unblock size.
+ :param pyocf_ctx: basic pyocf context fixture
+ :param max_wb_queue_size: max queue size value to start cache with
+ :param c_uint32_randomize: queue unblock size value to start cache with
+ :param cls: cache line size value to start cache with
+ """
+ if c_uint32_randomize > max_wb_queue_size:
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
+ try_start_cache(
+ max_queue_size=max_wb_queue_size,
+ queue_unblock_size=c_uint32_randomize,
+ cache_mode=CacheMode.WB,
+ cache_line_size=cls)
+ else:
+ logger.warning(f"Test skipped for valid values: "
+ f"'max_queue_size={max_wb_queue_size}, "
+ f"queue_unblock_size={c_uint32_randomize}'.")
+
+
+@pytest.mark.security
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_fuzzy_start_promotion_policy(pyocf_ctx, not_promotion_policy_randomize, cm, cls):
+ """
+ Test whether it is impossible to start cache with invalid promotion policy
+ :param pyocf_ctx: basic pyocf context fixture
+    :param not_promotion_policy_randomize: fuzzed promotion policy to start with
+ :param cm: cache mode value to start cache with
+ :param cls: cache line size to start cache with
+ """
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
+ try_start_cache(
+ cache_mode=cm,
+ cache_line_size=cls,
+ promotion_policy=not_promotion_policy_randomize
+ )
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py b/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py
new file mode 100644
index 000000000..c580df132
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py
@@ -0,0 +1,205 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int
+from random import randrange
+
+import pytest
+
+from pyocf.types.cache import Cache, Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+
+@pytest.mark.security
+def test_neg_write_too_long_data(pyocf_ctx, c_uint16_randomize):
+ """
+ Check if writing data larger than exported object size is properly blocked
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(1))
+ data = Data(int(Size.from_KiB(c_uint16_randomize)))
+ completion = io_operation(core, data, IoDir.WRITE)
+
+ if c_uint16_randomize > 1024:
+ assert completion.results["err"] != 0
+ else:
+ assert completion.results["err"] == 0
+
+
+@pytest.mark.security
+def test_neg_read_too_long_data(pyocf_ctx, c_uint16_randomize):
+ """
+ Check if reading data larger than exported object size is properly blocked
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(1))
+ data = Data(int(Size.from_KiB(c_uint16_randomize)))
+ completion = io_operation(core, data, IoDir.READ)
+
+ if c_uint16_randomize > 1024:
+ assert completion.results["err"] != 0
+ else:
+ assert completion.results["err"] == 0
+
+
+@pytest.mark.security
+def test_neg_write_too_far(pyocf_ctx, c_uint16_randomize):
+ """
+    Check if a write of data which would normally fit on the exported object is
+    blocked when the offset is set so that the data extends past the exported device end
+ """
+
+ limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
+ core = prepare_cache_and_core(Size.from_MiB(4))
+ data = Data(int(Size.from_KiB(limited_size)))
+ completion = io_operation(core, data, IoDir.WRITE, int(Size.from_MiB(3)))
+
+ if limited_size > 1024:
+ assert completion.results["err"] != 0
+ else:
+ assert completion.results["err"] == 0
+
+
+@pytest.mark.security
+def test_neg_read_too_far(pyocf_ctx, c_uint16_randomize):
+ """
+    Check if a read of data which would normally fit on the exported object is
+    blocked when the offset is set so that the data is read beyond the exported device end
+ """
+
+ limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
+ core = prepare_cache_and_core(Size.from_MiB(4))
+ data = Data(int(Size.from_KiB(limited_size)))
+ completion = io_operation(core, data, IoDir.READ, offset=(Size.from_MiB(3)))
+
+ if limited_size > 1024:
+ assert completion.results["err"] != 0
+ else:
+ assert completion.results["err"] == 0
+
+
+@pytest.mark.security
+def test_neg_write_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
+ """
+ Check that write operations are blocked when
+ IO offset is located outside of device range
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_KiB(1)))
+ completion = io_operation(core, data, IoDir.WRITE, offset=c_int_sector_randomize)
+
+ if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
+ assert completion.results["err"] == 0
+ else:
+ assert completion.results["err"] != 0
+
+
+@pytest.mark.security
+def test_neg_read_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
+ """
+ Check that read operations are blocked when
+ IO offset is located outside of device range
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_KiB(1)))
+ completion = io_operation(core, data, IoDir.READ, offset=c_int_sector_randomize)
+
+ if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
+ assert completion.results["err"] == 0
+ else:
+ assert completion.results["err"] != 0
+
+
+@pytest.mark.security
+def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
+ """
+ Check that write operations are blocked when
+ IO offset is not aligned
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_KiB(1)))
+ if c_int_randomize % 512 != 0:
+ with pytest.raises(Exception, match="Failed to create io!"):
+ core.new_io(core.cache.get_default_queue(), c_int_randomize, data.size,
+ IoDir.WRITE, 0, 0)
+
+
+@pytest.mark.security
+def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
+ """
+ Check that write operations are blocked when
+ IO size is not aligned
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_B(c_uint16_randomize)))
+ if c_uint16_randomize % 512 != 0:
+ with pytest.raises(Exception, match="Failed to create io!"):
+ core.new_io(core.cache.get_default_queue(), 0, data.size,
+ IoDir.WRITE, 0, 0)
+
+
+@pytest.mark.security
+def test_neg_io_class(pyocf_ctx, c_int_randomize):
+ """
+ Check that IO operations are blocked when IO class
+ number is not in allowed values {0, ..., 32}
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_MiB(1)))
+ completion = io_operation(core, data, randrange(0, 2), io_class=c_int_randomize)
+
+ if 0 <= c_int_randomize <= 32:
+ assert completion.results["err"] == 0
+ else:
+ assert completion.results["err"] != 0
+
+
+@pytest.mark.security
+def test_neg_io_direction(pyocf_ctx, c_int_randomize):
+ """
+ Check that IO operations are not executed for unknown IO direction,
+ that is when IO direction value is not in allowed values {0, 1}
+ """
+
+ core = prepare_cache_and_core(Size.from_MiB(2))
+ data = Data(int(Size.from_MiB(1)))
+ completion = io_operation(core, data, c_int_randomize)
+
+ if c_int_randomize in [0, 1]:
+ assert completion.results["err"] == 0
+ else:
+ assert completion.results["err"] != 0
+
+
+def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(20)):
+ cache_device = Volume(cache_size)
+ core_device = Volume(core_size)
+
+ cache = Cache.start_on_device(cache_device)
+ core = Core.using_device(core_device)
+
+ cache.add_core(core)
+ return core
+
+
+def io_operation(core: Core, data: Data, io_direction: int, offset: int = 0, io_class: int = 0):
+ io = core.new_io(core.cache.get_default_queue(), offset, data.size,
+ io_direction, io_class, 0)
+ io.set_data(data)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+ return completion
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py b/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py
new file mode 100644
index 000000000..229410864
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py
@@ -0,0 +1,215 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from pyocf.types.data import Data, DataOps
+from pyocf.types.ctx import OcfCtx
+from pyocf.types.logger import DefaultLogger, LogLevel
+from pyocf.ocf import OcfLib
+from pyocf.types.metadata_updater import MetadataUpdater
+from pyocf.types.cleaner import Cleaner
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion
+
+
+class DataCopyTracer(Data):
+ """
+ This class enables tracking whether each copied over Data instance is
+ then securely erased.
+ """
+
+ needs_erase = set()
+ locked_instances = set()
+
+ @staticmethod
+ @DataOps.ALLOC
+ def _alloc(pages):
+ data = DataCopyTracer.pages(pages)
+ Data._ocf_instances_.append(data)
+
+ return data.handle.value
+
+ def mlock(self):
+ DataCopyTracer.locked_instances.add(self)
+ DataCopyTracer.needs_erase.add(self)
+ return super().mlock()
+
+ def munlock(self):
+ if self in DataCopyTracer.needs_erase:
+ assert 0, "Erase should be called first on locked Data!"
+
+ DataCopyTracer.locked_instances.remove(self)
+ return super().munlock()
+
+ def secure_erase(self):
+ DataCopyTracer.needs_erase.remove(self)
+ return super().secure_erase()
+
+ def copy(self, src, end, start, size):
+ DataCopyTracer.needs_erase.add(self)
+ return super().copy(src, end, start, size)
+
+
+@pytest.mark.security
+@pytest.mark.parametrize(
+ "cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI]
+)
+def test_secure_erase_simple_io_read_misses(cache_mode):
+ """
+ Perform simple IO which will trigger read misses, which in turn should
+ trigger backfill. Track all the data locked/copied for backfill and make
+ sure OCF calls secure erase and unlock on them.
+ """
+ ctx = OcfCtx(
+ OcfLib.getInstance(),
+ b"Security tests ctx",
+ DefaultLogger(LogLevel.WARN),
+ DataCopyTracer,
+ MetadataUpdater,
+ Cleaner,
+ )
+
+ ctx.register_volume_type(Volume)
+
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
+
+ core_device = Volume(S.from_MiB(50))
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ write_data = DataCopyTracer(S.from_sector(1))
+ io = core.new_io(
+ cache.get_default_queue(),
+ S.from_sector(1).B,
+ write_data.size,
+ IoDir.WRITE,
+ 0,
+ 0,
+ )
+ io.set_data(write_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ cmpls = []
+ for i in range(100):
+ read_data = DataCopyTracer(S.from_sector(1))
+ io = core.new_io(
+ cache.get_default_queue(),
+ i * S.from_sector(1).B,
+ read_data.size,
+ IoDir.READ,
+ 0,
+ 0,
+ )
+ io.set_data(read_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ cmpls.append(cmpl)
+ io.submit()
+
+ for c in cmpls:
+ c.wait()
+
+ write_data = DataCopyTracer.from_string("TEST DATA" * 100)
+ io = core.new_io(
+ cache.get_default_queue(), S.from_sector(1), write_data.size, IoDir.WRITE, 0, 0
+ )
+ io.set_data(write_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ stats = cache.get_stats()
+
+ ctx.exit()
+
+ assert (
+ len(DataCopyTracer.needs_erase) == 0
+ ), "Not all locked Data instances were secure erased!"
+ assert (
+ len(DataCopyTracer.locked_instances) == 0
+ ), "Not all locked Data instances were unlocked!"
+ assert (
+ stats["req"]["rd_partial_misses"]["value"]
+ + stats["req"]["rd_full_misses"]["value"]
+ ) > 0
+
+
+@pytest.mark.security
+def test_secure_erase_simple_io_cleaning():
+ """
+ Perform simple IO which will trigger WB cleaning. Track all the data from
+ cleaner (locked) and make sure they are erased and unlocked after use.
+
+ 1. Start cache in WB mode
+ 2. Write single sector at LBA 0
+ 3. Read whole cache line at LBA 0
+ 4. Assert that 3. triggered cleaning
+ 5. Check if all locked Data copies were erased and unlocked
+ """
+ ctx = OcfCtx(
+ OcfLib.getInstance(),
+ b"Security tests ctx",
+ DefaultLogger(LogLevel.WARN),
+ DataCopyTracer,
+ MetadataUpdater,
+ Cleaner,
+ )
+
+ ctx.register_volume_type(Volume)
+
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
+
+ core_device = Volume(S.from_MiB(100))
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ read_data = Data(S.from_sector(1).B)
+ io = core.new_io(
+ cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0
+ )
+ io.set_data(read_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ read_data = Data(S.from_sector(8).B)
+ io = core.new_io(
+ cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0
+ )
+ io.set_data(read_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ stats = cache.get_stats()
+
+ ctx.exit()
+
+ assert (
+ len(DataCopyTracer.needs_erase) == 0
+ ), "Not all locked Data instances were secure erased!"
+ assert (
+ len(DataCopyTracer.locked_instances) == 0
+ ), "Not all locked Data instances were unlocked!"
+ assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
diff --git a/src/spdk/ocf/tests/functional/tests/utils/random.py b/src/spdk/ocf/tests/functional/tests/utils/random.py
new file mode 100644
index 000000000..27735700f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/utils/random.py
@@ -0,0 +1,95 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import random
+import string
+import enum
+from functools import reduce
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_uint8,
+ c_int,
+ c_uint
+)
+
+
+class Range:
+ def __init__(self, min_val, max_val):
+ self.min = min_val
+ self.max = max_val
+
+ def is_within(self, val):
+ return val >= self.min and val <= self.max
+
+
+class DefaultRanges(Range, enum.Enum):
+ UINT8 = 0, c_uint8(-1).value
+ UINT16 = 0, c_uint16(-1).value
+ UINT32 = 0, c_uint32(-1).value
+ UINT64 = 0, c_uint64(-1).value
+ INT = int(-c_uint(-1).value / 2) - 1, int(c_uint(-1).value / 2)
+
+
+class RandomGenerator:
+ def __init__(self, base_range=DefaultRanges.INT, count=1000):
+ with open("config/random.cfg") as f:
+ self.random = random.Random(int(f.read()))
+ self.exclude = []
+ self.range = base_range
+ self.count = count
+ self.n = 0
+
+ def exclude_range(self, excl_range):
+ self.exclude.append(excl_range)
+ return self
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.n >= self.count:
+ raise StopIteration()
+ self.n += 1
+ while True:
+ val = self.random.randint(self.range.min, self.range.max)
+ if self.exclude:
+ excl_map = map(lambda e: e.is_within(val), self.exclude)
+ is_excluded = reduce(lambda a, b: a or b, excl_map)
+ if is_excluded:
+ continue
+ return val
+
+
+class RandomStringGenerator:
+ def __init__(self, len_range=Range(0, 20), count=700):
+ with open("config/random.cfg") as f:
+ self.random = random.Random(int(f.read()))
+ self.generator = self.__string_generator(len_range)
+ self.count = count
+ self.n = 0
+
+ def __string_generator(self, len_range):
+ while True:
+ for t in [string.digits,
+ string.ascii_letters + string.digits,
+ string.ascii_lowercase,
+ string.ascii_uppercase,
+ string.printable,
+ string.punctuation,
+ string.hexdigits]:
+ yield ''.join(random.choice(t) for _ in range(
+ self.random.randint(len_range.min, len_range.max)
+ ))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.n >= self.count:
+ raise StopIteration()
+ self.n += 1
+ return next(self.generator)
diff --git a/src/spdk/ocf/tests/functional/utils/configure_random.py b/src/spdk/ocf/tests/functional/utils/configure_random.py
new file mode 100755
index 000000000..71a044014
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/utils/configure_random.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import sys
+import random
+
+
+with open("config/random.cfg", "w") as f:
+ f.write(str(random.randint(0, sys.maxsize)))
diff --git a/src/spdk/ocf/tests/unit/framework/.gitignore b/src/spdk/ocf/tests/unit/framework/.gitignore
new file mode 100644
index 000000000..c18dd8d83
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/src/spdk/ocf/tests/unit/framework/README b/src/spdk/ocf/tests/unit/framework/README
new file mode 100644
index 000000000..e3e98e8d0
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/README
@@ -0,0 +1,11 @@
+GENERATING NEW TEST
+	To add a new test, run "add_new_test_file.py" with two parameters:
+		- tested file path (the path must be relative to your current working directory)
+		- tested function name
+	The generated file name may be changed without any consequences.
+
+	It is good practice to run the "add_new_test_file.py" script from the test directory
+	(not the framework directory), because it then prepends the appropriate license header;
+	an example invocation is shown below.
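+
+	Example (the source path and function name are hypothetical placeholders used only
+	to illustrate the two parameters; run from the test directory):
+		./add_new_test_file.py ../../src/engine/engine_common.c ocf_engine_hndl_req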
+
+RUNNING SINGLE TEST
+	Executable test files are stored by default in 'UT_dir/build/sources_to_test_repository/'
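+	A single test can then be run by executing the generated binary directly, e.g.
+	(placeholder names only; the actual layout mirrors the tested sources):
+		./build/sources_to_test_repository/<subdir>/<test_executable>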
diff --git a/src/spdk/ocf/tests/unit/framework/add_new_test_file.py b/src/spdk/ocf/tests/unit/framework/add_new_test_file.py
new file mode 100755
index 000000000..20c46d125
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/add_new_test_file.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import tests_config
+import re
+import os
+import sys
+import textwrap
+
+
+class TestGenerator(object):
+ main_UT_dir = ""
+ main_tested_dir = ""
+ tested_file_path = ""
+ tested_function_name = ""
+
+ def __init__(self, main_UT_dir, main_tested_dir, file_path, func_name):
+ self.set_main_UT_dir(main_UT_dir)
+ self.set_main_tested_dir(main_tested_dir)
+ self.set_tested_file_path(file_path)
+ self.tested_function_name = func_name
+
+ def create_empty_test_file(self):
+ dst_dir = os.path.dirname(self.get_tested_file_path()[::-1])[::-1]
+
+ self.create_dir_if_not_exist(self.get_main_UT_dir() + dst_dir)
+ test_file_name = os.path.basename(self.get_tested_file_path())
+
+ dst_path = self.get_main_UT_dir() + dst_dir + "/" + test_file_name
+
+ no_str = ""
+ no = 0
+ while True:
+ if not os.path.isfile("{0}{1}.{2}".format(dst_path.rsplit(".", 1)[0], no_str,
+ dst_path.rsplit(".", 1)[1])):
+ break
+ no += 1
+ no_str = str(no)
+
+ dst_path = dst_path.rsplit(".", 1)[0] + no_str + "." + dst_path.rsplit(".", 1)[1]
+ buf = self.get_markups()
+ buf += "#undef static\n\n"
+ buf += "#undef inline\n\n"
+ buf += self.get_UT_includes()
+ buf += self.get_includes(self.get_main_tested_dir() + self.get_tested_file_path())
+ buf += self.get_autowrap_file_include(dst_path)
+ buf += self.get_empty_test_function()
+ buf += self.get_test_main()
+
+ with open(dst_path, "w") as f:
+ f.writelines(buf)
+
+ print(f"{dst_path} generated successfully!")
+
+ def get_markups(self):
+ ret = "/*\n"
+ ret += " * <tested_file_path>" + self.get_tested_file_path() + "</tested_file_path>\n"
+ ret += " * <tested_function>" + self.get_tested_function_name() + "</tested_function>\n"
+ ret += " * <functions_to_leave>\n"
+ ret += " *\tINSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE\n"
+ ret += " *\tONE FUNCTION PER LINE\n"
+ ret += " * </functions_to_leave>\n"
+ ret += " */\n\n"
+
+ return ret
+
+ def create_dir_if_not_exist(self, path):
+ if not os.path.isdir(path):
+ try:
+ os.makedirs(path)
+ except Exception:
+ pass
+ return True
+ return None
+
+ def get_UT_includes(self):
+ ret = '''
+ #include <stdarg.h>
+ #include <stddef.h>
+ #include <setjmp.h>
+ #include <cmocka.h>
+ #include "print_desc.h"\n\n'''
+
+ return textwrap.dedent(ret)
+
+ def get_autowrap_file_include(self, test_file_path):
+ autowrap_file = test_file_path.rsplit(".", 1)[0]
+ autowrap_file = autowrap_file.replace(self.main_UT_dir, "")
+ autowrap_file += "_generated_wraps.c"
+ return "#include \"" + autowrap_file + "\"\n\n"
+
+ def get_includes(self, abs_path_to_tested_file):
+ with open(abs_path_to_tested_file, "r") as f:
+ code = f.readlines()
+
+ ret = [line for line in code if re.search(r'#include', line)]
+
+ return "".join(ret) + "\n"
+
+ def get_empty_test_function(self):
+ ret = "static void " + self.get_tested_function_name() + "_test01(void **state)\n"
+ ret += "{\n"
+ ret += "\tprint_test_description(\"Put test description here\\n\");\n"
+ ret += "\tassert_int_equal(1,1);\n"
+ ret += "}\n\n"
+
+ return ret
+
+ def get_test_main(self):
+ ret = "int main(void)\n"
+ ret += "{\n"
+ ret += "\tconst struct CMUnitTest tests[] = {\n"
+ ret += "\t\tcmocka_unit_test(" + self.get_tested_function_name() + "_test01)\n"
+ ret += "\t};\n\n"
+ ret += "\tprint_message(\"Unit test for " + self.get_tested_function_name() + "\\n\");\n\n"
+ ret += "\treturn cmocka_run_group_tests(tests, NULL, NULL);\n"
+ ret += "}"
+
+ return ret
+
+ def set_tested_file_path(self, path):
+ call_dir = os.getcwd() + os.sep
+ p = os.path.normpath(call_dir + path)
+
+ if os.path.isfile(p):
+ self.tested_file_path = p.split(self.get_main_tested_dir(), 1)[1]
+ return
+ elif os.path.isfile(self.get_main_tested_dir() + path):
+ self.tested_file_path = path
+ return
+
+ print(f"{os.path.join(self.get_main_tested_dir(), path)}")
+ print("Given path not exists!")
+ exit(1)
+
+ def set_main_UT_dir(self, path):
+ p = os.path.dirname(os.path.realpath(__file__)) + os.sep + path
+ p = os.path.normpath(os.path.dirname(p)) + os.sep
+ self.main_UT_dir = p
+
+ def get_main_UT_dir(self):
+ return self.main_UT_dir
+
+ def set_main_tested_dir(self, path):
+ p = os.path.dirname(os.path.realpath(__file__)) + os.sep + path
+ p = os.path.normpath(os.path.dirname(p)) + os.sep
+ self.main_tested_dir = p
+
+ def get_main_tested_dir(self):
+ return self.main_tested_dir
+
+ def get_tested_file_path(self):
+ return self.tested_file_path
+
+ def get_tested_function_name(self):
+ return self.tested_function_name
+
+
+def __main__():
+ if len(sys.argv) < 3:
+ print("No path to tested file or tested function name given !")
+ sys.exit(1)
+
+ tested_file_path = sys.argv[1]
+ tested_function_name = sys.argv[2]
+
+ generator = TestGenerator(tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS,
+ tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT,
+ tested_file_path, tested_function_name)
+
+ generator.create_empty_test_file()
+
+
+if __name__ == "__main__":
+ __main__()
diff --git a/src/spdk/ocf/tests/unit/framework/prepare_sources_for_testing.py b/src/spdk/ocf/tests/unit/framework/prepare_sources_for_testing.py
new file mode 100755
index 000000000..03b49218a
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/prepare_sources_for_testing.py
@@ -0,0 +1,730 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import shutil
+import sys
+import re
+import os.path
+import subprocess
+import tests_config
+
+
+def run_command(args, verbose=True):
+ result = subprocess.run(" ".join(args), shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ result.stdout = result.stdout.decode("ASCII", errors='ignore')
+ result.stderr = result.stderr.decode("ASCII", errors='ignore')
+ if verbose:
+ print(result.stderr)
+ return result
+
+
+#
+# The purpose of this script is to remove unused function definitions,
+# which makes it possible to unit test every function from OCF.
+# It takes as a parameter the path to the file containing the function
+# under test; that file has to be preprocessed first.
+#
+# The output file of this script is not yet ready to build. Before that,
+# the definitions of the functions used by the tested function have to
+# be provided.
+#
+# In brief: this script allows wrapping all function calls in UT.
+#
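+#
+# Every test source file declares what it tests via markup tags parsed by this
+# script, for example (as in the cleaning.c/ocf_cleaner_run test):
+#
+#   <tested_file_path>src/cleaning/cleaning.c</tested_file_path>
+#   <tested_function>ocf_cleaner_run</tested_function>
+#   <functions_to_leave>
+#   ocf_cleaner_set_cmpl
+#   </functions_to_leave>
+#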
+
+class UnitTestsSourcesGenerator(object):
+ script_file_abs_path = ""
+ script_dir_abs_path = ""
+
+ main_UT_dir = ""
+ main_env_dir = ""
+ main_tested_dir = ""
+
+ ctags_path = ""
+
+ test_catalogues_list = []
+ dirs_to_include_list = []
+
+ tests_internal_includes_list = []
+ framework_includes = []
+
+ dirs_with_tests_list = []
+ test_files_paths_list = []
+
+ tested_files_paths_list = []
+
+ includes_to_copy_dict = {}
+
+ preprocessing_repo = ""
+ sources_to_test_repo = ""
+
+ def __init__(self):
+ self.script_file_abs_path = os.path.realpath(__file__)
+ self.script_dir_abs_path = os.path.normpath(
+ os.path.dirname(self.script_file_abs_path) + os.sep)
+
+ self.set_ctags_path()
+
+ self.set_main_UT_dir()
+ self.set_main_env_dir()
+ self.set_main_tested_dir()
+
+ self.test_catalogues_list = tests_config.DIRECTORIES_WITH_TESTS_LIST
+ self.set_includes_to_copy_dict(tests_config.INCLUDES_TO_COPY_DICT)
+ self.set_dirs_to_include()
+
+ self.set_tests_internal_includes_list()
+ self.set_framework_includes()
+ self.set_files_with_tests_list()
+ self.set_tested_files_paths_list()
+
+ self.set_preprocessing_repo()
+ self.set_sources_to_test_repo()
+
+ def preprocessing(self):
+ tested_files_list = self.get_tested_files_paths_list()
+ project_includes = self.get_dirs_to_include_list()
+ framework_includes = self.get_tests_internal_includes_list()
+
+ gcc_flags = " -fno-inline -Dstatic= -Dinline= -E "
+ gcc_command_template = "gcc "
+ for path in project_includes:
+ gcc_command_template += " -I " + path + " "
+
+ for path in framework_includes:
+ gcc_command_template += " -I " + path
+
+ gcc_command_template += gcc_flags
+
+ for path in tested_files_list:
+ preprocessing_dst = self.get_preprocessing_repo() \
+ + self.get_relative_path(path, self.get_main_tested_dir())
+ preprocessing_dst_dir = os.path.dirname(preprocessing_dst)
+ self.create_dir_if_not_exist(preprocessing_dst_dir)
+
+ gcc_command = gcc_command_template + path + " > " + preprocessing_dst
+
+ result = run_command([gcc_command])
+
+ if result.returncode != 0:
+ print(f"Generating preprocessing for {self.get_main_tested_dir() + path} failed!")
+ print(result.output)
+ run_command(["rm", "-f", preprocessing_dst])
+ continue
+
+ self.remove_hashes(preprocessing_dst)
+
+ print(f"Preprocessed file {path} saved to {preprocessing_dst}")
+
+ def copy_includes(self):
+ includes_dict = self.get_includes_to_copy_dict()
+
+ for dst, src in includes_dict.items():
+ src_path = os.path.normpath(self.get_main_tested_dir() + src)
+ if not os.path.isdir(src_path):
+ print(f"Directory {src_path} given to include does not exists!")
+ continue
+ dst_path = os.path.normpath(self.get_main_UT_dir() + dst)
+
+ shutil.rmtree(dst_path, ignore_errors=True)
+ shutil.copytree(src_path, dst_path)
+
+ def get_user_wraps(self, path):
+ functions_list = self.get_functions_list(path)
+ functions_list = [re.sub(r'__wrap_([\S]+)\s*[\d]+', r'\1', line)
+ for line in functions_list if re.search("__wrap_", line)]
+
+ return functions_list
+
+ def get_autowrap_file_path(self, test_file_path):
+ wrap_file_path = test_file_path.rsplit('.', 1)[0]
+ wrap_file_path = wrap_file_path + "_generated_wraps.c"
+ return wrap_file_path
+
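+ # For every undefined symbol referenced by the prepared test source (and not
+ # wrapped by hand in the test itself), an empty stub is emitted into
+ # <test_name>_generated_wraps.c, roughly of the form (illustrative):
+ #     int __wrap_foo(int a){}
+ # The actual signature is copied from the preprocessed tested source.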
+ def prepare_autowraps(self, test_file_path, preprocessed_file_path):
+ functions_to_wrap = self.get_functions_calls(
+ self.get_sources_to_test_repo() + test_file_path)
+ user_wraps = set(self.get_user_wraps(self.get_main_UT_dir() + test_file_path))
+
+ functions_to_wrap = functions_to_wrap - user_wraps
+
+ tags_list = self.get_functions_list(preprocessed_file_path, prototypes=True)
+
+ wrap_list = []
+
+ with open(preprocessed_file_path) as f:
+ code = f.readlines()
+ for function in functions_to_wrap:
+ if function.startswith("env_") or function.startswith("bug") \
+ or function.startswith("memcpy"):
+ # memcpy is on the list of ignored functions
+ # because it is a macro
+ continue
+ for tag in tags_list:
+ if function in tag:
+ name, line = tag.split()
+ if name == function:
+ line = int(line)
+ wrap_list.append(self.get_function_wrap(code, line))
+ break
+
+ wrap_file_path = self.get_main_UT_dir() + self.get_autowrap_file_path(test_file_path)
+
+ with open(wrap_file_path, "w") as f:
+ f.write("/* This file is generated by UT framework */\n")
+ for wrap in wrap_list:
+ f.write(wrap + "\n")
+
+ def prepare_sources_for_testing(self):
+ test_files_paths = self.get_files_with_tests_list()
+
+ for test_path in test_files_paths:
+ path = self.get_tested_file_path(self.get_main_UT_dir() + test_path)
+
+ preprocessed_tested_path = self.get_preprocessing_repo() + path
+ if not os.path.isfile(preprocessed_tested_path):
+ print(f"No preprocessed path for {test_path} test file.")
+ continue
+
+ tested_src = self.get_src_to_test(test_path, preprocessed_tested_path)
+
+ self.create_dir_if_not_exist(
+ self.get_sources_to_test_repo() + os.path.dirname(test_path))
+
+ with open(self.get_sources_to_test_repo() + test_path, "w") as f:
+ f.writelines(tested_src)
+ print(f"Sources for {test_path} saved in "
+ f"{self.get_sources_to_test_repo() + test_path}")
+
+ self.prepare_autowraps(test_path, preprocessed_tested_path)
+
+ def create_main_cmake_lists(self):
+ buf = "cmake_minimum_required(VERSION 2.6.0)\n\n"
+ buf += "project(OCF_unit_tests C)\n\n"
+
+ buf += "enable_testing()\n\n"
+
+ buf += "include_directories(\n"
+ dirs_to_inc = self.get_dirs_to_include_list() + self.get_framework_includes() \
+ + self.get_tests_internal_includes_list()
+ for path in dirs_to_inc:
+ buf += "\t" + path + "\n"
+ buf += ")\n\n"
+
+ includes = self.get_tests_internal_includes_list()
+ for path in includes:
+ buf += "\nadd_subdirectory(" + path + ")"
+ buf += "\n\n"
+
+ test_files = self.get_files_with_tests_list()
+ test_dirs_to_include = [os.path.dirname(path) for path in test_files]
+
+ test_dirs_to_include = self.remove_duplicates_from_list(test_dirs_to_include)
+
+ for path in test_dirs_to_include:
+ buf += "\nadd_subdirectory(" + self.get_sources_to_test_repo() + path + ")"
+
+ with open(self.get_main_UT_dir() + "CMakeLists.txt", "w") as f:
+ f.writelines(buf)
+
+ print(f"Main CMakeLists.txt generated written to {self.get_main_UT_dir()} CMakeLists.txt")
+
+ def generate_cmakes_for_tests(self):
+ test_files_paths = self.get_files_with_tests_list()
+
+ for test_path in test_files_paths:
+ tested_file_path = self.get_sources_to_test_repo() + test_path
+ if not os.path.isfile(tested_file_path):
+ print(f"No source to test for {test_path} test")
+ continue
+
+ test_file_path = self.get_main_UT_dir() + test_path
+
+ cmake_buf = self.generate_test_cmake_buf(test_file_path, tested_file_path)
+
+ cmake_path = self.get_sources_to_test_repo() + test_path
+ cmake_path = os.path.splitext(cmake_path)[0] + ".cmake"
+ with open(cmake_path, "w") as f:
+ f.writelines(cmake_buf)
+ print(f"cmake file for {test_path} written to {cmake_path}")
+
+ cmake_lists_path = os.path.dirname(cmake_path) + os.sep
+ self.update_cmakelists(cmake_lists_path, cmake_path)
+
+ def generate_test_cmake_buf(self, test_file_path, tested_file_path):
+ test_file_name = os.path.basename(test_file_path)
+ target_name = os.path.splitext(test_file_name)[0]
+
+ add_executable = "add_executable(" + target_name + " " + test_file_path + " " + \
+ tested_file_path + ")\n"
+
+ libraries = "target_link_libraries(" + target_name + " libcmocka.so ocf_env)\n"
+
+ add_test = "add_test(" + target_name + " ${CMAKE_CURRENT_BINARY_DIR}/" + target_name + ")\n"
+
+ tgt_properties = "set_target_properties(" + target_name + "\n" + \
+ "PROPERTIES\n" + \
+ "COMPILE_FLAGS \"-fno-inline -Dstatic= -Dinline= -w \"\n"
+
+ link_flags = self.generate_cmake_link_flags(test_file_path)
+ tgt_properties += link_flags + ")"
+
+ buf = add_executable + libraries + add_test + tgt_properties
+
+ return buf
+
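+ # Wrapped function names are turned into linker flags for the test binary,
+ # e.g. (illustrative):
+ #     LINK_FLAGS "-Wl,--wrap=ocf_cleaner_get_cache,--wrap=env_bit_test"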
+ def generate_cmake_link_flags(self, path):
+ ret = ""
+
+ autowraps_path = self.get_autowrap_file_path(path)
+ functions_to_wrap = self.get_functions_to_wrap(path)
+ functions_to_wrap += self.get_functions_to_wrap(autowraps_path)
+
+ for function_name in functions_to_wrap:
+ ret += ",--wrap=" + function_name
+ if len(ret) > 0:
+ ret = "LINK_FLAGS \"-Wl" + ret + "\"\n"
+
+ return ret
+
+ def update_cmakelists(self, cmake_lists_path, cmake_name):
+ with open(cmake_lists_path + "CMakeLists.txt", "a+") as f:
+ f.seek(0, os.SEEK_SET)
+ new_line = "include(" + os.path.basename(cmake_name) + ")\n"
+
+ if new_line not in f.read():
+ f.write(new_line)
+
+ def get_functions_to_wrap(self, path):
+ functions_list = self.get_functions_list(path)
+ functions_list = [re.sub(r'__wrap_([\S]+)\s*[\d]+', r'\1', line) for line in functions_list
+ if re.search("__wrap_", line)]
+
+ return functions_list
+
+ def get_functions_to_leave(self, path):
+ with open(path) as f:
+ lines = f.readlines()
+ buf = ''.join(lines)
+
+ tags_pattern = re.compile(r"<functions_to_leave>[\s\S]*</functions_to_leave>")
+
+ buf = re.findall(tags_pattern, buf)
+ if not len(buf) > 0:
+ return []
+
+ buf = buf[0]
+
+ buf = re.sub(r'<.*>', '', buf)
+ buf = re.sub(r'[^a-zA-Z0-9_\n]+', '', buf)
+
+ ret = buf.split("\n")
+ ret = [name for name in ret if name]
+ return ret
+
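+ # Each returned entry has the form "<function_name>\t<line_number>" (ctags -x
+ # output reduced with sed/cut), e.g. "ocf_cleaner_run\t123" (illustrative).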
+ def get_functions_list(self, file_path, prototypes=None):
+ ctags_path = self.get_ctags_path()
+
+ ctags_args = "--c-types=f"
+ if prototypes:
+ ctags_args += " --c-kinds=+p"
+ # find all functions' definitions | put tabs instead of spaces |
+ # take only columns with function name and line number | sort in descending order
+ result = run_command([ctags_path, "-x", ctags_args, file_path,
+ "--language-force=c | sed \"s/ \\+/\t/g\" | cut -f 1,3 | sort -nsr "
+ "-k 2"])
+
+ # 'output' is a string; convert it to a list
+ output = list(filter(None, result.stdout.split("\n")))
+ return output
+
+ def remove_functions_from_list(self, functions_list, to_remove_list):
+ ret = functions_list[:]
+ for function_name in to_remove_list:
+ ret = [line for line in ret if not re.search(r'\b%s\b' % function_name, line)]
+ return ret
+
+ def get_src_to_test(self, test_path, preprocessed_tested_path):
+ functions_to_leave = self.get_functions_to_leave(self.get_main_UT_dir() + test_path)
+
+ functions_to_leave.append(self.get_tested_function_name(self.get_main_UT_dir() + test_path))
+ functions_list = self.get_functions_list(preprocessed_tested_path)
+
+ functions_list = self.remove_functions_from_list(functions_list, functions_to_leave)
+
+ with open(preprocessed_tested_path) as f:
+ ret = f.readlines()
+ for function in functions_list:
+ line = function.split("\t")[1]
+ line = int(line)
+
+ self.remove_function_body(ret, line)
+
+ return ret
+
+ def set_tested_files_paths_list(self):
+ test_files_list = self.get_files_with_tests_list()
+
+ for f in test_files_list:
+ self.tested_files_paths_list.append(self.get_main_tested_dir()
+ + self.get_tested_file_path(
+ self.get_main_UT_dir() + f))
+
+ self.tested_files_paths_list = self.remove_duplicates_from_list(
+ self.tested_files_paths_list)
+
+ def get_tested_files_paths_list(self):
+ return self.tested_files_paths_list
+
+ def get_files_with_tests_list(self):
+ return self.test_files_paths_list
+
+ def set_files_with_tests_list(self):
+ test_catalogues_list = self.get_tests_catalouges_list()
+ for catalogue in test_catalogues_list:
+ dir_with_tests_path = self.get_main_UT_dir() + catalogue
+
+ for path, dirs, files in os.walk(dir_with_tests_path):
+ test_files = self.get_test_files_from_dir(path + os.sep)
+
+ for test_file_name in test_files:
+ test_rel_path = os.path.relpath(path + os.sep + test_file_name,
+ self.get_main_UT_dir())
+ self.test_files_paths_list.append(test_rel_path)
+
+ def are_markups_valid(self, path):
+ file_path = self.get_tested_file_path(path)
+ function_name = self.get_tested_function_name(path)
+
+ if file_path is None:
+ print(f"{path} file has no tested_file tag!")
+ return None
+ elif not os.path.isfile(self.get_main_tested_dir() + file_path):
+ print(f"Tested file given in {path} does not exist!")
+ return None
+
+ if function_name is None:
+ print(f"{path} file has no tested_function_name tag!")
+ return None
+
+ return True
+
+ def create_dir_if_not_exist(self, path):
+ if not os.path.isdir(path):
+ try:
+ os.makedirs(path)
+ except Exception:
+ pass
+ return True
+ return None
+
+ def get_tested_file_path(self, test_file_path):
+ with open(test_file_path) as f:
+ buf = f.readlines()
+ buf = ''.join(buf)
+
+ tags_pattern = re.compile(r"<tested_file_path>[\s\S]*</tested_file_path>")
+ buf = re.findall(tags_pattern, buf)
+
+ if not len(buf) > 0:
+ return None
+
+ buf = buf[0]
+
+ buf = re.sub(r'<[^>]*>', '', buf)
+ buf = re.sub(r'\s+', '', buf)
+
+ if len(buf) > 0:
+ return buf
+
+ return None
+
+ def get_tested_function_name(self, test_file_path):
+ with open(test_file_path) as f:
+ buf = f.readlines()
+ buf = ''.join(buf)
+
+ tags_pattern = re.compile(r"<tested_function>[\s\S]*</tested_function>")
+ buf = re.findall(tags_pattern, buf)
+
+ if not len(buf) > 0:
+ return None
+
+ buf = buf[0]
+
+ buf = re.sub(r'<[^>]*>', '', buf)
+ buf = re.sub('//', '', buf)
+ buf = re.sub(r'\s+', '', buf)
+
+ if len(buf) > 0:
+ return buf
+
+ return None
+
+ def get_test_files_from_dir(self, path):
+ ret = os.listdir(path)
+ ret = [name for name in ret if os.path.isfile(path + os.sep + name)
+ and (name.endswith(".c") or name.endswith(".h"))]
+ ret = [name for name in ret if self.are_markups_valid(path + name)]
+
+ return ret
+
+ def get_list_of_directories(self, path):
+ if not os.path.isdir(path):
+ return []
+
+ ret = os.listdir(path)
+ ret = [name for name in ret if not os.path.isfile(path + os.sep + name)]
+ ret = [os.path.normpath(name) + os.sep for name in ret]
+
+ return ret
+
+ def remove_hashes(self, path):
+ with open(path) as f:
+ buf = f.readlines()
+
+ buf = [l for l in buf if not re.search(r'.*#.*', l)]
+
+ with open(path, "w") as f:
+ f.writelines(buf)
+
+
+ def find_function_end(self, code_lines_list, first_line_of_function_index):
+ brackets_counter = 0
+ current_line_index = first_line_of_function_index
+
+ while True:
+ if "{" in code_lines_list[current_line_index]:
+ brackets_counter += code_lines_list[current_line_index].count("{")
+ brackets_counter -= code_lines_list[current_line_index].count("}")
+ break
+ else:
+ current_line_index += 1
+
+ while brackets_counter > 0:
+ current_line_index += 1
+ if "{" in code_lines_list[current_line_index]:
+ brackets_counter += code_lines_list[current_line_index].count("{")
+ brackets_counter -= code_lines_list[current_line_index].count("}")
+ elif "}" in code_lines_list[current_line_index]:
+ brackets_counter -= code_lines_list[current_line_index].count("}")
+
+ return current_line_index
+
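+ # The prepared test source is compiled into a throw-away object file and
+ # "nm -u" lists its undefined symbols, i.e. every external function it calls;
+ # these become the candidates for --wrap (see prepare_autowraps()).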
+ def get_functions_calls(self, file_to_compile):
+ out_dir = "/tmp/ocf_ut"
+ out_file = out_dir + "/ocf_obj.o"
+ self.create_dir_if_not_exist(out_dir)
+ cmd = "/usr/bin/gcc -o " + out_file + " -c " + file_to_compile + " &> /dev/null"
+ run_command([cmd], verbose=None)
+ result = run_command(["/usr/bin/nm -u " + out_file + " | cut -f2 -d\'U\'"])
+ return set(result.stdout.split())
+
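+ # Reduces a full definition located by ctags to a bare declaration, e.g.
+ # (illustrative) "int foo(int a) { return a + 1; }" becomes "int foo(int a);".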
+ def remove_function_body(self, code_lines_list, line_id):
+ try:
+ while "{" not in code_lines_list[line_id]:
+ if ";" in code_lines_list[line_id]:
+ return
+ line_id += 1
+ except IndexError:
+ return
+
+ last_line_id = self.find_function_end(code_lines_list, line_id)
+
+ code_lines_list[line_id] = code_lines_list[line_id].split("{")[0]
+ code_lines_list[line_id] += ";"
+
+ del code_lines_list[line_id + 1: last_line_id + 1]
+
+ def get_function_wrap(self, code_lines_list, line_id):
+ ret = []
+ # Line numbering starts with one, list indexing with zero
+ line_id -= 1
+
+ # If the return type is not present on this line, it should be on the line above
+ try:
+ code_lines_list[line_id].split("(")[0].rsplit()[1]
+ except IndexError:
+ line_id -= 1
+
+ while True:
+ ret.append(code_lines_list[line_id])
+ if ")" in code_lines_list[line_id]:
+ break
+ line_id += 1
+
+ # Tags list contains both prototypes and definitions; here we recognize
+ # which one we are dealing with
+ delimiter = ""
+ try:
+ if "{" in ret[-1] or "{" in ret[-2]:
+ delimiter = "{"
+ else:
+ delimiter = ";"
+ except IndexError:
+ delimiter = ";"
+
+ ret[-1] = ret[-1].split(delimiter)[0]
+ ret[-1] += "{}"
+
+ function_name = ""
+ line_with_name = 0
+ try:
+ function_name = ret[line_with_name].split("(")[0].rsplit(maxsplit=1)[1]
+ except IndexError:
+ line_with_name = 1
+ function_name = ret[line_with_name].split("(")[0]
+
+ function_new_name = "__wrap_" + function_name.replace("*", "")
+ ret[0] = ret[0].replace(function_name, function_new_name)
+
+ return ''.join(ret)
+
+ def set_ctags_path(self):
+ result = run_command(["/usr/bin/ctags --version &> /dev/null"])
+ if result.returncode == 0:
+ path = "/usr/bin/ctags "
+ result = run_command([path, "--c-types=f"], verbose=None)
+ if not re.search("unrecognized option", result.stdout, re.IGNORECASE):
+ self.ctags_path = path
+ return
+
+ result = run_command(["/usr/local/bin/ctags --version &> /dev/null"])
+ if result.returncode == 0:
+ path = "/usr/local/bin/ctags "
+ result = run_command(["path", "--c-types=f"], verbose=None)
+ if not re.search("unrecognized option", result.stdout, re.IGNORECASE):
+ self.ctags_path = path
+ return
+
+ print("ERROR: Current ctags version don't support \"--c-types=f\" parameter!")
+ exit(1)
+
+ def get_ctags_path(self):
+ return self.ctags_path
+
+ def get_tests_catalouges_list(self):
+ return self.test_catalogues_list
+
+ def get_relative_path(self, original_path, part_to_remove):
+ return original_path.split(part_to_remove, 1)[1]
+
+ def get_dirs_to_include_list(self):
+ return self.dirs_to_include_list
+
+ def set_dirs_to_include(self):
+ self.dirs_to_include_list = [self.get_main_tested_dir() + name
+ for name in
+ tests_config.DIRECTORIES_TO_INCLUDE_FROM_PROJECT_LIST]
+
+ def set_tests_internal_includes_list(self):
+ self.tests_internal_includes_list = [self.get_main_UT_dir() + name
+ for name in
+ tests_config.DIRECTORIES_TO_INCLUDE_FROM_UT_LIST]
+
+ def set_preprocessing_repo(self):
+ self.preprocessing_repo = self.get_main_UT_dir() \
+ + tests_config.PREPROCESSED_SOURCES_REPOSITORY
+
+ def set_sources_to_test_repo(self):
+ self.sources_to_test_repo = self.get_main_UT_dir() + tests_config.SOURCES_TO_TEST_REPOSITORY
+
+ def get_sources_to_test_repo(self):
+ return self.sources_to_test_repo
+
+ def get_preprocessing_repo(self):
+ return self.preprocessing_repo
+
+ def get_tests_internal_includes_list(self):
+ return self.tests_internal_includes_list
+
+ def get_script_dir_path(self):
+ return os.path.normpath(self.script_dir_abs_path) + os.sep
+
+ def get_main_UT_dir(self):
+ return os.path.normpath(self.main_UT_dir) + os.sep
+
+ def get_main_env_dir(self):
+ return os.path.normpath(self.main_env_dir) + os.sep
+
+ def get_main_tested_dir(self):
+ return os.path.normpath(self.main_tested_dir) + os.sep
+
+ def remove_duplicates_from_list(self, l):
+ return list(set(l))
+
+ def set_framework_includes(self):
+ self.framework_includes = tests_config.FRAMEWORK_DIRECTORIES_TO_INCLUDE_LIST
+
+ def get_framework_includes(self):
+ return self.framework_includes
+
+ def set_includes_to_copy_dict(self, files_to_copy_dict):
+ self.includes_to_copy_dict = files_to_copy_dict
+
+ def get_includes_to_copy_dict(self):
+ return self.includes_to_copy_dict
+
+ def set_main_env_dir(self):
+ main_env_dir = os.path.normpath(os.path.normpath(self.get_script_dir_path()
+ + os.sep
+ + tests_config.
+ MAIN_DIRECTORY_OF_ENV_FILES))
+ if not os.path.isdir(main_env_dir):
+ print("Given path to main env directory is wrong!")
+ sys.exit(1)
+
+ self.main_env_dir = main_env_dir
+
+ def set_main_UT_dir(self):
+ main_UT_dir = os.path.normpath(os.path.normpath(self.get_script_dir_path()
+ + os.sep
+ + tests_config.
+ MAIN_DIRECTORY_OF_UNIT_TESTS))
+ if not os.path.isdir(main_UT_dir):
+ print("Given path to main UT directory is wrong!")
+ sys.exit(1)
+
+ self.main_UT_dir = main_UT_dir
+
+ def set_main_tested_dir(self):
+ main_tested_dir = os.path.normpath(os.path.normpath(self.get_script_dir_path()
+ + os.sep
+ + tests_config.
+ MAIN_DIRECTORY_OF_TESTED_PROJECT))
+ if not os.path.isdir(main_tested_dir):
+ print("Given path to main tested directory is wrong!")
+ sys.exit(1)
+
+ self.main_tested_dir = main_tested_dir
+
+
+def __main__():
+ generator = UnitTestsSourcesGenerator()
+ generator.copy_includes()
+ generator.preprocessing()
+ generator.prepare_sources_for_testing()
+ generator.create_main_cmake_lists()
+ generator.generate_cmakes_for_tests()
+
+ print("Files for testing generated!")
+
+
+if __name__ == "__main__":
+ __main__()
diff --git a/src/spdk/ocf/tests/unit/framework/run_unit_tests.py b/src/spdk/ocf/tests/unit/framework/run_unit_tests.py
new file mode 100755
index 000000000..a0a477fc6
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/run_unit_tests.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import tests_config
+import os
+import sys
+import subprocess
+
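+# Overall flow of this script: remove artifacts left by previous runs, link the
+# env sources and copy the OCF public headers into ocf_env/, generate sources
+# and cmake files with prepare_sources_for_testing.py, then configure, build
+# and run the tests with cmake/make, writing logs to the logs/ directory.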
+
+def run_command(args):
+ result = subprocess.run(" ".join(args), shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ result.stdout = result.stdout.decode("ASCII", errors='ignore')
+ result.stderr = result.stderr.decode("ASCII", errors='ignore')
+ return result
+
+
+def rmv_cmd(trgt):
+ """Remove target with force"""
+ result = run_command(["rm", "-rf", trgt])
+ if result.returncode != 0:
+ raise Exception("Removing {} before testing failed!".
+ format(os.path.dirname(os.path.realpath(__file__))
+ + trgt))
+
+
+def cleanup():
+ """Delete files created by unit tests"""
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ test_dir = os.path.join(script_path, tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS)
+ result = run_command(["cd", test_dir])
+ if result.returncode != 0:
+ raise Exception("Cleanup before testing failed!")
+
+ # r=root, d=directories, f = files
+ for r, d, f in os.walk(test_dir):
+ for file in f:
+ if '_generated_wrap' in file:
+ rmv_cmd(os.path.join(r, file))
+
+ rmv_cmd("preprocessed_sources_repository")
+ rmv_cmd("sources_to_test_repository")
+ rmv_cmd("build")
+
+ result = run_command(["cd", script_path])
+ if result.returncode != 0:
+ raise Exception("Cleanup before testing failed!")
+
+
+cleanup()
+
+script_path = os.path.dirname(os.path.realpath(__file__))
+
+main_UT_dir = os.path.join(script_path, tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS)
+
+main_env_dir = os.path.join(script_path, tests_config.MAIN_DIRECTORY_OF_ENV_FILES)
+
+main_tested_dir = os.path.join(script_path, tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT)
+
+if not os.path.isdir(os.path.join(main_UT_dir, "ocf_env", "ocf")):
+ try:
+ os.makedirs(os.path.join(main_UT_dir, "ocf_env", "ocf"))
+ except Exception:
+ raise Exception("Cannot create ocf_env/ocf directory!")
+
+result = run_command(["ln", "-fs",
+ os.path.join(main_env_dir, "*"),
+ os.path.join(main_UT_dir, "ocf_env")])
+if result.returncode != 0:
+ raise Exception("Preparing env sources for testing failed!")
+
+result = run_command(["cp", "-r",
+ os.path.join(main_tested_dir, "inc", "*"),
+ os.path.join(main_UT_dir, "ocf_env", "ocf")])
+if result.returncode != 0:
+ raise Exception("Preparing sources for testing failed!")
+
+result = run_command([os.path.join(script_path, "prepare_sources_for_testing.py")])
+if result.returncode != 0:
+ raise Exception("Preparing sources for testing failed!")
+
+build_dir = os.path.join(main_UT_dir, "build")
+logs_dir = os.path.join(main_UT_dir, "logs")
+
+try:
+ if not os.path.isdir(build_dir):
+ os.makedirs(build_dir)
+ if not os.path.isdir(logs_dir):
+ os.makedirs(logs_dir)
+except Exception:
+ raise Exception("Cannot create logs directory!")
+
+os.chdir(build_dir)
+
+cmake_result = run_command(["cmake", ".."])
+
+print(cmake_result.stdout)
+with open(os.path.join(logs_dir, "cmake.output"), "w") as f:
+ f.write(cmake_result.stdout)
+ f.write(cmake_result.stderr)
+
+if cmake_result.returncode != 0:
+ with open(os.path.join(logs_dir, "tests.output"), "w") as f:
+ f.write("Cmake step failed! More details in cmake.output.")
+ sys.exit(1)
+
+make_result = run_command(["make", "-j"])
+
+print(make_result.stdout)
+with open(os.path.join(logs_dir, "make.output"), "w") as f:
+ f.write(make_result.stdout)
+ f.write(make_result.stderr)
+
+if make_result.returncode != 0:
+ with open(os.path.join(logs_dir, "tests.output"), "w") as f:
+ f.write("Make step failed! More details in make.output.")
+ sys.exit(1)
+
+test_result = run_command(["make", "test"])
+
+print(test_result.stdout)
+with open(os.path.join(logs_dir, "tests.output"), "w") as f:
+ f.write(test_result.stdout)
diff --git a/src/spdk/ocf/tests/unit/framework/tests_config.py b/src/spdk/ocf/tests/unit/framework/tests_config.py
new file mode 100644
index 000000000..52c00286b
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/framework/tests_config.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+# ALL PATHS SHOULD END WITH A "/" CHARACTER
+
+MAIN_DIRECTORY_OF_TESTED_PROJECT = "../../../"
+
+MAIN_DIRECTORY_OF_ENV_FILES = MAIN_DIRECTORY_OF_TESTED_PROJECT + "env/posix/"
+
+MAIN_DIRECTORY_OF_UNIT_TESTS = "../tests/"
+
+# Paths to all directories in which tests are stored. All paths should be relative to
+# MAIN_DIRECTORY_OF_UNIT_TESTS
+DIRECTORIES_WITH_TESTS_LIST = ["cleaning/", "metadata/", "mngt/", "concurrency/", "engine/",
+ "eviction/", "utils/", "promotion/", "ocf_freelist.c/"]
+
+# Paths to all directories containing files with sources. All paths should be relative to
+# MAIN_DIRECTORY_OF_TESTED_PROJECT
+DIRECTORIES_TO_INCLUDE_FROM_PROJECT_LIST = ["src/", "src/cleaning/", "src/engine/", "src/metadata/",
+ "src/eviction/", "src/mngt/", "src/concurrency/",
+ "src/utils/", "inc/", "src/promotion/",
+ "src/promotion/nhit/"]
+
+# Paths to all directories from directory with tests, which should also be included
+DIRECTORIES_TO_INCLUDE_FROM_UT_LIST = ["ocf_env/"]
+
+# Paths to include, required by cmake, cmocka, cunit
+FRAMEWORK_DIRECTORIES_TO_INCLUDE_LIST = ["${CMOCKA_PUBLIC_INCLUDE_DIRS}", "${CMAKE_BINARY_DIR}",
+ "${CMAKE_CURRENT_SOURCE_DIR}"]
+
+# Path to directory containing all sources after preprocessing. Should be relative to
+# MAIN_DIRECTORY_OF_UNIT_TESTS
+PREPROCESSED_SOURCES_REPOSITORY = "preprocessed_sources_repository/"
+
+# Path to directory containing all sources after removing unneeded functions and cmake files for
+# tests
+SOURCES_TO_TEST_REPOSITORY = "sources_to_test_repository/"
+
+# List of includes.
+# Directories will be recursively copied to given destinations in directory with tests.
+# key - destination in dir with tests
+# value - path in tested project to dir which should be copied
+INCLUDES_TO_COPY_DICT = {'ocf_env/ocf/': "inc/"}
diff --git a/src/spdk/ocf/tests/unit/tests/.gitignore b/src/spdk/ocf/tests/unit/tests/.gitignore
new file mode 100644
index 000000000..fce0dafa3
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/.gitignore
@@ -0,0 +1,6 @@
+build/
+ocf_env/
+logs/
+preprocessed_sources_repository/
+sources_to_test_repository/
+*generated_wraps.c
diff --git a/src/spdk/ocf/tests/unit/tests/add_new_test_file.py b/src/spdk/ocf/tests/unit/tests/add_new_test_file.py
new file mode 100755
index 000000000..8377b856c
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/add_new_test_file.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import subprocess
+import sys
+import os
+
+
+args = ' '.join(sys.argv[1:])
+script_path = os.path.dirname(os.path.realpath(__file__))
+framework_script_path = os.path.join(script_path, "../framework/add_new_test_file.py")
+framework_script_path = os.path.normpath(framework_script_path)
+result = subprocess.run(framework_script_path + " " + args, shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+status = result.returncode
+output = result.stdout.decode("ASCII", errors='ignore')
+
+print(output)
+
+if status == 0:
+ path = output.split(" ", 1)[0]
+ with open(script_path + os.sep + "header.c", "r") as header_file:
+ with open(path, "r+") as source_file:
+ source = source_file.readlines()
+
+ source_file.seek(0, os.SEEK_SET)
+ source_file.truncate()
+
+ source_file.writelines(header_file.readlines())
+ source_file.writelines(source)
diff --git a/src/spdk/ocf/tests/unit/tests/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c b/src/spdk/ocf/tests/unit/tests/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c
new file mode 100644
index 000000000..f1361e96b
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+/*
+<tested_file_path>src/cleaning/alru.c</tested_file_path>
+<tested_function>cleaning_policy_alru_initialize_part</tested_function>
+<functions_to_leave>
+</functions_to_leave>
+*/
+
+#undef static
+#undef inline
+/*
+ * These headers must be in the test source file. It's important that cmocka.h is
+ * last.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+/*
+ * Headers from tested target.
+ */
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cleaning.h"
+#include "alru.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_realloc.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+#include "../ocf_def_priv.h"
+
+#include "cleaning/alru.c/cleaning_policy_alru_initialize_part_test_generated_wraps.c"
+
+
+static void cleaning_policy_alru_initialize_test01(void **state)
+{
+ int result;
+ struct ocf_cache *cache;
+ ocf_part_id_t part_id = 0;
+
+ int collision_table_entries = 900729;
+
+ print_test_description("Check if all variables are set correctly");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
+ cache->device = test_malloc(sizeof(struct ocf_cache_device));
+ cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));
+
+ cache->device->collision_table_entries = collision_table_entries;
+
+ result = cleaning_policy_alru_initialize_part(cache, &cache->user_parts[part_id], 1, 1);
+
+ assert_int_equal(result, 0);
+
+ assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 0);
+ assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, collision_table_entries);
+ assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, collision_table_entries);
+
+ assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);
+
+ test_free(cache->device->runtime_meta);
+ test_free(cache->device);
+ test_free(cache->user_parts[part_id].runtime);
+ test_free(cache);
+}
+
+static void cleaning_policy_alru_initialize_test02(void **state)
+{
+ int result;
+ struct ocf_cache *cache;
+ ocf_part_id_t part_id = 0;
+
+ uint32_t collision_table_entries = 900729;
+
+ print_test_description("Check if only appropirate variables are changed");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
+ cache->device = test_malloc(sizeof(struct ocf_cache_device));
+ cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));
+
+ env_atomic_set(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size, 1);
+ cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head = -collision_table_entries;
+ cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail = -collision_table_entries;
+
+ result = cleaning_policy_alru_initialize_part(cache, cache->user_parts[part_id], 0, 0);
+
+ assert_int_equal(result, 0);
+
+ assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 1);
+ assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, -collision_table_entries);
+ assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, -collision_table_entries);
+
+ assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);
+
+ test_free(cache->device->runtime_meta);
+ test_free(cache->device);
+ test_free(cache->user_parts[part_id].runtime);
+ test_free(cache);
+}
+
+/*
+ * Main function. It runs tests.
+ */
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(cleaning_policy_alru_initialize_test01),
+ cmocka_unit_test(cleaning_policy_alru_initialize_test02)
+ };
+
+ print_message("Unit test of alru.c\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
+
+
+
diff --git a/src/spdk/ocf/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c b/src/spdk/ocf/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c
new file mode 100644
index 000000000..9a7f6188a
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/*
+ * These headers must be in the test source file. It's important that cmocka.h is
+ * last.
+ */
+
+#undef static
+#undef inline
+
+//<tested_file_path>src/cleaning/cleaning.c</tested_file_path>
+//<tested_function>ocf_cleaner_run</tested_function>
+//<functions_to_leave>
+//ocf_cleaner_set_cmpl
+//</functions_to_leave>
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+/*
+ * Headers from tested target.
+ */
+#include "cleaning.h"
+#include "alru.h"
+#include "acp.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_ctx_priv.h"
+#include "../mngt/ocf_mngt_common.h"
+#include "../metadata/metadata.h"
+
+#include "cleaning/cleaning.c/ocf_cleaner_run_test_generated_wraps.c"
+
+/*
+ * Mocked functions. Here we must deliver functions definitions which are not
+ * in tested source file.
+ */
+
+
+int __wrap_cleaning_alru_perform_cleaning(struct ocf_cache *cache, ocf_cleaner_end_t cmpl)
+{
+ function_called();
+ return mock();
+}
+
+
+ocf_cache_t __wrap_ocf_cleaner_get_cache(ocf_cleaner_t c)
+{
+ function_called();
+ return mock_ptr_type(struct ocf_cache*);
+}
+
+bool __wrap_ocf_mngt_cache_is_locked(ocf_cache_t cache)
+{
+ function_called();
+ return mock();
+}
+
+
+int __wrap__ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ function_called();
+}
+
+int __wrap_env_bit_test(int nr, const void *addr)
+{
+ function_called();
+ return mock();
+}
+
+int __wrap_ocf_mngt_cache_trylock(env_rwsem *s)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_mngt_cache_unlock(env_rwsem *s)
+{
+ function_called();
+}
+
+static void cleaner_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ function_called();
+}
+
+/*
+ * Tests of functions. Every test name must be written to tests array in main().
+ * Declarations always look the same: static void test_name(void **state);
+ */
+
+static void ocf_cleaner_run_test01(void **state)
+{
+ struct ocf_cache *cache;
+ ocf_part_id_t part_id;
+ uint32_t io_queue;
+ int result;
+
+ print_test_description("Parts are ready for cleaning - should perform cleaning"
+ " for each part");
+
+ //Initialize needed structures.
+ cache = test_malloc(sizeof(*cache));
+ cache->conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+ cache->conf_meta->cleaning_policy_type = ocf_cleaning_alru;
+
+
+ expect_function_call(__wrap_ocf_cleaner_get_cache);
+ will_return(__wrap_ocf_cleaner_get_cache, cache);
+
+ expect_function_call(__wrap_env_bit_test);
+ will_return(__wrap_env_bit_test, 1);
+
+ expect_function_call(__wrap_ocf_mngt_cache_is_locked);
+ will_return(__wrap_ocf_mngt_cache_is_locked, 0);
+
+ expect_function_call(__wrap_ocf_mngt_cache_trylock);
+ will_return(__wrap_ocf_mngt_cache_trylock, 0);
+
+ expect_function_call(__wrap__ocf_cleaner_run_check_dirty_inactive);
+ will_return(__wrap__ocf_cleaner_run_check_dirty_inactive, 0);
+
+ expect_function_call(__wrap_cleaning_alru_perform_cleaning);
+ will_return(__wrap_cleaning_alru_perform_cleaning, 0);
+
+ ocf_cleaner_set_cmpl(&cache->cleaner, cleaner_complete);
+
+ ocf_cleaner_run(&cache->cleaner, 0xdeadbeef);
+
+ /* Release allocated memory if allocated with test_* functions */
+
+ test_free(cache->conf_meta);
+ test_free(cache);
+}
+
+/*
+ * Main function. It runs tests.
+ */
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_cleaner_run_test01)
+ };
+
+ print_message("Unit test of cleaning.c\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c b/src/spdk/ocf/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c
new file mode 100644
index 000000000..72fe4c674
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c
@@ -0,0 +1,135 @@
+/*
+ * <tested_file_path>src/concurrency/ocf_metadata_concurrency.c</tested_file_path>
+ * <tested_function>ocf_req_hash_lock_rd</tested_function>
+ * <functions_to_leave>
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf_metadata_concurrency.h"
+#include "../metadata/metadata_misc.h"
+
+#include "concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency_generated_wraps.c"
+
+void __wrap_ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ check_expected(hash);
+ function_called();
+}
+
+#define MAP_SIZE 16
+
+static struct ocf_request *alloc_req()
+{
+ struct ocf_request *req;
+ struct ocf_cache *cache = test_malloc(sizeof(*cache));
+
+ req = test_malloc(sizeof(*req) + MAP_SIZE * sizeof(req->map[0]));
+ req->map = req->__map;
+ req->cache = cache;
+
+ return req;
+}
+
+static void free_req(struct ocf_request *req)
+{
+ test_free(req->cache);
+ test_free(req);
+}
+
+static void _test_lock_order(struct ocf_request* req,
+ unsigned hash[], unsigned hash_count,
+ unsigned expected_call[], unsigned expected_call_count)
+{
+ unsigned i;
+
+ req->core_line_count = hash_count;
+
+ for (i = 0; i < hash_count; i++)
+ req->map[i].hash = hash[i];
+
+ for (i = 0; i < expected_call_count; i++) {
+ expect_function_call(__wrap_ocf_metadata_hash_lock);
+ expect_value(__wrap_ocf_metadata_hash_lock, hash, expected_call[i]);
+ }
+
+ ocf_req_hash_lock_rd(req);
+
+}
+
+static void ocf_req_hash_lock_rd_test01(void **state)
+{
+ struct ocf_request *req = alloc_req();
+ struct {
+ struct {
+ unsigned val[MAP_SIZE];
+ unsigned count;
+ } hash, expected_call;
+ } test_cases[] = {
+ {
+ .hash = {.val = {2}, .count = 1},
+ .expected_call = {.val = {2}, .count = 1}
+ },
+ {
+ .hash = {.val = {2, 3, 4}, .count = 3},
+ .expected_call = {.val = {2, 3, 4}, .count = 3}
+ },
+ {
+ .hash = {.val = {2, 3, 4, 0}, .count = 4},
+ .expected_call = {.val = {0, 2, 3, 4}, .count = 4}
+ },
+ {
+ .hash = {.val = {2, 3, 4, 0, 1, 2, 3, 4, 0, 1}, .count = 10},
+ .expected_call = {.val = {0, 1, 2, 3, 4}, .count = 5}
+ },
+ {
+ .hash = {.val = {4, 0}, .count = 2},
+ .expected_call = {.val = {0, 4}, .count = 2}
+ },
+ {
+ .hash = {.val = {0, 1, 2, 3, 4, 0, 1}, .count = 7},
+ .expected_call = {.val = {0, 1, 2, 3, 4}, .count = 5}
+ },
+ {
+ .hash = {.val = {1, 2, 3, 4, 0, 1}, .count = 6},
+ .expected_call = {.val = {0, 1, 2, 3, 4}, .count = 5}
+ },
+ };
+ const unsigned test_case_count = sizeof(test_cases) / sizeof(test_cases[0]);
+ unsigned i;
+
+ req->cache->metadata.lock.num_hash_entries = 5;
+
+ print_test_description("Verify hash locking order\n");
+
+ for (i = 0; i < test_case_count; i++) {
+ _test_lock_order(req, test_cases[i].hash.val, test_cases[i].hash.count,
+ test_cases[i].expected_call.val, test_cases[i].expected_call.count);
+ }
+
+ free_req(req);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_req_hash_lock_rd_test01)
+ };
+
+ print_message("Unit test for ocf_req_hash_lock_rd\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/header.c b/src/spdk/ocf/tests/unit/tests/header.c
new file mode 100644
index 000000000..fb2a6e5bd
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/header.c
@@ -0,0 +1,5 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
diff --git a/src/spdk/ocf/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c b/src/spdk/ocf/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c
new file mode 100644
index 000000000..7f83ca9c1
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/metadata/metadata_collision.c/metadata_collision.c
@@ -0,0 +1,69 @@
+/*
+ * <tested_file_path>src/metadata/metadata_collision.c</tested_file_path>
+ * <tested_function>ocf_metadata_hash_func</tested_function>
+ * <functions_to_leave>
+ * ocf_metadata_get_hash
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "metadata.h"
+#include "../utils/utils_cache_line.h"
+
+#include "metadata/metadata_collision.c/metadata_collision_generated_wraps.c"
+
+static void metadata_hash_func_test01(void **state)
+{
+ struct ocf_cache *cache;
+ bool wrap = false;
+ ocf_cache_line_t i;
+ ocf_cache_line_t hash_cur, hash_next;
+ unsigned c;
+ ocf_core_id_t core_ids[] = {0, 1, 2, 100, OCF_CORE_MAX};
+ ocf_core_id_t core_id;
+
+ print_test_description("Verify that hash function increments by 1 and generates"
+ "collision after 'hash_table_entries' successive core lines");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->device = test_malloc(sizeof(*cache->device));
+ cache->device->hash_table_entries = 10;
+
+ for (c = 0; c < sizeof(core_ids) / sizeof(core_ids[0]); c++) {
+ core_id = core_ids[c];
+ for (i = 0; i < cache->device->hash_table_entries + 1; i++) {
+ hash_cur = ocf_metadata_hash_func(cache, i, core_id);
+ hash_next = ocf_metadata_hash_func(cache, i + 1, core_id);
+ /* make sure hash value is within expected range */
+ assert(hash_cur < cache->device->hash_table_entries);
+ assert(hash_next < cache->device->hash_table_entries);
+ /* hash should either increment by 1 or overflow to 0 */
+ assert(hash_next == (hash_cur + 1) % cache->device->hash_table_entries);
+ }
+ }
+
+ test_free(cache->device);
+ test_free(cache);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(metadata_hash_func_test01)
+ };
+
+ print_message("Unit test of src/metadata/metadata_collision.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/_cache_mngt_set_cache_mode_test.c b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/_cache_mngt_set_cache_mode_test.c
new file mode 100644
index 000000000..fc74bb880
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/_cache_mngt_set_cache_mode_test.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+//<tested_file_path>src/mngt/ocf_mngt_cache.c</tested_file_path>
+//<tested_function>_cache_mngt_set_cache_mode</tested_function>
+
+/*
+<functions_to_leave>
+ocf_mngt_cache_mode_has_lazy_write
+</functions_to_leave>
+*/
+
+#undef static
+#undef inline
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+/*
+ * Headers from tested target.
+ */
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_core_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_pipeline.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../eviction/ops.h"
+#include "../ocf_ctx_priv.h"
+#include "../cleaning/cleaning.h"
+
+#include "mngt/ocf_mngt_cache.c/_cache_mngt_set_cache_mode_test_generated_wraps.c"
+/*
+ * Mocked functions
+ */
+bool __wrap_ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
+{
+ function_called();
+ return mock();
+}
+
+ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache)
+{
+ return cache->owner;
+}
+
+int __wrap_ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, ...)
+{
+ function_called();
+ return mock();
+}
+
+int __wrap_ocf_mngt_cache_flush(ocf_cache_t cache, bool interruption)
+{
+ function_called();
+ return mock();
+}
+
+bool __wrap_env_bit_test(int nr, const volatile unsigned long *addr)
+{
+}
+
+void __wrap_env_atomic_set(env_atomic *a, int i)
+{
+ function_called();
+}
+
+int __wrap_env_atomic_read(const env_atomic *a)
+{
+ function_called();
+ return mock();
+}
+
+int __wrap_ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap__cache_mngt_update_initial_dirty_clines(ocf_cache_t cache)
+{
+ function_called();
+}
+
+static void _cache_mngt_set_cache_mode_test01(void **state)
+{
+ ocf_cache_mode_t mode_old = -20;
+ ocf_cache_mode_t mode_new = ocf_cache_mode_none;
+ struct ocf_ctx ctx = {
+ .logger = 0x1, /* Just not NULL, we don't care. */
+ };
+ struct ocf_superblock_config sb_config = {
+ .cache_mode = mode_old,
+ };
+ struct ocf_cache *cache;
+ int result;
+
+ print_test_description("Invalid new mode produces appropirate error code");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->owner = &ctx;
+ cache->conf_meta = &sb_config;
+
+ expect_function_call(__wrap_ocf_cache_mode_is_valid);
+ will_return(__wrap_ocf_cache_mode_is_valid, 0);
+
+ result = _cache_mngt_set_cache_mode(cache, mode_new);
+
+ assert_int_equal(result, -OCF_ERR_INVAL);
+ assert_int_equal(cache->conf_meta->cache_mode, mode_old);
+
+ test_free(cache);
+}
+
+static void _cache_mngt_set_cache_mode_test02(void **state)
+{
+ ocf_cache_mode_t mode_old = ocf_cache_mode_wt;
+ ocf_cache_mode_t mode_new = ocf_cache_mode_wt;
+ struct ocf_ctx ctx = {
+ .logger = 0x1, /* Just not NULL, we don't care. */
+ };
+ struct ocf_superblock_config sb_config = {
+ .cache_mode = mode_old,
+ };
+ struct ocf_cache *cache;
+ uint8_t flush = 0;
+ int result;
+
+ print_test_description("Attempt to set mode the same as previous");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->owner = &ctx;
+ cache->conf_meta = &sb_config;
+
+ expect_function_call(__wrap_ocf_cache_mode_is_valid);
+ will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+ expect_function_call(__wrap_ocf_log_raw);
+ will_return(__wrap_ocf_log_raw, 0);
+
+ result = _cache_mngt_set_cache_mode(cache, mode_new);
+
+ assert_int_equal(result, 0);
+ assert_int_equal(cache->conf_meta->cache_mode, mode_old);
+
+ test_free(cache);
+}
+
+static void _cache_mngt_set_cache_mode_test03(void **state)
+{
+ ocf_cache_mode_t mode_old = ocf_cache_mode_wb;
+ ocf_cache_mode_t mode_new = ocf_cache_mode_wa;
+ struct ocf_ctx ctx = {
+ .logger = 0x1, /* Just not NULL, we don't care. */
+ };
+ struct ocf_superblock_config sb_config = {
+ .cache_mode = mode_old,
+ };
+ struct ocf_cache *cache;
+ int result;
+ int i;
+
+ print_test_description("Old cache mode is write back. "
+ "Setting new cache mode is succesfull");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->owner = &ctx;
+ cache->conf_meta = &sb_config;
+
+ expect_function_call(__wrap_ocf_cache_mode_is_valid);
+ will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+ expect_function_call(__wrap__cache_mngt_update_initial_dirty_clines);
+
+ expect_function_call(__wrap_ocf_log_raw);
+ will_return(__wrap_ocf_log_raw, 0);
+
+ result = _cache_mngt_set_cache_mode(cache, mode_new);
+
+ assert_int_equal(result, 0);
+ assert_int_equal(cache->conf_meta->cache_mode, mode_new);
+
+ test_free(cache);
+}
+
+static void _cache_mngt_set_cache_mode_test04(void **state)
+{
+ ocf_cache_mode_t mode_old = ocf_cache_mode_wt;
+ ocf_cache_mode_t mode_new = ocf_cache_mode_wa;
+ struct ocf_ctx ctx = {
+ .logger = 0x1, /* Just not NULL, we don't care. */
+ };
+ struct ocf_superblock_config sb_config = {
+ .cache_mode = mode_old,
+ };
+ struct ocf_cache *cache;
+ int result;
+ int i;
+
+ print_test_description("Mode changed successfully");
+
+ cache = test_malloc(sizeof(*cache));
+ cache->owner = &ctx;
+ cache->conf_meta = &sb_config;
+
+ expect_function_call(__wrap_ocf_cache_mode_is_valid);
+ will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+ expect_function_call(__wrap_ocf_log_raw);
+ will_return(__wrap_ocf_log_raw, 0);
+
+ result = _cache_mngt_set_cache_mode(cache, mode_new);
+
+ assert_int_equal(result, 0);
+ assert_int_equal(cache->conf_meta->cache_mode, mode_new);
+
+ test_free(cache);
+}
+
+/*
+ * Main function. It runs tests.
+ */
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(_cache_mngt_set_cache_mode_test01),
+ cmocka_unit_test(_cache_mngt_set_cache_mode_test02),
+ cmocka_unit_test(_cache_mngt_set_cache_mode_test03),
+ cmocka_unit_test(_cache_mngt_set_cache_mode_test04),
+ };
+
+ print_message("Unit test of _cache_mngt_set_cache_mode\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c
new file mode 100644
index 000000000..e14b6aace
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c
@@ -0,0 +1,189 @@
+/*
+ *<tested_file_path>src/mngt/ocf_mngt_cache.c</tested_file_path>
+ * <tested_function>ocf_mngt_cache_set_fallback_pt_error_threshold</tested_function>
+ * <functions_to_leave>
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ *</functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_core_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_part.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_pipeline.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../eviction/ops.h"
+#include "../ocf_ctx_priv.h"
+#include "../cleaning/cleaning.h"
+
+#include "mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold_generated_wraps.c"
+
+int __wrap_ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
+ const char *fmt, ...)
+{
+ function_called();
+}
+
+ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache)
+{
+ function_called();
+}
+
+int __wrap_ocf_mngt_cache_set_fallback_pt(ocf_cache_t cache)
+{
+ function_called();
+}
+
+static void ocf_mngt_cache_set_fallback_pt_error_threshold_test01(void **state)
+{
+ struct ocf_cache *cache;
+ int new_threshold;
+ int result;
+
+ print_test_description("Appropriate error code on invalid threshold value");
+
+ cache = test_malloc(sizeof(*cache));
+
+ new_threshold = -1;
+
+ result = ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(result, -OCF_ERR_INVAL);
+
+
+ new_threshold = 10000001;
+
+ result = ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(result, -OCF_ERR_INVAL);
+
+ test_free(cache);
+}
+
+static void ocf_mngt_cache_set_fallback_pt_error_threshold_test02(void **state)
+{
+ struct ocf_cache *cache;
+ int new_threshold;
+ int old_threshold;
+
+ print_test_description("Invalid new threshold value doesn't change current threshold");
+
+ cache = test_malloc(sizeof(*cache));
+
+ new_threshold = -1;
+ old_threshold = cache->fallback_pt_error_threshold = 1000;
+
+ ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(cache->fallback_pt_error_threshold, old_threshold);
+
+
+ new_threshold = 10000001;
+ old_threshold = cache->fallback_pt_error_threshold = 1000;
+
+ ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(cache->fallback_pt_error_threshold, old_threshold);
+
+ test_free(cache);
+}
+
+static void ocf_mngt_cache_set_fallback_pt_error_threshold_test03(void **state)
+{
+ struct ocf_cache *cache;
+ int new_threshold, old_threshold;
+
+ print_test_description("Setting new threshold value");
+
+ cache = test_malloc(sizeof(*cache));
+
+ new_threshold = 5000;
+ old_threshold = cache->fallback_pt_error_threshold = 1000;
+
+ ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(cache->fallback_pt_error_threshold, new_threshold);
+
+
+ new_threshold = 1000000;
+ old_threshold = cache->fallback_pt_error_threshold = 1000;
+
+ ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(cache->fallback_pt_error_threshold, new_threshold);
+
+
+ new_threshold = 0;
+ old_threshold = cache->fallback_pt_error_threshold = 1000;
+
+ ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(cache->fallback_pt_error_threshold, new_threshold);
+
+ test_free(cache);
+}
+
+static void ocf_mngt_cache_set_fallback_pt_error_threshold_test04(void **state)
+{
+ struct ocf_cache *cache;
+ int new_threshold;
+ int result;
+
+ print_test_description("Return appropriate value on success");
+
+ cache = test_malloc(sizeof(*cache));
+
+ new_threshold = 5000;
+
+ result = ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(result, 0);
+
+
+ new_threshold = 1000000;
+
+ result = ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(result, 0);
+
+
+ new_threshold = 0;
+
+ result = ocf_mngt_cache_set_fallback_pt_error_threshold(cache, new_threshold);
+
+ assert_int_equal(result, 0);
+
+ test_free(cache);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test01),
+ cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test02),
+ cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test03),
+ cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test04)
+ };
+
+ print_message("Unit test of src/mngt/ocf_mngt_cache.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_io_class.c/ocf_mngt_io_class.c b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_io_class.c/ocf_mngt_io_class.c
new file mode 100644
index 000000000..1abec1623
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/mngt/ocf_mngt_io_class.c/ocf_mngt_io_class.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+/*
+ * <tested_file_path>src/mngt/ocf_mngt_io_class.c</tested_file_path>
+ * <tested_function>ocf_mngt_cache_io_classes_configure</tested_function>
+ * <functions_to_leave>
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ * _ocf_mngt_io_class_edit
+ * _ocf_mngt_io_class_configure
+ * _ocf_mngt_io_class_remove
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "ocf_mngt_common.h"
+#include "../ocf_priv.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../utils/utils_part.h"
+#include "../eviction/ops.h"
+#include "ocf_env.h"
+
+#include "mngt/ocf_mngt_io_class.c/ocf_mngt_io_class_generated_wraps.c"
+
+/* Functions mocked for testing purposes */
+bool __wrap_ocf_part_is_added(struct ocf_user_part *part)
+{
+ function_called();
+ return mock();
+}
+
+int __wrap__ocf_mngt_set_partition_size(struct ocf_cache *cache,
+ ocf_part_id_t part_id, uint32_t min, uint32_t max)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_part_set_prio(struct ocf_cache *cache,
+ struct ocf_user_part *part, int16_t prio)
+{
+ function_called();
+}
+
+bool __wrap_ocf_part_is_valid(struct ocf_user_part *part)
+{
+ function_called();
+ return mock();
+}
+
+
+void __wrap_ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
+ bool valid)
+{
+ function_called();
+ check_expected(valid);
+ check_expected(id);
+}
+
+int __wrap__ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
+ const struct ocf_mngt_io_class_config *cfg)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_part_sort(struct ocf_cache *cache)
+{
+ function_called();
+}
+
+int __wrap_ocf_metadata_flush_superblock(struct ocf_cache *cache)
+{
+ /* no-op mock: flush result is not tracked by these tests */
+ return 0;
+}
+
+/* Helper function for test preparation */
+static inline void setup_valid_config(struct ocf_mngt_io_class_config *cfg,
+ bool remove)
+{
+ int i;
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ cfg[i].class_id = i;
+ cfg[i].name = remove ? NULL : "test_io_class_name";
+ cfg[i].prio = i;
+ cfg[i].cache_mode = ocf_cache_mode_pt;
+ cfg[i].min_size = 2*i;
+ cfg[i].max_size = 20*i;
+ }
+}
+
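+/* With remove == true the helper leaves the io class names NULL, which is how
+ * test03 below drives the removal path of the tested function (every class
+ * except the default one is expected to be invalidated). */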
+static void ocf_mngt_io_classes_configure_test03(void **state)
+{
+ struct ocf_cache *cache;
+ struct ocf_mngt_io_classes_config cfg = {0};
+ int result, i;
+
+ print_test_description("Remove all io classes");
+
+ cache = test_malloc(sizeof(*cache));
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ cache->user_parts[i].config =
+ test_malloc(sizeof(struct ocf_user_part_config));
+ }
+ cache->device = 1;
+
+ setup_valid_config(cfg.config, true);
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
+ will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
+ }
+
+ /* Removing default io_class is not allowed */
+ for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
+ expect_function_call(__wrap_ocf_part_is_valid);
+ will_return(__wrap_ocf_part_is_valid, 1);
+
+ expect_function_call(__wrap_ocf_part_set_valid);
+ /* Test assumes default partition has id equal 0 */
+ expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN + 1,
+ OCF_IO_CLASS_ID_MAX);
+ expect_value(__wrap_ocf_part_set_valid, valid, false);
+ }
+
+ expect_function_call(__wrap_ocf_part_sort);
+
+ result = ocf_mngt_cache_io_classes_configure(cache, &cfg);
+
+ assert_int_equal(result, 0);
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++)
+ test_free(cache->user_parts[i].config);
+
+ test_free(cache);
+}
+
+static void ocf_mngt_io_classes_configure_test02(void **state)
+{
+ struct ocf_cache *cache;
+ struct ocf_mngt_io_classes_config cfg = {0};
+ int result, i;
+
+ cache = test_malloc(sizeof(*cache));
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ cache->user_parts[i].config =
+ test_malloc(sizeof(struct ocf_user_part_config));
+ }
+ cache->device = 1;
+
+ setup_valid_config(cfg.config, false);
+
+ print_test_description("Configure all possible io classes");
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
+ expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
+ will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
+ }
+
+ /* Configure default io_class */
+ expect_function_call(__wrap_ocf_part_is_added);
+ will_return(__wrap_ocf_part_is_added, 1);
+
+ expect_function_call(__wrap__ocf_mngt_set_partition_size);
+ will_return(__wrap__ocf_mngt_set_partition_size, 0);
+
+ expect_function_call(__wrap_ocf_part_set_prio);
+
+ /* Configure custom io_classes */
+ for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
+ expect_function_call(__wrap_ocf_part_is_added);
+ will_return(__wrap_ocf_part_is_added, 1);
+
+ expect_function_call(__wrap__ocf_mngt_set_partition_size);
+ will_return(__wrap__ocf_mngt_set_partition_size, 0);
+
+ expect_function_call(__wrap_ocf_part_is_valid);
+ will_return(__wrap_ocf_part_is_valid, 0);
+
+ expect_function_call(__wrap_ocf_part_set_valid);
+ expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN,
+ OCF_IO_CLASS_ID_MAX);
+ expect_value(__wrap_ocf_part_set_valid, valid, true);
+
+ expect_function_call(__wrap_ocf_part_set_prio);
+ }
+
+ expect_function_call(__wrap_ocf_part_sort);
+
+ result = ocf_mngt_cache_io_classes_configure(cache, &cfg);
+
+ assert_int_equal(result, 0);
+
+ for (i = 0; i < OCF_IO_CLASS_MAX; i++)
+ test_free(cache->user_parts[i].config);
+
+ test_free(cache);
+}
+
+static void ocf_mngt_io_classes_configure_test01(void **state)
+{
+ struct ocf_cache *cache;
+ struct ocf_mngt_io_classes_config cfg = {0};
+ int error_code = -OCF_ERR_INVAL;
+ int result;
+
+ print_test_description("Invalid config - "
+ "termination with error");
+
+ cache = test_malloc(sizeof(*cache));
+
+ expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
+ will_return(__wrap__ocf_mngt_io_class_validate_cfg, error_code);
+
+ result = ocf_mngt_cache_io_classes_configure(cache, &cfg);
+
+ assert_int_equal(result, error_code);
+
+ test_free(cache);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_mngt_io_classes_configure_test01),
+ cmocka_unit_test(ocf_mngt_io_classes_configure_test02),
+ cmocka_unit_test(ocf_mngt_io_classes_configure_test03)
+ };
+
+ print_message("Unit test of src/mngt/ocf_mngt_io_class.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/ocf_env/CMakeLists.txt b/src/spdk/ocf/tests/unit/tests/ocf_env/CMakeLists.txt
new file mode 100644
index 000000000..5ab2fea4f
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/ocf_env/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(ocf_env ocf_env.c /usr/include/sys/types.h /usr/include/setjmp.h /usr/include/cmocka.h)
+add_definitions(-Dstatic= -Dinline= )
+target_link_libraries(ocf_env pthread z)
diff --git a/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_get_put.c b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_get_put.c
new file mode 100644
index 000000000..cc9eed3cc
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_get_put.c
@@ -0,0 +1,382 @@
+/*
+ * <tested_file_path>src/ocf_freelist.c</tested_file_path>
+ * <tested_function>ocf_freelist_get_cache_line</tested_function>
+ * <functions_to_leave>
+ * ocf_freelist_init
+ * ocf_freelist_deinit
+ * ocf_freelist_populate
+ * next_phys_invalid
+ * ocf_freelist_lock
+ * ocf_freelist_trylock
+ * ocf_freelist_unlock
+ * _ocf_freelist_remove_cache_line
+ * ocf_freelist_get_cache_line_fast
+ * ocf_freelist_get_cache_line_slow
+ * ocf_freelist_add_cache_line
+ * ocf_freelist_get_cache_line_ctx
+ * get_next_victim_freelist
+ * ocf_freelist_put_cache_line
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+
+#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
+
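+/* Environment mocks: each will_return(__wrap_env_get_execution_context, n)
+ * in the tests below pins which per-context freelist the next get/put
+ * operation targets. */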
+ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
+{
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context_count(void)
+{
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context(void)
+{
+ return mock();
+}
+
+void __wrap_env_put_execution_context(unsigned ctx)
+{
+}
+
+/* simulate no striping */
+ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
+{
+ return phy;
+}
+
+bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ return mock();
+}
+
+/* metadata partition info interface mock: */
+
+#define max_clines 100
+
+struct {
+ ocf_cache_line_t prev;
+ ocf_cache_line_t next;
+} partition_list[max_clines];
+
+
+void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id,
+ ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
+{
+ assert_int_equal(part_id, PARTITION_INVALID);
+ partition_list[line].prev = prev_line;
+ partition_list[line].next = next_line;
+}
+
+void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t *part_id,
+ ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
+{
+ if (part_id)
+ *part_id = PARTITION_INVALID;
+ if (prev_line)
+ *prev_line = partition_list[line].prev;
+ if (next_line)
+ *next_line = partition_list[line].next;
+}
+
+void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t prev_line)
+{
+ partition_list[line].prev = prev_line;
+}
+
+void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t next_line)
+{
+ partition_list[line].next = next_line;
+}
+
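+/* ocf_freelist_populate() splits the cachelines into contiguous per-context
+ * ranges (see the list layout comments inside each test), so the tests first
+ * populate the freelist and then pick lines from selected contexts. */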
+static void ocf_freelist_get_cache_line_get_fast(void **state)
+{
+ unsigned num_cls = 8;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+ ocf_cache_line_t line;
+
+ print_test_description("Verify get free cache line get fast path");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+ will_return_maybe(__wrap_metadata_test_valid_any, false);
+
+ freelist = ocf_freelist_init(NULL);
+
+ ocf_freelist_populate(freelist, num_cls);
+
+ /* now there are following cachelines on per-context lists:
+ * ctx 0: 0, 1, 2
+ * ctx 1: 3, 4, 5
+ * ctx 2: 6, 7
+ */
+
+ /* get cline from context 1 */
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 3);
+
+ /* ctx 0: 0, 1, 2
+ * ctx 1: _, 4, 5
+ * ctx 2: 6, 7 */
+
+ /* get cline from context 2 */
+ will_return(__wrap_env_get_execution_context, 2);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 6);
+
+ /* ctx 0: 0, 1, 2
+ * ctx 1: _, 4, 5
+ * ctx 2: _, 7 */
+
+ /* get cline from context 1 */
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 4);
+
+ /* ctx 0: 0, 1, 2
+ * ctx 1: _, _, 5
+ * ctx 2: _, 7 */
+
+ /* get cline from context 0 */
+ will_return(__wrap_env_get_execution_context, 0);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 0);
+
+ /* ctx 0: _, 1, 2
+ * ctx 1: _, _, 5
+ * ctx 2: _, 7 */
+
+ /* get cline from context 0 */
+ will_return(__wrap_env_get_execution_context, 0);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 1);
+
+ /* ctx 0: _, _, 2
+ * ctx 1: _, _, 5
+ * ctx 2: _, 7 */
+
+ /* get cline from context 0 */
+ will_return(__wrap_env_get_execution_context, 0);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 2);
+
+ /* ctx 0: _, _, _,
+ * ctx 1: _, _, 5
+ * ctx 2: _, 7 */
+
+ /* get cline from context 2 */
+ will_return(__wrap_env_get_execution_context, 2);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 7);
+
+ /* ctx 0: _, _, _,
+ * ctx 1: _, _, 5
+ * ctx 2: _, _ */
+
+ /* get cline from context 1 */
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 5);
+
+ /* ctx 0: _, _, _,
+ * ctx 1: _, _, _
+ * ctx 2: _, _ */
+
+ ocf_freelist_deinit(freelist);
+}
+
+static void ocf_freelist_get_cache_line_get_slow(void **state)
+{
+ unsigned num_cls = 8;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+ ocf_cache_line_t line;
+
+ print_test_description("Verify get free cache line get slow path");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+ will_return_maybe(__wrap_metadata_test_valid_any, false);
+
+ /* always return exec ctx 0 */
+ will_return_maybe(__wrap_env_get_execution_context, 0);
+
+ freelist = ocf_freelist_init(NULL);
+
+ ocf_freelist_populate(freelist, num_cls);
+
+ /* now there are following cachelines on per-context lists:
+ * ctx 0: 0, 1, 2
+ * ctx 1: 3, 4, 5
+ * ctx 2: 6, 7
+ */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 0);
+
+ /* ctx 0: _, 1, 2
+ * ctx 1: 3, 4, 5
+ * ctx 2: 6, 7 */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 1);
+
+ /* ctx 0: _, _, 2
+ * ctx 1: 3, 4, 5
+ * ctx 2: 6, 7 */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 2);
+
+ /* ctx 0: _, _, _
+ * ctx 1: 3, 4, 5
+ * ctx 2: 6, 7 */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 3);
+
+ /* ctx 0: _, _, _
+ * ctx 1: _, 4, 5
+ * ctx 2: 6, 7 */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 6);
+
+ /* ctx 0: _, _, _
+ * ctx 1: _, 4, 5
+ * ctx 2: _, 7 */
+
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 4);
+
+ /* ctx 0: _, _, _
+ * ctx 1: _, _, 5
+ * ctx 2: _, 7 */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 7);
+
+ /* ctx 0: _, _, _
+ * ctx 1: _, _, 5
+ * ctx 2: _, _ */
+
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 5);
+
+ /* ctx 0: _, _, _,
+ * ctx 1: _, _, _
+ * ctx 2: _, _ */
+
+ ocf_freelist_deinit(freelist);
+}
+
+static void ocf_freelist_get_cache_line_put(void **state)
+{
+ unsigned num_cls = 8;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+ ocf_cache_line_t line;
+
+ print_test_description("Verify freelist cacheline put");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+ will_return_maybe(__wrap_metadata_test_valid_any, false);
+
+ freelist = ocf_freelist_init(NULL);
+
+ ocf_freelist_populate(freelist, num_cls);
+
+ /* get some clines from the freelists */
+ will_return(__wrap_env_get_execution_context, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+ will_return(__wrap_env_get_execution_context, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+ will_return(__wrap_env_get_execution_context, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+ will_return(__wrap_env_get_execution_context, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+ will_return(__wrap_env_get_execution_context, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+
+ /* ctx 0:
+ * ctx 1: 4, 5
+ * ctx 2: 7 */
+
+ will_return(__wrap_env_get_execution_context, 1);
+ ocf_freelist_put_cache_line(freelist, 0);
+
+ will_return(__wrap_env_get_execution_context, 1);
+ ocf_freelist_put_cache_line(freelist, 2);
+
+ will_return(__wrap_env_get_execution_context, 2);
+ ocf_freelist_put_cache_line(freelist, 3);
+
+ /* ctx 0:
+ * ctx 1: 4, 5, 0, 2
+ * ctx 2: 7, 3 */
+
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 4);
+
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 5);
+
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 0);
+
+ will_return(__wrap_env_get_execution_context, 1);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 2);
+
+ will_return(__wrap_env_get_execution_context, 2);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 7);
+
+ will_return(__wrap_env_get_execution_context, 2);
+ assert(ocf_freelist_get_cache_line(freelist, &line));
+ assert_int_equal(line, 3);
+
+ ocf_freelist_deinit(freelist);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_freelist_get_cache_line_get_fast),
+ cmocka_unit_test(ocf_freelist_get_cache_line_get_slow),
+ cmocka_unit_test(ocf_freelist_get_cache_line_put)
+ };
+
+ print_message("Unit test for ocf_freelist_get_cache_line\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_init.c b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_init.c
new file mode 100644
index 000000000..387bbe8a1
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_init.c
@@ -0,0 +1,68 @@
+/*
+ * <tested_file_path>src/ocf_freelist.c</tested_file_path>
+ * <tested_function>ocf_freelist_populate</tested_function>
+ * <functions_to_leave>
+ * ocf_freelist_init
+ * ocf_freelist_deinit
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+
+#include "ocf_freelist.c/ocf_freelist_init_generated_wraps.c"
+
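+/* Both dependency mocks are expected to be called exactly once during
+ * ocf_freelist_init(): the freelist sizes itself from the collision table
+ * entry count and the number of execution contexts. */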
+ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
+{
+ function_called();
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context_count(void)
+{
+ function_called();
+ return mock();
+}
+
+static void ocf_freelist_init_test01(void **state)
+{
+ unsigned num_cls = 9;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ ocf_cache_t cache = (ocf_cache_t)0x1234;
+
+ print_test_description("Freelist initialization test");
+
+ expect_function_call(__wrap_ocf_metadata_collision_table_entries);
+ will_return(__wrap_ocf_metadata_collision_table_entries, num_cls);
+
+ expect_function_call(__wrap_env_get_execution_context_count);
+ will_return(__wrap_env_get_execution_context_count, num_ctxts);
+
+ freelist = ocf_freelist_init(cache);
+ assert_non_null(freelist);
+
+ ocf_freelist_deinit(freelist);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_freelist_init_test01)
+ };
+
+ print_message("Unit test of ocf_freelist_init\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_locks.c b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_locks.c
new file mode 100644
index 000000000..b4a2b5624
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_locks.c
@@ -0,0 +1,213 @@
+/*
+ * <tested_file_path>src/ocf_freelist.c</tested_file_path>
+ * <tested_function>ocf_freelist_get_cache_line</tested_function>
+ * <functions_to_leave>
+ * ocf_freelist_init
+ * ocf_freelist_deinit
+ * ocf_freelist_populate
+ * next_phys_invalid
+ * ocf_freelist_unlock
+ * _ocf_freelist_remove_cache_line
+ * ocf_freelist_get_cache_line_fast
+ * ocf_freelist_get_cache_line_slow
+ * ocf_freelist_add_cache_line
+ * ocf_freelist_get_cache_line_ctx
+ * get_next_victim_freelist
+ * ocf_freelist_put_cache_line
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+
+#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
+
+ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
+{
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context_count(void)
+{
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context(void)
+{
+ return mock();
+}
+
+void __wrap_env_put_execution_context(unsigned ctx)
+{
+}
+
+/* simulate no striping */
+ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
+{
+ return phy;
+}
+
+bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ return mock();
+}
+
+void __wrap_ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
+{
+ function_called();
+ check_expected(ctx);
+}
+
+int __wrap_ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
+{
+ function_called();
+ check_expected(ctx);
+ return mock();
+}
+
+/* metadata partition info interface mock: */
+
+#define max_clines 100
+
+struct {
+ ocf_cache_line_t prev;
+ ocf_cache_line_t next;
+} partition_list[max_clines];
+
+
+void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id,
+ ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
+{
+ assert_int_equal(part_id, PARTITION_INVALID);
+ partition_list[line].prev = prev_line;
+ partition_list[line].next = next_line;
+}
+
+void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t *part_id,
+ ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
+{
+ if (part_id)
+ *part_id = PARTITION_INVALID;
+ if (prev_line)
+ *prev_line = partition_list[line].prev;
+ if (next_line)
+ *next_line = partition_list[line].next;
+}
+
+void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t prev_line)
+{
+ partition_list[line].prev = prev_line;
+}
+
+void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_cache_line_t next_line)
+{
+ partition_list[line].next = next_line;
+}
+
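+/* Expected locking protocol of the get path (as asserted below): trylock the
+ * current context's list first, then trylock other non-empty contexts in the
+ * slow path, and fall back to a blocking lock only when all trylocks fail. */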
+static void ocf_freelist_get_put_locks(void **state)
+{
+ unsigned num_cls = 4;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+ ocf_cache_line_t line;
+
+ print_test_description("Verify lock/trylock sequence in get free cacheline");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+ will_return_maybe(__wrap_metadata_test_valid_any, false);
+
+ /* simulate context 1 for the entire test duration */
+ will_return_maybe(__wrap_env_get_execution_context, 1);
+
+ freelist = ocf_freelist_init(NULL);
+
+ ocf_freelist_populate(freelist, num_cls);
+
+ /****************************************************************/
+ /* verify fast path locking - successful trylock */
+
+ /* ctx 0: 0, 3
+ * ctx 1: 1
+ * ctx 2: 2
+ * slowpath next victim: 0
+ */
+
+ expect_value(__wrap_ocf_freelist_trylock, ctx, 1);
+ expect_function_call(__wrap_ocf_freelist_trylock);
+ will_return(__wrap_ocf_freelist_trylock, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+
+ /****************************************************************/
+ /* verify fast path locking - successful trylock in slowpath */
+
+ /* ctx 0: 0, 3
+ * ctx 1:
+ * ctx 2: 2
+ * slowpath next victim: 0 */
+
+ /* we expect trylock for context 0, since context 1 has empty list */
+ expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
+ expect_function_call(__wrap_ocf_freelist_trylock);
+ will_return(__wrap_ocf_freelist_trylock, 0);
+ ocf_freelist_get_cache_line(freelist, &line);
+
+ /****************************************************************/
+ /* verify fast path locking - trylock failure in slowpath */
+
+ /* ctx 0: 3
+ * ctx 1:
+ * ctx 2: 2
+ * slowpath next victim: 1 */
+
+ /* fastpath will fail immediately - context 1 list is empty */
+ /* next slowpath victim context (1) is empty - will move to ctx 2 */
+ /* so now we expect trylock for context no. 2 - injecting error here */
+ expect_value(__wrap_ocf_freelist_trylock, ctx, 2);
+ expect_function_call(__wrap_ocf_freelist_trylock);
+ will_return(__wrap_ocf_freelist_trylock, 1);
+
+ /* slowpath will attempt to trylock next non-empty context - 0
+ * - injecting error here as well */
+ expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
+ expect_function_call(__wrap_ocf_freelist_trylock);
+ will_return(__wrap_ocf_freelist_trylock, 1);
+
+ /* slowpath trylock loop failed - expecting full lock */
+ expect_value(__wrap_ocf_freelist_lock, ctx, 2);
+ expect_function_call(__wrap_ocf_freelist_lock);
+
+ /* execute freelist_get_cache_line */
+ ocf_freelist_get_cache_line(freelist, &line);
+
+ /****************************************************************/
+
+ ocf_freelist_deinit(freelist);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_freelist_get_put_locks)
+ };
+
+ print_message("Unit test for ocf_freelist_get_cache_line locking\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_populate.c b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_populate.c
new file mode 100644
index 000000000..4780667ea
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/ocf_freelist.c/ocf_freelist_populate.c
@@ -0,0 +1,138 @@
+/*
+ * <tested_file_path>src/ocf_freelist.c</tested_file_path>
+ * <tested_function>ocf_freelist_populate</tested_function>
+ * <functions_to_leave>
+ * ocf_freelist_init
+ * ocf_freelist_deinit
+ * ocf_freelist_populate
+ * next_phys_invalid
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "metadata/metadata.h"
+
+#include "ocf_freelist.c/ocf_freelist_populate_generated_wraps.c"
+
+ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
+{
+ return mock();
+}
+
+unsigned __wrap_env_get_execution_context_count(void)
+{
+ return mock();
+}
+
+/* simulate no striping */
+ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
+{
+ return phy;
+}
+
+bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+ return mock();
+}
+
+void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
+ ocf_cache_line_t line, ocf_part_id_t part_id,
+ ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
+{
+ print_message("%s %u %u %u\n", __func__, prev_line, line, next_line);
+ check_expected(line);
+ check_expected(part_id);
+ check_expected(next_line);
+ check_expected(prev_line);
+}
+
+#define expect_set_info(curr, part, next, prev) \
+ expect_value(__wrap_ocf_metadata_set_partition_info, line, curr); \
+ expect_value(__wrap_ocf_metadata_set_partition_info, part_id, part); \
+ expect_value(__wrap_ocf_metadata_set_partition_info, next_line, next); \
+ expect_value(__wrap_ocf_metadata_set_partition_info, prev_line, prev);
+
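+/* The freelist links populated cachelines into per-context lists; num_cls
+ * (the collision table size) serves as the end-of-list marker, so it shows up
+ * as the expected next/prev value at the list boundaries below. */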
+static void ocf_freelist_populate_test01(void **state)
+{
+ unsigned num_cls = 8;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+
+ print_test_description("Verify proper set_partition_info order and arguments - empty cache");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+ will_return_maybe(__wrap_metadata_test_valid_any, false);
+
+ freelist = ocf_freelist_init(NULL);
+
+ expect_set_info(0, PARTITION_INVALID, 1 , num_cls);
+ expect_set_info(1, PARTITION_INVALID, 2 , 0);
+ expect_set_info(2, PARTITION_INVALID, num_cls, 1);
+ expect_set_info(3, PARTITION_INVALID, 4 , num_cls);
+ expect_set_info(4, PARTITION_INVALID, 5 , 3);
+ expect_set_info(5, PARTITION_INVALID, num_cls, 4);
+ expect_set_info(6, PARTITION_INVALID, 7 , num_cls);
+ expect_set_info(7, PARTITION_INVALID, num_cls, 6);
+
+ ocf_freelist_populate(freelist, num_cls);
+
+ ocf_freelist_deinit(freelist);
+}
+
+static void ocf_freelist_populate_test02(void **state)
+{
+ unsigned num_cls = 8;
+ unsigned num_ctxts = 3;
+ ocf_freelist_t freelist;
+ unsigned ctx_iter, cl_iter;
+
+ print_test_description("Verify proper set_partition_info order and arguments - some valid clines");
+
+ will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
+ will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
+
+ freelist = ocf_freelist_init(NULL);
+
+ /* simulate only cachelines 2, 3, 4, 7 invalid */
+ will_return(__wrap_metadata_test_valid_any, true);
+ will_return(__wrap_metadata_test_valid_any, true);
+ will_return(__wrap_metadata_test_valid_any, false);
+ will_return(__wrap_metadata_test_valid_any, false);
+ will_return(__wrap_metadata_test_valid_any, false);
+ will_return(__wrap_metadata_test_valid_any, true);
+ will_return(__wrap_metadata_test_valid_any, true);
+ will_return(__wrap_metadata_test_valid_any, false);
+
+ expect_set_info(2, PARTITION_INVALID, 3 , num_cls);
+ expect_set_info(3, PARTITION_INVALID, num_cls, 2);
+ expect_set_info(4, PARTITION_INVALID, num_cls, num_cls);
+ expect_set_info(7, PARTITION_INVALID, num_cls, num_cls);
+
+ ocf_freelist_populate(freelist, 4);
+
+ ocf_freelist_deinit(freelist);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_freelist_populate_test01),
+ cmocka_unit_test(ocf_freelist_populate_test02)
+ };
+
+ print_message("Unit test of src/ocf_freelist.c\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/print_desc.h b/src/spdk/ocf/tests/unit/tests/print_desc.h
new file mode 100644
index 000000000..90de578f9
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/print_desc.h
@@ -0,0 +1,6 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#define print_test_description(description) print_message("[ DESC ] %s\n", description);
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_dec.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_dec.c
new file mode 100644
index 000000000..8bd40fcd2
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_dec.c
@@ -0,0 +1,63 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_dec</tested_function>
+ * <functions_to_leave>
+ * ocf_refcnt_init
+ * ocf_refcnt_inc
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_dec_generated_wraps.c"
+
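+/* ocf_refcnt_dec() is expected to return the post-decrement counter value,
+ * which the test cross-checks against the underlying atomic. */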
+static void ocf_refcnt_dec_test01(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Decrement subtracts 1 and returns proper value");
+
+ ocf_refcnt_init(&rc);
+
+ ocf_refcnt_inc(&rc);
+ ocf_refcnt_inc(&rc);
+ ocf_refcnt_inc(&rc);
+
+ val = ocf_refcnt_dec(&rc);
+ assert_int_equal(2, val);
+ val2 = env_atomic_read(&rc.counter);
+ assert_int_equal(2, val2);
+
+ val = ocf_refcnt_dec(&rc);
+ assert_int_equal(1, val);
+ val2 = env_atomic_read(&rc.counter);
+ assert_int_equal(1, val2);
+
+ val = ocf_refcnt_dec(&rc);
+ assert_int_equal(0, val);
+ val2 = env_atomic_read(&rc.counter);
+ assert_int_equal(0, val2);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_dec_test01)
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_freeze.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_freeze.c
new file mode 100644
index 000000000..a1385f3b2
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_freeze.c
@@ -0,0 +1,117 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_freeze</tested_function>
+ * <functions_to_leave>
+ * ocf_refcnt_init
+ * ocf_refcnt_inc
+ * ocf_refcnt_dec
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_freeze_generated_wraps.c"
+
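+/* ocf_refcnt_freeze() raises the freeze counter; while it is non-zero,
+ * ocf_refcnt_inc() is expected to fail (return 0) while decrements keep
+ * working (tests 02-04 below). */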
+static void ocf_refcnt_freeze_test01(void **state)
+{
+ struct ocf_refcnt rc;
+ int val;
+
+ print_test_description("Freeze increments freeze counter");
+
+ ocf_refcnt_init(&rc);
+
+ ocf_refcnt_freeze(&rc);
+ assert_int_equal(1, env_atomic_read(&rc.freeze));
+
+ ocf_refcnt_freeze(&rc);
+ assert_int_equal(2, env_atomic_read(&rc.freeze));
+}
+
+static void ocf_refcnt_freeze_test02(void **state)
+{
+ struct ocf_refcnt rc;
+ int val;
+
+ print_test_description("Increment returns 0 for frozen counter");
+
+ ocf_refcnt_init(&rc);
+
+ ocf_refcnt_inc(&rc);
+ ocf_refcnt_inc(&rc);
+ ocf_refcnt_inc(&rc);
+
+ ocf_refcnt_freeze(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+
+ assert_int_equal(0, val);
+}
+
+static void ocf_refcnt_freeze_test03(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Freeze bocks increment");
+
+ ocf_refcnt_init(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+ val = ocf_refcnt_inc(&rc);
+ val = ocf_refcnt_inc(&rc);
+
+ ocf_refcnt_freeze(&rc);
+
+ ocf_refcnt_inc(&rc);
+
+ val2 = env_atomic_read(&rc.counter);
+
+ assert_int_equal(val, val2);
+}
+
+static void ocf_refcnt_freeze_test04(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Freeze allows decrement");
+
+ ocf_refcnt_init(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+ val = ocf_refcnt_inc(&rc);
+ val = ocf_refcnt_inc(&rc);
+
+ ocf_refcnt_freeze(&rc);
+
+ val2 = ocf_refcnt_dec(&rc);
+ assert_int_equal(val2, val - 1);
+
+ val2 = ocf_refcnt_dec(&rc);
+ assert_int_equal(val2, val - 2);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_freeze_test01),
+ cmocka_unit_test(ocf_refcnt_freeze_test02),
+ cmocka_unit_test(ocf_refcnt_freeze_test03),
+ cmocka_unit_test(ocf_refcnt_freeze_test04),
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_inc.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_inc.c
new file mode 100644
index 000000000..d1b613335
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_inc.c
@@ -0,0 +1,52 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_inc</tested_function>
+ * <functions_to_leave>
+ * ocf_refcnt_init
+ * ocf_refcnt_dec
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_inc_generated_wraps.c"
+
+static void ocf_refcnt_inc_test(void **state)
+{
+ struct ocf_refcnt rc;
+ int val;
+
+ print_test_description("Increment adds 1 and returns proper value");
+
+ ocf_refcnt_init(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+ assert_int_equal(1, val);
+ assert_int_equal(1, env_atomic_read(&rc.counter));
+
+ val = ocf_refcnt_inc(&rc);
+ assert_int_equal(2, val);
+ assert_int_equal(2, env_atomic_read(&rc.counter));
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_inc_test)
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_init.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_init.c
new file mode 100644
index 000000000..3f28207dd
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_init.c
@@ -0,0 +1,51 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_init</tested_function>
+ * <functions_to_leave>
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_init_generated_wraps.c"
+
+static void ocf_refcnt_init_test(void **state)
+{
+ struct ocf_refcnt rc;
+
+ print_test_description("Reference counter is properly initialized");
+
+ env_atomic_set(&rc.counter, 1);
+ env_atomic_set(&rc.freeze, 1);
+ env_atomic_set(&rc.callback, 1);
+
+ ocf_refcnt_init(&rc);
+
+ assert_int_equal(0, env_atomic_read(&rc.counter));
+ assert_int_equal(0, env_atomic_read(&rc.freeze));
+ assert_int_equal(0, env_atomic_read(&rc.callback));
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_init_test)
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_register_zero_cb.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_register_zero_cb.c
new file mode 100644
index 000000000..fcb260c03
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_register_zero_cb.c
@@ -0,0 +1,102 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_register_zero_cb</tested_function>
+ * <functions_to_leave>
+ * ocf_refcnt_init
+ * ocf_refcnt_inc
+ * ocf_refcnt_dec
+ * ocf_refcnt_freeze
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_register_zero_cb_generated_wraps.c"
+
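+/* Callback registered via ocf_refcnt_register_zero_cb(); the tests verify both
+ * that it fires exactly once and that it receives the context pointer supplied
+ * at registration. */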
+static void zero_cb(void *ctx)
+{
+ function_called();
+ check_expected_ptr(ctx);
+}
+
+static void ocf_refcnt_register_zero_cb_test01(void **state)
+{
+ struct ocf_refcnt rc;
+ int val;
+ void *ptr = (void *)0x12345678;
+
+ print_test_description("Callback fires when counter drops to 0");
+
+ /* cnt = 2 */
+ ocf_refcnt_init(&rc);
+ ocf_refcnt_inc(&rc);
+ ocf_refcnt_inc(&rc);
+
+ /* freeze and register cb */
+ ocf_refcnt_freeze(&rc);
+ ocf_refcnt_register_zero_cb(&rc, zero_cb, ptr);
+
+ /* 2 -> 1 */
+ ocf_refcnt_dec(&rc);
+
+ val = env_atomic_read(&rc.callback);
+ assert_int_equal(1, val);
+
+ /* expect callback now */
+ expect_function_calls(zero_cb, 1);
+ expect_value(zero_cb, ctx, ptr);
+
+ /* 1 -> 0 */
+ ocf_refcnt_dec(&rc);
+
+ val = env_atomic_read(&rc.callback);
+ assert_int_equal(0, val);
+}
+
+static void ocf_refcnt_register_zero_cb_test02(void **state)
+{
+ struct ocf_refcnt rc;
+ int val;
+ void *ptr = (void *)0x12345678;
+
+ print_test_description("Callback fires when counter is already 0");
+
+ /* cnt = 0 */
+ ocf_refcnt_init(&rc);
+
+ /* freeze */
+ ocf_refcnt_freeze(&rc);
+
+ /* expect callback now */
+ expect_function_calls(zero_cb, 1);
+ expect_value(zero_cb, ctx, ptr);
+
+ /* register callback */
+ ocf_refcnt_register_zero_cb(&rc, zero_cb, ptr);
+
+ val = env_atomic_read(&rc.callback);
+ assert_int_equal(0, val);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_register_zero_cb_test01),
+ cmocka_unit_test(ocf_refcnt_register_zero_cb_test02),
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_unfreeze.c b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_unfreeze.c
new file mode 100644
index 000000000..2a2e10bf0
--- /dev/null
+++ b/src/spdk/ocf/tests/unit/tests/utils/utils_refcnt.c/utils_refcnt_unfreeze.c
@@ -0,0 +1,101 @@
+/*
+ * <tested_file_path>src/utils/utils_refcnt.c</tested_file_path>
+ * <tested_function>ocf_refcnt_unfreeze</tested_function>
+ * <functions_to_leave>
+ * ocf_refcnt_init
+ * ocf_refcnt_inc
+ * ocf_refcnt_dec
+ * ocf_refcnt_freeze
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "../utils/utils_refcnt.h"
+
+#include "utils/utils_refcnt.c/utils_refcnt_unfreeze_generated_wraps.c"
+
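+/* Freeze/unfreeze are counted: increments stay blocked until every freeze has
+ * been matched by an unfreeze (test03 below). */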
+static void ocf_refcnt_unfreeze_test01(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Unfreeze decrements freeze counter");
+
+ ocf_refcnt_init(&rc);
+
+ ocf_refcnt_freeze(&rc);
+ ocf_refcnt_freeze(&rc);
+ val = env_atomic_read(&rc.freeze);
+
+ ocf_refcnt_unfreeze(&rc);
+ val2 = env_atomic_read(&rc.freeze);
+ assert_int_equal(val2, val - 1);
+
+ ocf_refcnt_unfreeze(&rc);
+ val2 = env_atomic_read(&rc.freeze);
+ assert_int_equal(val2, val - 2);
+
+}
+
+static void ocf_refcnt_unfreeze_test02(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Unfreezed counter can be incremented");
+
+ ocf_refcnt_init(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+ ocf_refcnt_freeze(&rc);
+ ocf_refcnt_unfreeze(&rc);
+ val2 = ocf_refcnt_inc(&rc);
+
+ assert_int_equal(val2, val + 1);
+}
+
+static void ocf_refcnt_unfreeze_test03(void **state)
+{
+ struct ocf_refcnt rc;
+ int val, val2;
+
+ print_test_description("Two freezes require two unfreezes");
+
+ ocf_refcnt_init(&rc);
+
+ val = ocf_refcnt_inc(&rc);
+ ocf_refcnt_freeze(&rc);
+ ocf_refcnt_freeze(&rc);
+ ocf_refcnt_unfreeze(&rc);
+ val2 = ocf_refcnt_inc(&rc);
+
+ assert_int_equal(0, val2);
+
+ ocf_refcnt_unfreeze(&rc);
+ val2 = ocf_refcnt_inc(&rc);
+
+ assert_int_equal(val2, val + 1);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_refcnt_unfreeze_test01),
+ cmocka_unit_test(ocf_refcnt_unfreeze_test02),
+ cmocka_unit_test(ocf_refcnt_unfreeze_test03),
+ };
+
+ print_message("Unit test of src/utils/utils_refcnt.c");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}