Diffstat (limited to 'drivers/crypto/intel')
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto.h | 16
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 23
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.c | 183
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 88
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c | 101
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h | 86
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 97
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 76
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c | 231
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h | 188
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 380
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 127
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c | 1010
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c | 318
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h | 89
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.c | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport.c | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_asym_algs.c | 66
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_mig_dev.c | 130
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/Makefile | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 1
47 files changed, 2808 insertions, 539 deletions
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 2524091a5f..56985e3952 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -49,10 +49,10 @@ struct iaa_wq {
struct iaa_device *iaa_device;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct iaa_device_compression_mode {
@@ -73,10 +73,10 @@ struct iaa_device {
int n_wq;
struct list_head wqs;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct wq_table_entry {
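
The hunks above convert the per-wq and per-device counters from plain u64 to
atomic64_t so they can be updated safely from concurrent completion paths. A
minimal standalone sketch of the pattern, with hypothetical names (not the
driver's own structures):

    #include <linux/atomic.h>

    struct stats {
            atomic64_t calls;
            atomic64_t bytes;
    };

    static void record(struct stats *s, int n)
    {
            atomic64_inc(&s->calls);         /* lock-free increment */
            atomic64_add(n, &s->bytes);      /* lock-free add */
    }

    static u64 snapshot_bytes(struct stats *s)
    {
            return atomic64_read(&s->bytes); /* read the current 64-bit value */
    }
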
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index b2191ade90..e810d286ee 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -347,18 +347,16 @@ int add_iaa_compression_mode(const char *name,
goto free;
if (ll_table) {
- mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
+ mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL);
if (!mode->ll_table)
goto free;
- memcpy(mode->ll_table, ll_table, ll_table_size);
mode->ll_table_size = ll_table_size;
}
if (d_table) {
- mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
+ mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL);
if (!mode->d_table)
goto free;
- memcpy(mode->d_table, d_table, d_table_size);
mode->d_table_size = d_table_size;
}
@@ -922,7 +920,7 @@ static void rebalance_wq_table(void)
for_each_node_with_cpus(node) {
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
+ for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
int node_cpu = cpumask_nth(cpu, node_cpus);
if (WARN_ON(node_cpu >= nr_cpu_ids)) {
@@ -1079,8 +1077,8 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
update_total_comp_bytes_out(ctx->req->dlen);
update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
} else {
- update_total_decomp_bytes_in(ctx->req->dlen);
- update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
+ update_total_decomp_bytes_in(ctx->req->slen);
+ update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen);
}
if (ctx->compress && compression_ctx->verify_compress) {
@@ -1498,7 +1496,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- u64 start_time_ns;
int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1572,10 +1569,8 @@ static int iaa_comp_acompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
&req->dlen, &compression_crc, disable_async);
- update_max_comp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1622,7 +1617,6 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
struct iaa_wq *iaa_wq;
struct device *dev;
struct idxd_wq *wq;
- u64 start_time_ns;
int order = -1;
cpu = get_cpu();
@@ -1679,10 +1673,8 @@ alloc_dest:
dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, true);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EOVERFLOW) {
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
req->dlen *= 2;
@@ -1713,7 +1705,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
struct device *dev;
- u64 start_time_ns;
struct idxd_wq *wq;
if (!iaa_crypto_enabled) {
@@ -1773,10 +1764,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, false);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -2014,7 +2003,7 @@ static int __init iaa_crypto_init_module(void)
int ret = 0;
int node;
- nr_cpus = num_online_cpus();
+ nr_cpus = num_possible_cpus();
for_each_node_with_cpus(node)
nr_nodes++;
if (!nr_nodes) {
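
The kmemdup() conversions in add_iaa_compression_mode() above collapse an
allocate-then-copy pair into a single call. A minimal before/after sketch
(hypothetical buffer names, not the driver's code):

    /* before: allocate zeroed memory, then immediately overwrite it */
    dst = kzalloc(len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;
    memcpy(dst, src, len);

    /* after: one call that allocates and copies in a single step */
    dst = kmemdup(src, len, GFP_KERNEL);
    if (!dst)
            return -ENOMEM;
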
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
index c9f83af4b3..f5cc3d29ca 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -17,141 +17,117 @@
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"
-static u64 total_comp_calls;
-static u64 total_decomp_calls;
-static u64 total_sw_decomp_calls;
-static u64 max_comp_delay_ns;
-static u64 max_decomp_delay_ns;
-static u64 total_comp_bytes_out;
-static u64 total_decomp_bytes_in;
-static u64 total_completion_einval_errors;
-static u64 total_completion_timeout_errors;
-static u64 total_completion_comp_buf_overflow_errors;
+static atomic64_t total_comp_calls;
+static atomic64_t total_decomp_calls;
+static atomic64_t total_sw_decomp_calls;
+static atomic64_t total_comp_bytes_out;
+static atomic64_t total_decomp_bytes_in;
+static atomic64_t total_completion_einval_errors;
+static atomic64_t total_completion_timeout_errors;
+static atomic64_t total_completion_comp_buf_overflow_errors;
static struct dentry *iaa_crypto_debugfs_root;
void update_total_comp_calls(void)
{
- total_comp_calls++;
+ atomic64_inc(&total_comp_calls);
}
void update_total_comp_bytes_out(int n)
{
- total_comp_bytes_out += n;
+ atomic64_add(n, &total_comp_bytes_out);
}
void update_total_decomp_calls(void)
{
- total_decomp_calls++;
+ atomic64_inc(&total_decomp_calls);
}
void update_total_sw_decomp_calls(void)
{
- total_sw_decomp_calls++;
+ atomic64_inc(&total_sw_decomp_calls);
}
void update_total_decomp_bytes_in(int n)
{
- total_decomp_bytes_in += n;
+ atomic64_add(n, &total_decomp_bytes_in);
}
void update_completion_einval_errs(void)
{
- total_completion_einval_errors++;
+ atomic64_inc(&total_completion_einval_errors);
}
void update_completion_timeout_errs(void)
{
- total_completion_timeout_errors++;
+ atomic64_inc(&total_completion_timeout_errors);
}
void update_completion_comp_buf_overflow_errs(void)
{
- total_completion_comp_buf_overflow_errors++;
-}
-
-void update_max_comp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_comp_delay_ns)
- max_comp_delay_ns = time_diff;
-}
-
-void update_max_decomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_decomp_delay_ns)
- max_decomp_delay_ns = time_diff;
+ atomic64_inc(&total_completion_comp_buf_overflow_errors);
}
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_calls++;
- wq->iaa_device->comp_calls++;
+ atomic64_inc(&wq->comp_calls);
+ atomic64_inc(&wq->iaa_device->comp_calls);
}
void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_bytes += n;
- wq->iaa_device->comp_bytes += n;
+ atomic64_add(n, &wq->comp_bytes);
+ atomic64_add(n, &wq->iaa_device->comp_bytes);
}
void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_calls++;
- wq->iaa_device->decomp_calls++;
+ atomic64_inc(&wq->decomp_calls);
+ atomic64_inc(&wq->iaa_device->decomp_calls);
}
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_bytes += n;
- wq->iaa_device->decomp_bytes += n;
+ atomic64_add(n, &wq->decomp_bytes);
+ atomic64_add(n, &wq->iaa_device->decomp_bytes);
}
static void reset_iaa_crypto_stats(void)
{
- total_comp_calls = 0;
- total_decomp_calls = 0;
- total_sw_decomp_calls = 0;
- max_comp_delay_ns = 0;
- max_decomp_delay_ns = 0;
- total_comp_bytes_out = 0;
- total_decomp_bytes_in = 0;
- total_completion_einval_errors = 0;
- total_completion_timeout_errors = 0;
- total_completion_comp_buf_overflow_errors = 0;
+ atomic64_set(&total_comp_calls, 0);
+ atomic64_set(&total_decomp_calls, 0);
+ atomic64_set(&total_sw_decomp_calls, 0);
+ atomic64_set(&total_comp_bytes_out, 0);
+ atomic64_set(&total_decomp_bytes_in, 0);
+ atomic64_set(&total_completion_einval_errors, 0);
+ atomic64_set(&total_completion_timeout_errors, 0);
+ atomic64_set(&total_completion_comp_buf_overflow_errors, 0);
}
static void reset_wq_stats(struct iaa_wq *wq)
{
- wq->comp_calls = 0;
- wq->comp_bytes = 0;
- wq->decomp_calls = 0;
- wq->decomp_bytes = 0;
+ atomic64_set(&wq->comp_calls, 0);
+ atomic64_set(&wq->comp_bytes, 0);
+ atomic64_set(&wq->decomp_calls, 0);
+ atomic64_set(&wq->decomp_bytes, 0);
}
static void reset_device_stats(struct iaa_device *iaa_device)
{
struct iaa_wq *iaa_wq;
- iaa_device->comp_calls = 0;
- iaa_device->comp_bytes = 0;
- iaa_device->decomp_calls = 0;
- iaa_device->decomp_bytes = 0;
+ atomic64_set(&iaa_device->comp_calls, 0);
+ atomic64_set(&iaa_device->comp_bytes, 0);
+ atomic64_set(&iaa_device->decomp_calls, 0);
+ atomic64_set(&iaa_device->decomp_bytes, 0);
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
reset_wq_stats(iaa_wq);
@@ -160,10 +136,14 @@ static void reset_device_stats(struct iaa_device *iaa_device)
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
seq_printf(m, " name: %s\n", iaa_wq->wq->name);
- seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_wq->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_wq->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_wq->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n\n",
+ atomic64_read(&iaa_wq->decomp_bytes));
}
static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
@@ -173,30 +153,41 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
seq_puts(m, "iaa device:\n");
seq_printf(m, " id: %d\n", iaa_device->idxd->id);
seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
- seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_device->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_device->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_device->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n",
+ atomic64_read(&iaa_device->decomp_bytes));
seq_puts(m, " wqs:\n");
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
wq_show(m, iaa_wq);
}
-static void global_stats_show(struct seq_file *m)
+static int global_stats_show(struct seq_file *m, void *v)
{
seq_puts(m, "global stats:\n");
- seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
- seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
- seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
- seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
- seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
+ seq_printf(m, " total_comp_calls: %llu\n",
+ atomic64_read(&total_comp_calls));
+ seq_printf(m, " total_decomp_calls: %llu\n",
+ atomic64_read(&total_decomp_calls));
+ seq_printf(m, " total_sw_decomp_calls: %llu\n",
+ atomic64_read(&total_sw_decomp_calls));
+ seq_printf(m, " total_comp_bytes_out: %llu\n",
+ atomic64_read(&total_comp_bytes_out));
+ seq_printf(m, " total_decomp_bytes_in: %llu\n",
+ atomic64_read(&total_decomp_bytes_in));
seq_printf(m, " total_completion_einval_errors: %llu\n",
- total_completion_einval_errors);
+ atomic64_read(&total_completion_einval_errors));
seq_printf(m, " total_completion_timeout_errors: %llu\n",
- total_completion_timeout_errors);
+ atomic64_read(&total_completion_timeout_errors));
seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
- total_completion_comp_buf_overflow_errors);
+ atomic64_read(&total_completion_comp_buf_overflow_errors));
+
+ return 0;
}
static int wq_stats_show(struct seq_file *m, void *v)
@@ -205,8 +196,6 @@ static int wq_stats_show(struct seq_file *m, void *v)
mutex_lock(&iaa_devices_lock);
- global_stats_show(m);
-
list_for_each_entry(iaa_device, &iaa_devices, list)
device_stats_show(m, iaa_device);
@@ -243,6 +232,18 @@ static const struct file_operations wq_stats_fops = {
.release = single_release,
};
+static int global_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, global_stats_show, file);
+}
+
+static const struct file_operations global_stats_fops = {
+ .open = global_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");
int __init iaa_crypto_debugfs_init(void)
@@ -252,20 +253,8 @@ int __init iaa_crypto_debugfs_init(void)
iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
- debugfs_create_u64("max_comp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_comp_delay_ns);
- debugfs_create_u64("max_decomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_decomp_delay_ns);
- debugfs_create_u64("total_comp_calls", 0644,
- iaa_crypto_debugfs_root, &total_comp_calls);
- debugfs_create_u64("total_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_decomp_calls);
- debugfs_create_u64("total_sw_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_sw_decomp_calls);
- debugfs_create_u64("total_comp_bytes_out", 0644,
- iaa_crypto_debugfs_root, &total_comp_bytes_out);
- debugfs_create_u64("total_decomp_bytes_in", 0644,
- iaa_crypto_debugfs_root, &total_decomp_bytes_in);
+ debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL,
+ &global_stats_fops);
debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
&wq_stats_fops);
debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
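
The open/fops boilerplate added for global_stats is the standard seq_file
single_open() pattern. For a show callback that ignores its private data, the
kernel's DEFINE_SHOW_ATTRIBUTE() helper generates an equivalent open/fops
pair; a sketch assuming the same global_stats_show() signature as above:

    DEFINE_SHOW_ATTRIBUTE(global_stats); /* emits global_stats_open + global_stats_fops */

    debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL,
                        &global_stats_fops);
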
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c916ca83f0..3787a5f507 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -13,8 +13,6 @@ void update_total_comp_bytes_out(int n);
void update_total_decomp_calls(void);
void update_total_sw_decomp_calls(void);
void update_total_decomp_bytes_in(int n);
-void update_max_comp_delay_ns(u64 start_time_ns);
-void update_max_decomp_delay_ns(u64 start_time_ns);
void update_completion_einval_errs(void);
void update_completion_timeout_errs(void);
void update_completion_comp_buf_overflow_errs(void);
@@ -24,8 +22,6 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
-static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
-
#else
static inline int iaa_crypto_debugfs_init(void) { return 0; }
static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -35,8 +31,6 @@ static inline void update_total_comp_bytes_out(int n) {}
static inline void update_total_decomp_calls(void) {}
static inline void update_total_sw_decomp_calls(void) {}
static inline void update_total_decomp_bytes_in(int n) {}
-static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
static inline void update_completion_einval_errs(void) {}
static inline void update_completion_timeout_errs(void) {}
static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -46,8 +40,6 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
-static inline u64 iaa_get_ts(void) { return 0; }
-
#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
#endif
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index a90fbe00b3..45728659fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 1d0ef47a9f..78f0ea4925 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -487,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index ff9c8b5897..9ba202079a 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index fb34fd7f03..9fd7ec53b9 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -454,6 +456,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->bank_state_save = adf_gen4_bank_state_save;
+ hw_data->bank_state_restore = adf_gen4_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -469,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 92ef416ccc..7a06ad519b 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index a882e0ea22..201f9412c5 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index b6d76825a9..7ef633058c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04..a512ca4efd 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index d581f7c87d..cc9255b3b1 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 48cf3eb7c7..6b5b0cf9c7 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 446c3d6386..256786662d 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57f..4aaaaf9217 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 5915cde8a7..eac73cbfdd 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -14,16 +14,20 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
+ adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
adf_gen4_config.o \
+ adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_vf_mig.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
adf_gen4_ras.o \
adf_gen4_timer.o \
adf_clock.o \
+ adf_mstate_mgr.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -35,7 +39,8 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_rl.o \
qat_uclo.o \
qat_hal.o \
- qat_bl.o
+ qat_bl.o \
+ qat_mig_dev.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 08658c3a01..7830ecb1a1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
@@ -140,6 +141,40 @@ struct admin_info {
u32 mailbox_offset;
};
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
@@ -150,22 +185,49 @@ struct adf_hw_csr_ops {
u32 ring);
void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
u32 ring, dma_addr_t addr);
+ u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
+ u32 bank, u32 value);
+ u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+ u32 bank);
void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
u32 bank, u32 value);
+ u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*get_int_col_ctl_enable_mask)(void);
};
struct adf_cfg_device_data;
@@ -197,6 +259,20 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};
+struct qat_migdev_ops {
+ int (*init)(struct qat_mig_dev *mdev);
+ void (*cleanup)(struct qat_mig_dev *mdev);
+ void (*reset)(struct qat_mig_dev *mdev);
+ int (*open)(struct qat_mig_dev *mdev);
+ void (*close)(struct qat_mig_dev *mdev);
+ int (*suspend)(struct qat_mig_dev *mdev);
+ int (*resume)(struct qat_mig_dev *mdev);
+ int (*save_state)(struct qat_mig_dev *mdev);
+ int (*save_setup)(struct qat_mig_dev *mdev);
+ int (*load_state)(struct qat_mig_dev *mdev);
+ int (*load_setup)(struct qat_mig_dev *mdev, int size);
+};
+
struct adf_dev_err_mask {
u32 cppagentcmdpar_mask;
u32 parerr_ath_cph_mask;
@@ -244,6 +320,10 @@ struct adf_hw_device_data {
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+ int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+ int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -260,6 +340,7 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
+ struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -316,6 +397,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
@@ -330,11 +412,17 @@ struct adf_fw_loader_data {
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev;
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
bool restarting;
u8 vf_compat_ver;
+ /*
+ * Private area used for device migration.
+ * Memory allocation and free is managed by migration driver.
+ */
+ void *mig_priv;
};
struct adf_dc_data {
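
The new vfmig_ops table is reached through the GET_VFMIG_OPS() accessor
defined above. A hedged sketch of how a caller might drive it — a hypothetical
flow for illustration only, not the migration driver's actual sequence, and a
real caller would first check that the ops are populated:

    struct qat_migdev_ops *ops = GET_VFMIG_OPS(accel_dev);
    int ret;

    ret = ops->init(mdev);        /* allocate per-VF migration state */
    if (ret)
            return ret;

    ret = ops->suspend(mdev);     /* quiesce the VF before saving */
    if (!ret)
            ret = ops->save_state(mdev);

    ops->cleanup(mdev);           /* release what init() allocated */
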
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 8836f015c3..2cf102ad4c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
@@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
* 3. if the key exists with the same value, then return without doing
* anything (the newly created key_val is freed).
*/
+ down_write(&cfg->lock);
if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
adf_cfg_keyval_remove(key, section);
} else {
kfree(key_val);
- return 0;
+ goto out;
}
}
- down_write(&cfg->lock);
adf_cfg_keyval_add(key_val, section);
+
+out:
up_write(&cfg->lock);
return 0;
}
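
The adf_cfg.c change above moves down_write() before the key lookup so that
the check-and-insert runs as one critical section; previously another writer
could add or remove the key between adf_cfg_key_val_get() and
adf_cfg_keyval_add(). A minimal sketch of the corrected shape (hypothetical
helper names):

    down_write(&cfg->lock);
    if (!lookup(key)) {             /* zero return: key already present */
            if (value_differs(key))
                    remove(key);    /* replace the stale value */
            else
                    goto out;       /* same value: drop the duplicate */
    }
    add(key);
    out:
    up_write(&cfg->lock);
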
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 57328249c8..3bec9e20ba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
return pmisc->virt_addr;
}
+static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *etr;
+
+ etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+
+ return etr->virt_addr;
+}
+
static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
new file mode 100644
index 0000000000..650c9edd8a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen2_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
new file mode 100644
index 0000000000..55058b0f9e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN2_HW_CSR_DATA_H_
+#define ADF_GEN2_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)((value) & 0xFFFFFFFF); \
+ u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * (index)), value)
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
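
Transport code consumes these per-generation helpers through the
GET_CSR_OPS() accessor from adf_accel_devices.h rather than invoking the
macros directly. A short sketch of a hypothetical ring-polling snippet, using
the adf_get_etr_base() helper added earlier in this patch:

    struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
    void __iomem *csr = adf_get_etr_base(accel_dev);
    u32 head, tail;

    head = csr_ops->read_csr_ring_head(csr, bank, ring);
    tail = csr_ops->read_csr_ring_tail(csr, bank, ring);
    if (head != tail)
            ; /* ring still has in-flight descriptors */
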
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5..1f64bf49b2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
- u32 ring, u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
-
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 6bd341061d..708e918612 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -6,78 +6,9 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
-#define BUILD_RING_BASE_ADDR(addr, size) \
- (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- u32 l_base = 0, u_base = 0; \
- l_base = (u32)((value) & 0xFFFFFFFF); \
- u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
/* AE to function map */
#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
@@ -106,12 +37,6 @@ do { \
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
#define ADF_POWERGATE_DC BIT(23)
@@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
new file mode 100644
index 0000000000..6609c248aa
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen4_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_UO_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NE_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NF_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_F_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_C_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_CTL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+static u32 get_int_col_ctl_enable_mask(void)
+{
+ return ADF_RING_CSR_INT_COL_CTL_ENABLE;
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_stat = read_csr_stat;
+ csr_ops->read_csr_uo_stat = read_csr_uo_stat;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->read_csr_ne_stat = read_csr_ne_stat;
+ csr_ops->read_csr_nf_stat = read_csr_nf_stat;
+ csr_ops->read_csr_f_stat = read_csr_f_stat;
+ csr_ops->read_csr_c_stat = read_csr_c_stat;
+ csr_ops->read_csr_exp_stat = read_csr_exp_stat;
+ csr_ops->read_csr_exp_int_en = read_csr_exp_int_en;
+ csr_ops->write_csr_exp_int_en = write_csr_exp_int_en;
+ csr_ops->read_csr_ring_config = read_csr_ring_config;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->read_csr_ring_base = read_csr_ring_base;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->read_csr_int_en = read_csr_int_en;
+ csr_ops->write_csr_int_en = write_csr_int_en;
+ csr_ops->read_csr_int_flag = read_csr_int_flag;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->read_csr_int_srcsel = read_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val;
+ csr_ops->read_csr_int_col_en = read_csr_int_col_en;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+ csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
new file mode 100644
index 0000000000..6f33e7c87c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
+#define ADF_RING_CSR_RING_CONFIG 0x1000
+#define ADF_RING_CSR_RING_LBASE 0x1040
+#define ADF_RING_CSR_RING_UBASE 0x1080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_STAT 0x140
+#define ADF_RING_CSR_UO_STAT 0x148
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_NE_STAT 0x150
+#define ADF_RING_CSR_NF_STAT 0x154
+#define ADF_RING_CSR_F_STAT 0x158
+#define ADF_RING_CSR_C_STAT 0x15C
+#define ADF_RING_CSR_INT_FLAG_EN 0x16C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_EXP_STAT 0x188
+#define ADF_RING_CSR_EXP_INT_EN 0x18C
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_CSR_ADDR_OFFSET 0x100000
+#define ADF_RING_BUNDLE_SIZE 0x2000
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
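+/*
+ * Each bank occupies an ADF_RING_BUNDLE_SIZE slice of the ETR region at
+ * ADF_RING_CSR_ADDR_OFFSET; per-ring registers within a bank are indexed
+ * by the ring number scaled by the register width (ring << 2).
+ */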
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT)
+#define READ_CSR_UO_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT)
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT)
+#define READ_CSR_NF_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT)
+#define READ_CSR_F_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT)
+#define READ_CSR_C_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT)
+#define READ_CSR_EXP_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT)
+#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN)
+#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_INT_EN, value)
+#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2))
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
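+/*
+ * The ring base is a 64-bit DMA address split across the LBASE and UBASE
+ * registers. The macro latches its arguments into locals once so that
+ * argument expressions with side effects are not evaluated twice.
+ */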
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ void __iomem *_csr_base_addr = csr_base_addr; \
+ u32 _bank = bank; \
+ u32 _ring = ring; \
+ dma_addr_t _value = value; \
+ u32 l_base = 0, u_base = 0; \
+ l_base = lower_32_bits(_value); \
+ u_base = upper_32_bits(_value); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
+} while (0)
+
+static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ u32 l_base, u_base;
+
+	/*
+	 * Use a special IO wrapper for the ring base as the LBASE and
+	 * UBASE registers are not physically contiguous
+	 */
+ l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_LBASE + (ring << 2));
+ u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_UBASE + (ring << 2));
+
+ return (u64)u_base << 32 | (u64)l_base;
+}
+
+#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
+ read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring))
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define READ_CSR_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN)
+#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_EN, (value))
+#define READ_CSR_INT_FLAG(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG, (value))
+#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, (value))
+#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_EN, (value))
+#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN)
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index d28e192194..41a0979e68 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
+#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
@@ -8,103 +9,6 @@
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
-
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_GEN4_ACCELERATORS_MASK;
@@ -321,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
- void __iomem *csr;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
int ret;
if (bank_number >= hw_data->num_banks)
@@ -331,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
dev_dbg(&GET_DEV(accel_dev),
"ring pair reset for bank:%d\n", bank_number);
- csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
ret = reset_ring_pair(csr, bank_number);
if (ret)
dev_err(&GET_DEV(accel_dev),
@@ -489,3 +391,281 @@ set_mask:
return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
+
+/**
+ * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
+ * @accel_dev: Pointer to the device structure
+ * @bank_idx: Offset to the bank within this device
+ * @timeout_ms: Timeout in milliseconds for the operation
+ *
+ * This function tries to quiesce the coalesced interrupt timer of a bank if
+ * it has been enabled and triggered.
+ *
+ * Returns: 0 on success, error code otherwise.
+ */
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
+ void __iomem *csr_etr = adf_get_etr_base(accel_dev);
+ u32 int_col_ctl, int_col_mask, int_col_en;
+ u32 e_stat, intsrc;
+ u64 wait_us;
+ int ret;
+
+ if (timeout_ms < 0)
+ return -EINVAL;
+
+ int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
+ int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
+ if (!(int_col_ctl & int_col_mask))
+ return 0;
+
+ int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
+ int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);
+
+ e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
+ if (!(~e_stat & int_col_en))
+ return 0;
+
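+	/*
+	 * Convert the remaining coalescing timer ticks (the control field
+	 * scaled by 256, doubled for margin) into microseconds using the
+	 * device clock, then cap the wait at the caller-supplied timeout.
+	 */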
+ wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
+ do_div(wait_us, hw_data->clock_frequency);
+ wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
+ dev_dbg(&GET_DEV(accel_dev),
+ "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
+ bank_idx, wait_us, timeout_ms, e_stat, int_col_en);
+
+ ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
+ ADF_COALESCED_POLL_DELAY_US, wait_us, true,
+ csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev),
+ "coalesced timer for bank %d expired (%llu us)\n",
+ bank_idx, wait_us);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);
+
+static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
+{
+ u32 status;
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_DRAIN);
+
+ return read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+}
+
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+}
+
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);
+
+ ret = drain_bank(csr, bank_number, timeout_us);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");
+
+ return ret;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
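+/*
+ * Statement-expression helper: re-read a status CSR after restore and
+ * compare it with the value captured at save time, logging and returning
+ * -EINVAL on any mismatch.
+ */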
+#define CHECK_STAT(op, expect_val, name, args...) \
+({ \
+	u32 __expect_val = (expect_val); \
+	u32 actual_val = op(args); \
+	(__expect_val == actual_val) ? 0 : \
+	(pr_err("QAT: Failed to restore %s register. Expected 0x%x, actual 0x%x\n", \
+		name, __expect_val, actual_val), -EINVAL); \
+})
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
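+	/*
+	 * Restore order matters: ring bases and configs are programmed
+	 * first, then heads and tails per TX/RX pair with interrupt
+	 * source-select fixups so that the empty/full flags end up
+	 * matching the saved state.
+	 */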
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+		/*
+		 * The TX ring head needs to be updated again to make sure
+		 * that the HW does not consider the ring full when it is
+		 * empty, and that the state flags match the recovered state.
+		 */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+		/*
+		 * The RX ring tail needs to be updated again to make sure
+		 * that the HW does not consider the ring empty when it is
+		 * full, and that the state flags match the recovered state.
+		 */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ val = state->ringexpstat;
+ if (tmp_val && !val) {
+		pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
+
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index c6e80df5a8..8b10926ced 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
+#ifndef ADF_GEN4_HW_DATA_H_
+#define ADF_GEN4_HW_DATA_H_
#include <linux/units.h>
@@ -54,95 +54,6 @@
#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
-#define ADF_RING_CSR_RING_CONFIG 0x1000
-#define ADF_RING_CSR_RING_LBASE 0x1040
-#define ADF_RING_CSR_RING_UBASE 0x1080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_ADDR_OFFSET 0x100000
-#define ADF_RING_BUNDLE_SIZE 0x2000
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
- ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- void __iomem *_csr_base_addr = csr_base_addr; \
- u32 _bank = bank; \
- u32 _ring = ring; \
- dma_addr_t _value = value; \
- u32 l_base = 0, u_base = 0; \
- l_base = lower_32_bits(_value); \
- u_base = upper_32_bits(_value); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_EN, (value))
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, (value))
-
-/* Arbiter configuration */
-#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-
/* Default ring mapping */
#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
(ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
@@ -166,10 +77,20 @@ do { \
#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_RPRESET_POLL_DELAY_US 20
#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2)
#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Ring interrupt */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
+#define ADF_COALESCED_POLL_DELAY_US 1000
+#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
+#define ADF_WQM_CSR_RP_IDX_RX 1
+
/* Error source registers */
#define ADF_GEN4_ERRSOU0 (0x41A200)
#define ADF_GEN4_ERRSOU1 (0x41A204)
@@ -197,6 +118,19 @@ do { \
/* Arbiter threads mask with error value */
#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+/* PF2VM communication channel */
+#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20)
+#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20)
+#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20)
+#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20)
+#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20)
+#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20)
+
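+/*
+ * Per-VF migration state: @mstate_mgr serializes the device state into
+ * the migration buffer and @bank_stopped tracks which banks were drained
+ * during suspend so that resume only restarts those.
+ */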
+struct adf_gen4_vfmig {
+ struct adf_mstate_mgr *mstate_mgr;
+ bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF];
+};
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
enum icp_qat_gen4_slice_mask {
@@ -230,11 +164,20 @@ u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms);
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us);
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number);
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3..21474d402d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
@@ -6,12 +6,10 @@
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_hw_data.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
-
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
@@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = {
static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
{
- return ADF_4XXX_PF2VM_OFFSET(i);
+ return ADF_GEN4_PF2VM_OFFSET(i);
}
static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
{
- return ADF_4XXX_VM2PF_OFFSET(i);
+ return ADF_GEN4_VM2PF_OFFSET(i);
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
new file mode 100644
index 0000000000..a62eb5e8db
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_utils.h"
+#include "adf_mstate_mgr.h"
+#include "adf_gen4_vf_mig.h"
+
+#define ADF_GEN4_VF_MSTATE_SIZE 4096
+#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev);
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len);
+
+static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev)
+{
+ u8 *state;
+
+ state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mdev->state = state;
+ mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev)
+{
+ kfree(mdev->state);
+ mdev->state = NULL;
+}
+
+static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev)
+{
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+}
+
+static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+
+ vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
+ if (!vfmig)
+ return -ENOMEM;
+
+ vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size);
+ if (!vfmig->mstate_mgr) {
+ kfree(vfmig);
+ return -ENOMEM;
+ }
+ vf_info->mig_priv = vfmig;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+ if (vf_info->mig_priv) {
+ vfmig = vf_info->mig_priv;
+ adf_mstate_mgr_destroy(vfmig->mstate_mgr);
+ kfree(vfmig);
+ vf_info->mig_priv = NULL;
+ }
+}
+
+static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int ret, i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ /* Stop all inflight jobs */
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr,
+ ADF_RPRESET_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to drain bank %d for vf_nr %d\n", i,
+ vf_nr);
+ return ret;
+ }
+ vf_mig->bank_stopped[i] = true;
+
+ adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr,
+ ADF_COALESCED_POLL_TIMEOUT_US);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ if (vf_mig->bank_stopped[i]) {
+ adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr);
+ vf_mig->bank_stopped[i] = false;
+ }
+ }
+
+ return 0;
+}
+
+struct adf_vf_bank_info {
+ struct adf_accel_dev *accel_dev;
+ u32 vf_nr;
+ u32 bank_nr;
+};
+
+struct mig_user_sla {
+ enum adf_base_services srv;
+ u64 rp_mask;
+ u32 cir;
+ u32 pir;
+};
+
+static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf,
+ u32 src_size, void *opaque)
+{
+ struct adf_mstate_vreginfo _sinfo = { src_buf, src_size };
+ struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque;
+ u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla);
+ u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla);
+ struct mig_user_sla *src_slas = sinfo->addr;
+ struct mig_user_sla *dst_slas = dinfo->addr;
+ int i, j;
+
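+	/*
+	 * For every SLA of the source VF, find the destination SLA with
+	 * the same service and ring-pair mask and make sure its rate
+	 * limits are not lower than the source ones.
+	 */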
+ for (i = 0; i < src_sla_cnt; i++) {
+ for (j = 0; j < dst_sla_cnt; j++) {
+ if (src_slas[i].srv != dst_slas[j].srv ||
+ src_slas[i].rp_mask != dst_slas[j].rp_mask)
+ continue;
+
+ if (src_slas[i].cir > dst_slas[j].cir ||
+ src_slas[i].pir > dst_slas[j].pir) {
+ pr_err("QAT: DST VF rate limiting mismatch.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (j == dst_sla_cnt) {
+ pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n",
+ src_slas[i].srv, src_slas[i].rp_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz)
+{
+	if (src_sz > max_sz || dst_sz > max_sz)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr,
+ u8 *src_buf, u32 src_sz, void *opaque)
+{
+ struct adf_mstate_vreginfo *info = opaque;
+ u8 compat = 0;
+ u8 *pcompat;
+
+ if (src_sz != info->size) {
+ pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n",
+ src_sz, info->size);
+ return -EINVAL;
+ }
+
+ memcpy(info->addr, src_buf, info->size);
+ pcompat = info->addr;
+ if (*pcompat == 0) {
+		pr_warn("QAT: Unable to determine the VF driver version\n");
+ return 0;
+ }
+
+ compat = adf_vf_compat_checker(*pcompat);
+ if (compat == ADF_PF2VF_VF_INCOMPATIBLE) {
+ pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ }
+
+ if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN)
+ pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ return 0;
+}
+
+/*
+ * adf_mstate_capmask_compare() - compare QAT device capability mask
+ * @sinfo: Pointer to source capability info
+ * @dinfo: Pointer to target capability info
+ *
+ * This function compares the capability mask between source VF and target VF
+ *
+ * Returns: 0 if target capability mask is identical to source capability mask,
+ * 1 if target mask can represent all the capabilities represented by source mask,
+ * -1 if target mask can't represent all the capabilities represented by source
+ * mask.
+ */
+static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
+ struct adf_mstate_vreginfo *dinfo)
+{
+ u64 src = 0, dst = 0;
+
+ if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) {
+		pr_debug("QAT: Unexpected capability sizes: src %u, dst %u, max %zu\n",
+			 sinfo->size, dinfo->size, sizeof(u64));
+ return -1;
+ }
+
+ memcpy(&src, sinfo->addr, sinfo->size);
+ memcpy(&dst, dinfo->addr, dinfo->size);
+
+	pr_debug("QAT: Checking capability compatibility: src %llu, dst %llu\n", src, dst);
+
+ if (src == dst)
+ return 0;
+
+ if ((src | dst) == dst)
+ return 1;
+
+ return -1;
+}
+
+static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) >= 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo *info = opa;
+
+ if (size != info->size) {
+		pr_debug("QAT: Unexpected state size %u, expected %u\n", size, info->size);
+ return -EINVAL;
+ }
+ memcpy(info->addr, buf, info->size);
+
+ return 0;
+}
+
+static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ struct mig_user_sla *pmig_slas)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla **sla_type_arr = NULL;
+ u64 rp_mask, rp_index;
+ u32 max_num_sla;
+ u32 sla_cnt = 0;
+ int i, j;
+
+ if (!accel_dev->rate_limiting)
+ return 0;
+
+ rp_index = vf_nr * hw_data->num_banks_per_vf;
+ max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr);
+
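+	/*
+	 * Walk the leaf SLAs and collect those whose ring-pair mask
+	 * overlaps the ring pairs assigned to this VF.
+	 */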
+ for (i = 0; i < max_num_sla; i++) {
+ if (!sla_type_arr[i])
+ continue;
+
+ rp_mask = 0;
+ for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++)
+ rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]);
+
+ if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) {
+ pmig_slas->rp_mask = rp_mask;
+ pmig_slas->cir = sla_type_arr[i]->cir;
+ pmig_slas->pir = sla_type_arr[i]->pir;
+ pmig_slas->srv = sla_type_arr[i]->srv;
+ pmig_slas++;
+ sla_cnt++;
+ }
+ }
+
+ return sla_cnt;
+}
+
+static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
+ u8 *state, u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+ ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+ subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to lookup sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_load_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 ofs;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ misc_states[i].id,
+ adf_mstate_set_vreg,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load sec %s\n", misc_states[i].id);
+ return -EINVAL;
+ }
+ ADF_CSR_WR(csr, misc_states[i].ofs, regv);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ u32 dst_sla_cnt;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == dst_slas) {
+ dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas);
+ gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ gen_states[i].id,
+ gen_states[i].action,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id,
+ setups[i].action, &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
+ u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr;
+ pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+
+ ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return sizeof(struct bank_state);
+}
+
+static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_vf_bank_info vf_bank_info;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+
+ subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ vf_bank_info.accel_dev = accel_dev;
+ vf_bank_info.vf_nr = vf_nr;
+ vf_bank_info.bank_nr = bank_nr;
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_save_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 offset;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ ktime_t time_exp;
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
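+	/*
+	 * Hold the PF/VF migration lock while the mailbox registers are
+	 * snapshotted so a concurrent PFVF exchange cannot change them;
+	 * poll with a timeout instead of blocking indefinitely.
+	 */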
+ time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US);
+ while (!mutex_trylock(&vf_info->pfvf_mig_lock)) {
+ if (ktime_after(ktime_get(), time_exp)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(500, 1000);
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ regv = ADF_CSR_RD(csr, misc_states[i].offset);
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ misc_states[i].id,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ misc_states[i].id);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ u32 src_sla_cnt;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, {src_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == src_slas) {
+ src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas);
+ gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ gen_states[i].id,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id,
+ &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_save_setup(mdev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save setup for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size,
+ mdev->state_size - mdev->setup_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save generic state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr,
+ mdev->state + mdev->remote_setup_size,
+ mdev->state_size - mdev->remote_setup_size,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load general state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->setup_size)
+ return 0;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id);
+ if (ret)
+ return ret;
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+ mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ u32 setup_size;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->remote_setup_size)
+ return 0;
+
+ if (len < sizeof(struct adf_mstate_preh))
+ return -EAGAIN;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr);
+ if (setup_size > mdev->state_size)
+ return -EINVAL;
+
+ if (len < setup_size)
+ return -EAGAIN;
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state,
+ setup_size, NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ mdev->remote_setup_size = setup_size;
+
+ ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load config for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ vfmig_ops->init = adf_gen4_vfmig_init_device;
+ vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device;
+ vfmig_ops->reset = adf_gen4_vfmig_reset_device;
+ vfmig_ops->open = adf_gen4_vfmig_open_device;
+ vfmig_ops->close = adf_gen4_vfmig_close_device;
+ vfmig_ops->suspend = adf_gen4_vfmig_suspend_device;
+ vfmig_ops->resume = adf_gen4_vfmig_resume_device;
+ vfmig_ops->save_state = adf_gen4_vfmig_save_state;
+ vfmig_ops->load_state = adf_gen4_vfmig_load_state;
+ vfmig_ops->load_setup = adf_gen4_vfmig_load_setup;
+ vfmig_ops->save_setup = adf_gen4_vfmig_save_setup;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
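Taken together, adf_gen4_vfmig_save_setup() and adf_gen4_vfmig_save_state() above imply a two-part layout for the migration buffer: a self-contained setup region (its own preamble plus the CONFIG section) at offset 0, followed by a second preamble and the GENER, MISCBAR and ETRBAR sections. A rough sketch inferred from those two functions (the diagram is illustrative, not part of the driver):

    /*
     * mdev->state
     * +-------------------------------+ <- 0
     * | adf_mstate_preh               |
     * | CONFIG:  gencap/svcmap/extdc  |
     * +-------------------------------+ <- mdev->setup_size
     * | adf_mstate_preh               |
     * | GENER:   iovinit/compver/sla  |
     * | MISCBAR: misc CSR snapshot    |
     * | ETRBAR:  per-bank ring state  |
     * +-------------------------------+ <- up to mdev->state_size
     */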
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
new file mode 100644
index 0000000000..72216d078e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_VF_MIG_H_
+#define ADF_GEN4_VF_MIG_H_
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
new file mode 100644
index 0000000000..41cc763a74
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_mstate_mgr.h"
+
+#define ADF_MSTATE_MAGIC 0xADF5CAEA
+#define ADF_MSTATE_VERSION 0x1
+
+struct adf_mstate_sect_h {
+ u8 id[ADF_MSTATE_ID_LEN];
+ u32 size;
+ u32 sub_sects;
+ u8 state[];
+};
+
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr)
+{
+ return mgr->state - mgr->buf;
+}
+
+static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr)
+{
+ return mgr->buf + mgr->size - mgr->state;
+}
+
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size)
+{
+ mgr->buf = buf;
+ mgr->state = buf;
+ mgr->size = size;
+ mgr->n_sects = 0;
+}
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size)
+{
+ struct adf_mstate_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+
+ return mgr;
+}
+
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr)
+{
+ kfree(mgr);
+}
+
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr)
+{
+ adf_mstate_mgr_init(mgr, p_mgr->state,
+ p_mgr->size - adf_mstate_state_size(p_mgr));
+}
+
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect)
+{
+ adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size);
+ mgr->n_sects = p_sect->sub_sects;
+}
+
+static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble)
+{
+ preamble->magic = ADF_MSTATE_MAGIC;
+ preamble->version = ADF_MSTATE_VERSION;
+ preamble->preh_len = sizeof(*preamble);
+ preamble->size = 0;
+ preamble->n_sects = 0;
+}
+
+/* Default preamble checker */
+static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble,
+ void *opaque)
+{
+ struct adf_mstate_mgr *mgr = opaque;
+
+ if (preamble->magic != ADF_MSTATE_MAGIC ||
+ preamble->version > ADF_MSTATE_VERSION ||
+ preamble->preh_len > mgr->size) {
+ pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n",
+ preamble->magic, preamble->version, preamble->preh_len,
+ mgr->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf;
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*pre)) {
+ pr_err("QAT: LM - Not enough space for preamble\n");
+ return NULL;
+ }
+
+ adf_mstate_preamble_init(pre);
+ mgr->state += pre->preh_len;
+
+ return pre;
+}
+
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf;
+
+ preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len;
+ preamble->n_sects = mgr->n_sects;
+
+ return 0;
+}
+
+static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect,
+ const char *prefix)
+{
+ pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id);
+ print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect,
+ sizeof(*sect), true);
+ print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state,
+ sect->size, true);
+}
+
+static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *sect,
+ u32 size,
+ u32 n_subsects)
+{
+ sect->size += size;
+ sect->sub_sects += n_subsects;
+ mgr->n_sects++;
+ mgr->state += sect->size;
+
+ adf_mstate_dump_sect(sect, "Add");
+}
+
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect)
+{
+ __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr),
+ curr_mgr->n_sects);
+}
+
+static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr,
+ const char *id)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state);
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*sect)) {
+ pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id);
+ return NULL;
+ }
+
+ strscpy(sect->id, id, sizeof(sect->id));
+ sect->size = 0;
+ sect->sub_sects = 0;
+ mgr->state += sizeof(*sect);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info)
+{
+ struct adf_mstate_sect_h *sect;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (adf_mstate_avail_room(mgr) < info->size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, info->size);
+ return NULL;
+ }
+
+ memcpy(sect->state, info->addr, info->size);
+ __adf_mstate_sect_update(mgr, sect, info->size, 0);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque)
+{
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *sect;
+ int avail_room, size;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (!populate)
+ return sect;
+
+ avail_room = adf_mstate_avail_room(mgr);
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr);
+
+ size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque);
+ if (size < 0)
+ return NULL;
+
+ size += adf_mstate_state_size(&sub_sects_mgr);
+ if (avail_room < size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, size);
+ return NULL;
+ }
+ __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects);
+
+ return sect;
+}
+
+static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_sect_h *sect = start;
+ u64 end;
+ int i;
+
+ end = (uintptr_t)mgr->buf + mgr->size;
+ for (i = 0; i < mgr->n_sects; i++) {
+ uintptr_t s_start = (uintptr_t)sect->state;
+ uintptr_t s_end = s_start + sect->size;
+
+ if (s_end < s_start || s_end > end) {
+ pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n",
+ i, sect->size, mgr->size, mgr->n_sects);
+ return -EINVAL;
+ }
+ sect = (struct adf_mstate_sect_h *)s_end;
+ }
+
+ pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n",
+ start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start),
+ mgr->size, mgr->n_sects);
+
+ return 0;
+}
+
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf;
+
+ return preh->preh_len + preh->size;
+}
+
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size,
+ adf_mstate_preamble_checker pre_checker,
+ void *opaque)
+{
+ struct adf_mstate_preh *pre;
+ int ret;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+ pre = (struct adf_mstate_preh *)(mgr->buf);
+
+ pr_debug("QAT: LM - Dump state preambles\n");
+ print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0);
+
+ if (pre_checker)
+ ret = (*pre_checker)(pre, opaque);
+ else
+ ret = adf_mstate_preamble_def_checker(pre, mgr);
+ if (ret)
+ return ret;
+
+ mgr->state = mgr->buf + pre->preh_len;
+ mgr->n_sects = pre->n_sects;
+
+ return adf_mstate_sect_validate(mgr);
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_mgr sub_sects_mgr;
+ int i, ret;
+
+ for (i = 0; i < mgr->n_sects; i++) {
+ if (!strncmp(sect->id, id, sizeof(sect->id)))
+ goto found;
+
+ sect = (struct adf_mstate_sect_h *)(sect->state + sect->size);
+ }
+
+ return NULL;
+
+found:
+ adf_mstate_dump_sect(sect, "Found");
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect);
+ if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr))
+ return NULL;
+
+ if (!action)
+ return sect;
+
+ ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque);
+ if (ret)
+ return NULL;
+
+ return sect;
+}
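The manager is used in a write-then-patch pattern: reserve a preamble, append sections (each section may open a child manager for sub-sections), then rewrite the preamble with the final size and section count. A minimal save-side sketch under that pattern (buf, the "myreg" ID and the some_u32 variable are made up for illustration):

    u8 buf[SZ_4K];
    u32 some_u32 = 0xdeadbeef;                 /* hypothetical state word */
    struct adf_mstate_vreginfo info = { &some_u32, sizeof(some_u32) };
    struct adf_mstate_mgr mgr;

    adf_mstate_mgr_init(&mgr, buf, sizeof(buf));
    if (!adf_mstate_preamble_add(&mgr))        /* reserve header space */
            return -EINVAL;
    if (!adf_mstate_sect_add_vreg(&mgr, "myreg", &info))
            return -EINVAL;                    /* copies some_u32 into buf */
    adf_mstate_preamble_update(&mgr);          /* patch size and n_sects */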
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
new file mode 100644
index 0000000000..81d263a596
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+
+#ifndef ADF_MSTATE_MGR_H
+#define ADF_MSTATE_MGR_H
+
+#define ADF_MSTATE_ID_LEN 8
+
+#define ADF_MSTATE_ETRB_IDS "ETRBAR"
+#define ADF_MSTATE_MISCB_IDS "MISCBAR"
+#define ADF_MSTATE_EXTB_IDS "EXTBAR"
+#define ADF_MSTATE_GEN_IDS "GENER"
+#define ADF_MSTATE_CONFIG_IDS "CONFIG"
+#define ADF_MSTATE_SECTION_NUM 5
+
+#define ADF_MSTATE_BANK_IDX_IDS "bnk"
+
+#define ADF_MSTATE_ETR_REGS_IDS "mregs"
+#define ADF_MSTATE_VINTSRC_IDS "visrc"
+#define ADF_MSTATE_VINTMSK_IDS "vimsk"
+#define ADF_MSTATE_SLA_IDS "sla"
+#define ADF_MSTATE_IOV_INIT_IDS "iovinit"
+#define ADF_MSTATE_COMPAT_VER_IDS "compver"
+#define ADF_MSTATE_GEN_CAP_IDS "gencap"
+#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap"
+#define ADF_MSTATE_GEN_EXTDC_IDS "extdc"
+#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv"
+#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv"
+#define ADF_MSTATE_VM2PF_IDS "vm2pf"
+#define ADF_MSTATE_PF2VM_IDS "pf2vm"
+
+struct adf_mstate_mgr {
+ u8 *buf;
+ u8 *state;
+ u32 size;
+ u32 n_sects;
+};
+
+struct adf_mstate_preh {
+ u32 magic;
+ u32 version;
+ u16 preh_len;
+ u16 n_sects;
+ u32 size;
+};
+
+struct adf_mstate_vreginfo {
+ void *addr;
+ u32 size;
+};
+
+struct adf_mstate_sect_h;
+
+typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa);
+typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa);
+typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size,
+ void *opa);
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size);
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr);
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size);
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr);
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect);
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr,
+ u8 *buf, u32 size,
+ adf_mstate_preamble_checker checker,
+ void *opaque);
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr);
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr);
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect);
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info);
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque);
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque);
+#endif
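On restore, the same buffer is validated and then walked by section ID rather than rebuilt. A matching load-side sketch (buf, size and the "myreg" ID are the hypothetical ones from the save sketch above):

    struct adf_mstate_sect_h *sect;
    struct adf_mstate_mgr mgr;

    /* Checks magic/version and that every section fits in the buffer. */
    if (adf_mstate_mgr_init_from_remote(&mgr, buf, size, NULL, NULL))
            return -EINVAL;

    sect = adf_mstate_sect_lookup(&mgr, "myreg", NULL, NULL);
    if (!sect)
            return -EINVAL;                    /* section not present */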
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 9ab93fbfef..b9b5e744a3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
"VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- if (vf_compat_ver == 0)
- compat = ADF_PF2VF_VF_INCOMPATIBLE;
- else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
- compat = ADF_PF2VF_VF_COMPATIBLE;
- else
- compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
-
+ compat = adf_vf_compat_checker(vf_compat_ver);
vf_info->vf_compat_ver = vf_compat_ver;
resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
index 2be048e228..1a044297d8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
@@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg
struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
const struct pfvf_csr_format *fmt);
+static inline u8 adf_vf_compat_checker(u8 vf_compat_ver)
+{
+ if (vf_compat_ver == 0)
+ return ADF_PF2VF_VF_INCOMPATIBLE;
+
+ if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ return ADF_PF2VF_VF_COMPATIBLE;
+
+ return ADF_PF2VF_VF_COMPAT_UNKNOWN;
+}
+
#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index e10f0024f4..346ef8bee9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s
}
/**
- * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
+ * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
* @type: SLA type
* @sla_arr: pointer to variable where requested pointer will be stored
*
* Return: Max number of elements allowed for the returned array
*/
-static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
struct rl_sla ***sla_arr)
{
switch (type) {
@@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
rp_in_use[sla->ring_pairs_ids[i]] = false;
update_budget(sla, old_cir, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
assign_node_to_parent(rl_data->accel_dev, sla, true);
adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
mark_rps_usage(sla, rl_data->rp_in_use, false);
@@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev,
if (!is_update) {
mark_rps_usage(sla, rl_data->rp_in_use, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
sla_type_arr[sla->node_id] = sla;
rl_data->sla[sla->sla_id] = sla;
}
@@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
/* Unregister and remove all SLAs */
for (j = RL_LEAF; j >= end_type; j--) {
- max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);
+ max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr);
for (i = 0; i < max_id; i++) {
if (!sla_type_arr[i])
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index 269c6656fb..bfe750ea0e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -151,6 +151,8 @@ struct rl_sla {
u16 ring_pairs_cnt;
};
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+ struct rl_sla ***sla_arr);
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
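Exporting the lookup lets migration code iterate the SLAs of one type exactly the way adf_rl_remove_sla_all() does in the hunk above; a sketch of that loop (rl_data is assumed to be a valid struct adf_rl pointer):

    struct rl_sla **sla_arr;
    u32 max_id, i;

    max_id = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_arr);
    for (i = 0; i < max_id; i++) {
            if (!sla_arr[i])
                    continue;                  /* unused slot */
            /* snapshot sla_arr[i] into the migration state here */
    }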
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 87a70c00c4..8d645e7e04 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work)
u32 vf_nr = vf_info->vf_nr;
bool ret;
+ mutex_lock(&vf_info->pfvf_mig_lock);
ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
if (ret)
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
kfree(pf2vf_resp);
}
@@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
vf_info->vf_nr = i;
mutex_init(&vf_info->pf2vf_lock);
+ mutex_init(&vf_info->pfvf_mig_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
ADF_VF2PF_RATELIMIT_INTERVAL,
ADF_VF2PF_RATELIMIT_BURST);
@@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, false);
- for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
mutex_destroy(&vf->pf2vf_lock);
+ mutex_destroy(&vf->pfvf_mig_lock);
+ }
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
kfree(accel_dev->pf.vf_info);
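The new per-VF pfvf_mig_lock serializes VF2PF message handling against migration state save: adf_iov_send_resp() now holds it across adf_recv_and_handle_vf2pf_msg(), and the save path above takes the same mutex around its PF2VM/VM2PF CSR snapshot, so a VF cannot renegotiate (for example, update vf_compat_ver) while its state is being serialized. The ordering contract, summarized:

    /*
     * Per-VF, PF side:
     *   adf_iov_send_resp()              adf_gen4_vfmig_save_misc()
     *     lock(pfvf_mig_lock)              lock(pfvf_mig_lock)
     *     handle VF2PF message             snapshot PF2VM/VM2PF CSRs
     *     unlock(pfvf_mig_lock)            unlock(pfvf_mig_lock)
     */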
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4..1efdf46490 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
@@ -474,7 +474,6 @@ err:
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
u32 size;
u32 num_banks = 0;
@@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
}
accel_dev->transport = etr_data;
- i = hw_data->get_etr_bar_id(hw_data);
- csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+ csr_addr = adf_get_etr_base(accel_dev);
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 4128200a90..85c682e248 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -110,6 +110,8 @@ struct qat_dh_ctx {
unsigned int p_size;
bool g2;
struct qat_crypto_instance *inst;
+ struct crypto_kpp *ftfm;
+ bool fallback;
} __packed __aligned(64);
struct qat_asym_request {
@@ -381,6 +383,36 @@ unmap_src:
return ret;
}
+static int qat_dh_generate_public_key(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_generate_public_key(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
+static int qat_dh_compute_shared_secret(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_compute_shared_secret(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
static int qat_dh_check_params_length(unsigned int p_len)
{
switch (p_len) {
@@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (qat_dh_check_params_length(params->p_size << 3))
- return -EINVAL;
-
ctx->p_size = params->p_size;
ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
@@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
+ if (qat_dh_check_params_length(params.p_size << 3)) {
+ ctx->fallback = true;
+ return crypto_kpp_set_secret(ctx->ftfm, buf, len);
+ }
+
+ ctx->fallback = false;
+
/* Free old secret if any */
qat_dh_clear_ctx(dev, ctx);
@@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->ftfm);
+
return ctx->p_size;
}
@@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(numa_node_id());
+ const char *alg = kpp_alg_name(tfm);
+ unsigned int reqsize;
if (!inst)
return -EINVAL;
- kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+ ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm));
+
+ reqsize = max(sizeof(struct qat_asym_request) + 64,
+ sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm));
+
+ kpp_set_reqsize(tfm, reqsize);
ctx->p_size = 0;
ctx->g2 = false;
@@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ if (ctx->ftfm)
+ crypto_free_kpp(ctx->ftfm);
+
qat_dh_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
}
@@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = {
static struct kpp_alg dh = {
.set_secret = qat_dh_set_secret,
- .generate_public_key = qat_dh_compute_value,
- .compute_shared_secret = qat_dh_compute_value,
+ .generate_public_key = qat_dh_generate_public_key,
+ .compute_shared_secret = qat_dh_compute_shared_secret,
.max_size = qat_dh_max_size,
.init = qat_dh_init_tfm,
.exit = qat_dh_exit_tfm,
@@ -1276,6 +1329,7 @@ static struct kpp_alg dh = {
.cra_priority = 1000,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct qat_dh_ctx),
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
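The fallback decision is made once, in qat_dh_set_secret(): params.p_size is a byte count, so p_size << 3 converts it to bits for qat_dh_check_params_length(), and any length the device cannot handle flips ctx->fallback so that all later kpp operations are forwarded to the software ftfm. Condensed (a restatement of the hunks above, not new logic):

    /* p_size is in bytes; the length check takes bits. */
    unsigned int bits = params.p_size << 3;    /* e.g. 1024 bytes -> 8192 bits */

    ctx->fallback = qat_dh_check_params_length(bits) != 0;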
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 76baed0a76..338acf29c4 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!bufl))
return -ENOMEM;
} else {
- bufl = &buf->sgl_src.sgl_hdr;
+ bufl = container_of(&buf->sgl_src.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(bufl, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_src_valid = true;
}
@@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!buflout))
goto err_in;
} else {
- buflout = &buf->sgl_dst.sgl_hdr;
+ buflout = container_of(&buf->sgl_dst.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(buflout, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_dst_valid = true;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index d87e4f35ac..85bc32a9ec 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -15,14 +15,17 @@ struct qat_alg_buf {
} __packed;
struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
+ /* New members must be added within the __struct_group() macro below. */
+ __struct_group(qat_alg_buf_list_hdr, hdr, __packed,
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ );
struct qat_alg_buf buffers[];
} __packed;
struct qat_alg_fixed_buf_list {
- struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf_list_hdr sgl_hdr;
struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
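__struct_group() generates both the full struct and a header-only struct (qat_alg_buf_list_hdr) sharing one layout, presumably so that qat_alg_fixed_buf_list no longer embeds a struct whose last member is a flexible array. It is also what makes the container_of() conversion in qat_bl.c well defined, since hdr is the first member of qat_alg_buf_list. A minimal illustration of the pattern:

    struct qat_alg_fixed_buf_list fixed = { };
    struct qat_alg_buf_list *bl;

    /* hdr sits at offset 0: a type-safe view of the same memory, not a copy. */
    bl = container_of(&fixed.sgl_hdr, struct qat_alg_buf_list, hdr);
    bl->num_bufs = 1;          /* bl->buffers[] is backed by fixed.descriptors[] */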
diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
new file mode 100644
index 0000000000..892c2283a5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id)
+{
+ struct adf_accel_dev *accel_dev;
+ struct qat_migdev_ops *ops;
+ struct qat_mig_dev *mdev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ if (!accel_dev)
+ return ERR_PTR(-ENODEV);
+
+ ops = GET_VFMIG_OPS(accel_dev);
+ if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open ||
+ !ops->close || !ops->suspend || !ops->resume || !ops->save_state ||
+ !ops->load_state || !ops->save_setup || !ops->load_setup)
+ return ERR_PTR(-EINVAL);
+
+ mdev = kmalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ mdev->vf_id = vf_id;
+ mdev->parent_accel_dev = accel_dev;
+
+ return mdev;
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_create);
+
+int qat_vfmig_init(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->init(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_init);
+
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->cleanup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_cleanup);
+
+void qat_vfmig_reset(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->reset(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_reset);
+
+int qat_vfmig_open(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->open(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_open);
+
+void qat_vfmig_close(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ GET_VFMIG_OPS(accel_dev)->close(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_close);
+
+int qat_vfmig_suspend(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->suspend(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend);
+
+int qat_vfmig_resume(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->resume(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_resume);
+
+int qat_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_state);
+
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_setup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_setup);
+
+int qat_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_state);
+
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_setup);
+
+void qat_vfmig_destroy(struct qat_mig_dev *mdev)
+{
+ kfree(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_destroy);
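These exports are the interface a vfio-pci variant driver would drive; a minimal save-side flow under that assumption (error handling abbreviated; pdev and vf_id are supplied by the caller):

    struct qat_mig_dev *mdev;

    mdev = qat_vfmig_create(pdev, vf_id);
    if (IS_ERR(mdev))
            return PTR_ERR(mdev);

    if (qat_vfmig_init(mdev))
            goto err_destroy;
    if (qat_vfmig_open(mdev))
            goto err_cleanup;

    qat_vfmig_suspend(mdev);          /* quiesce the VF */
    qat_vfmig_save_setup(mdev);
    qat_vfmig_save_state(mdev);       /* serialize into mdev->state */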
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 38d6f8e162..cfd3bd7577 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index af14090cc4..6e24d57e6b 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -5,6 +5,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 0153c85ce7..64b54e92b2 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
+ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16e..f4ee4c2e00 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>