Diffstat (limited to 'drivers/crypto/intel')
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto.h                        |  25
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c             |   1
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c                   | 118
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.c                  |  30
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.h                  |   8
-rw-r--r--  drivers/crypto/intel/qat/Kconfig                             |  14
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c       |  72
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c         |  72
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile                 |   2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h      |   3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_aer.c                | 135
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h        |   1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h         |  10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c            |   4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c       |  56
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h       |   1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.c          |  20
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.h          |  21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c    |  53
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c   |  76
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c         |  25
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c               |  12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c                |  11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h           |   7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c        |  64
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h        |  21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c      |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c      |   6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c              |  38
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c              |  37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_vf_isr.c             |   2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.c             |   4
32 files changed, 643 insertions, 314 deletions
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 014420f7be..2524091a5f 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -59,10 +59,8 @@ struct iaa_device_compression_mode {
const char *name;
struct aecs_comp_table_record *aecs_comp_table;
- struct aecs_decomp_table_record *aecs_decomp_table;
dma_addr_t aecs_comp_table_dma_addr;
- dma_addr_t aecs_decomp_table_dma_addr;
};
/* Representation of IAA device with wqs, populated by probe */
@@ -107,23 +105,6 @@ struct aecs_comp_table_record {
u32 reserved_padding[2];
} __packed;
-/* AECS for decompress */
-struct aecs_decomp_table_record {
- u32 crc;
- u32 xor_checksum;
- u32 low_filter_param;
- u32 high_filter_param;
- u32 output_mod_idx;
- u32 drop_init_decomp_out_bytes;
- u32 reserved[36];
- u32 output_accum_data[2];
- u32 out_bits_valid;
- u32 bit_off_indexing;
- u32 input_accum_data[64];
- u8 size_qw[32];
- u32 decomp_state[1220];
-} __packed;
-
int iaa_aecs_init_fixed(void);
void iaa_aecs_cleanup_fixed(void);
@@ -136,9 +117,6 @@ struct iaa_compression_mode {
int ll_table_size;
u32 *d_table;
int d_table_size;
- u32 *header_table;
- int header_table_size;
- u16 gen_decomp_table_flags;
iaa_dev_comp_init_fn_t init;
iaa_dev_comp_free_fn_t free;
};
@@ -148,9 +126,6 @@ int add_iaa_compression_mode(const char *name,
int ll_table_size,
const u32 *d_table,
int d_table_size,
- const u8 *header_table,
- int header_table_size,
- u16 gen_decomp_table_flags,
iaa_dev_comp_init_fn_t init,
iaa_dev_comp_free_fn_t free);
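
With the decompress AECS plumbing gone, registering a compression mode needs only the two Huffman tables. A minimal sketch of a hypothetical caller of the trimmed-down API (table names and contents are placeholders, not real Huffman statistics):

	static u32 my_ll_table[286];	/* literal/length codes */
	static u32 my_d_table[30];	/* distance codes */

	static int register_my_mode(void)
	{
		return add_iaa_compression_mode("my-mode",
						my_ll_table, sizeof(my_ll_table),
						my_d_table, sizeof(my_d_table),
						NULL, NULL);	/* no init/free hooks */
	}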
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
index 45cf5d74f0..19d9a333ac 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
@@ -78,7 +78,6 @@ int iaa_aecs_init_fixed(void)
sizeof(fixed_ll_sym),
fixed_d_sym,
sizeof(fixed_d_sym),
- NULL, 0, 0,
init_fixed_mode, NULL);
if (!ret)
pr_debug("IAA fixed compression mode initialized\n");
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 64a2e87a55..b2191ade90 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -258,16 +258,14 @@ static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
kfree(mode->name);
kfree(mode->ll_table);
kfree(mode->d_table);
- kfree(mode->header_table);
kfree(mode);
}
/*
- * IAA Compression modes are defined by an ll_table, a d_table, and an
- * optional header_table. These tables are typically generated and
- * captured using statistics collected from running actual
- * compress/decompress workloads.
+ * IAA Compression modes are defined by an ll_table and a d_table.
+ * These tables are typically generated and captured using statistics
+ * collected from running actual compress/decompress workloads.
*
* A module or other kernel code can add and remove compression modes
* with a given name using the exported @add_iaa_compression_mode()
@@ -315,9 +313,6 @@ EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
* @ll_table_size: The ll table size in bytes
* @d_table: The d table
* @d_table_size: The d table size in bytes
- * @header_table: Optional header table
- * @header_table_size: Optional header table size in bytes
- * @gen_decomp_table_flags: Otional flags used to generate the decomp table
* @init: Optional callback function to init the compression mode data
* @free: Optional callback function to free the compression mode data
*
@@ -330,9 +325,6 @@ int add_iaa_compression_mode(const char *name,
int ll_table_size,
const u32 *d_table,
int d_table_size,
- const u8 *header_table,
- int header_table_size,
- u16 gen_decomp_table_flags,
iaa_dev_comp_init_fn_t init,
iaa_dev_comp_free_fn_t free)
{
@@ -370,16 +362,6 @@ int add_iaa_compression_mode(const char *name,
mode->d_table_size = d_table_size;
}
- if (header_table) {
- mode->header_table = kzalloc(header_table_size, GFP_KERNEL);
- if (!mode->header_table)
- goto free;
- memcpy(mode->header_table, header_table, header_table_size);
- mode->header_table_size = header_table_size;
- }
-
- mode->gen_decomp_table_flags = gen_decomp_table_flags;
-
mode->init = init;
mode->free = free;
@@ -420,10 +402,6 @@ static void free_device_compression_mode(struct iaa_device *iaa_device,
if (device_mode->aecs_comp_table)
dma_free_coherent(dev, size, device_mode->aecs_comp_table,
device_mode->aecs_comp_table_dma_addr);
- if (device_mode->aecs_decomp_table)
- dma_free_coherent(dev, size, device_mode->aecs_decomp_table,
- device_mode->aecs_decomp_table_dma_addr);
-
kfree(device_mode);
}
@@ -440,73 +418,6 @@ static int check_completion(struct device *dev,
bool compress,
bool only_once);
-static int decompress_header(struct iaa_device_compression_mode *device_mode,
- struct iaa_compression_mode *mode,
- struct idxd_wq *wq)
-{
- dma_addr_t src_addr, src2_addr;
- struct idxd_desc *idxd_desc;
- struct iax_hw_desc *desc;
- struct device *dev;
- int ret = 0;
-
- idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
- if (IS_ERR(idxd_desc))
- return PTR_ERR(idxd_desc);
-
- desc = idxd_desc->iax_hw;
-
- dev = &wq->idxd->pdev->dev;
-
- src_addr = dma_map_single(dev, (void *)mode->header_table,
- mode->header_table_size, DMA_TO_DEVICE);
- dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n",
- __func__, mode->name, src_addr, dev,
- mode->header_table, mode->header_table_size);
- if (unlikely(dma_mapping_error(dev, src_addr))) {
- dev_dbg(dev, "dma_map_single err, exiting\n");
- ret = -ENOMEM;
- return ret;
- }
-
- desc->flags = IAX_AECS_GEN_FLAG;
- desc->opcode = IAX_OPCODE_DECOMPRESS;
-
- desc->src1_addr = (u64)src_addr;
- desc->src1_size = mode->header_table_size;
-
- src2_addr = device_mode->aecs_decomp_table_dma_addr;
- desc->src2_addr = (u64)src2_addr;
- desc->src2_size = 1088;
- dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n",
- __func__, mode->name, desc->src2_addr, dev, desc->src2_size);
- desc->max_dst_size = 0; // suppressed output
-
- desc->decompr_flags = mode->gen_decomp_table_flags;
-
- desc->priv = 0;
-
- desc->completion_addr = idxd_desc->compl_dma;
-
- ret = idxd_submit_desc(wq, idxd_desc);
- if (ret) {
- pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret);
- goto out;
- }
-
- ret = check_completion(dev, idxd_desc->iax_completion, false, false);
- if (ret)
- dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n",
- __func__, mode->name, ret);
- else
- dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__,
- mode->name);
-out:
- dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE);
-
- return ret;
-}
-
static int init_device_compression_mode(struct iaa_device *iaa_device,
struct iaa_compression_mode *mode,
int idx, struct idxd_wq *wq)
@@ -529,24 +440,11 @@ static int init_device_compression_mode(struct iaa_device *iaa_device,
if (!device_mode->aecs_comp_table)
goto free;
- device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size,
- &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL);
- if (!device_mode->aecs_decomp_table)
- goto free;
-
/* Add Huffman table to aecs */
memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
- if (mode->header_table) {
- ret = decompress_header(device_mode, mode, wq);
- if (ret) {
- pr_debug("iaa header decompression failed: ret=%d\n", ret);
- goto free;
- }
- }
-
if (mode->init) {
ret = mode->init(device_mode);
if (ret)
@@ -1600,6 +1498,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
+ u64 start_time_ns;
int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1673,8 +1572,10 @@ static int iaa_comp_acompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
&req->dlen, &compression_crc, disable_async);
+ update_max_comp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1721,6 +1622,7 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
struct iaa_wq *iaa_wq;
struct device *dev;
struct idxd_wq *wq;
+ u64 start_time_ns;
int order = -1;
cpu = get_cpu();
@@ -1777,8 +1679,10 @@ alloc_dest:
dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, true);
+ update_max_decomp_delay_ns(start_time_ns);
if (ret == -EOVERFLOW) {
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
req->dlen *= 2;
@@ -1809,6 +1713,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
struct device *dev;
+ u64 start_time_ns;
struct idxd_wq *wq;
if (!iaa_crypto_enabled) {
@@ -1868,8 +1773,10 @@ static int iaa_comp_adecompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
+ start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, false);
+ update_max_decomp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1920,6 +1827,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
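
The new cra_flags line deserves a note: CRYPTO_ALG_ASYNC advertises that requests may complete asynchronously via a callback, so callers that cannot handle that must mask it out at allocation time. A short sketch using the standard crypto API:

	/* Request a synchronous-only acomp implementation; with the flag
	 * above set, "deflate-iaa" is no longer handed to such callers. */
	struct crypto_acomp *tfm;

	tfm = crypto_alloc_acomp("deflate", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);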
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
index 2e3b7b73af..c9f83af4b3 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -22,8 +22,6 @@ static u64 total_decomp_calls;
static u64 total_sw_decomp_calls;
static u64 max_comp_delay_ns;
static u64 max_decomp_delay_ns;
-static u64 max_acomp_delay_ns;
-static u64 max_adecomp_delay_ns;
static u64 total_comp_bytes_out;
static u64 total_decomp_bytes_in;
static u64 total_completion_einval_errors;
@@ -92,26 +90,6 @@ void update_max_decomp_delay_ns(u64 start_time_ns)
max_decomp_delay_ns = time_diff;
}
-void update_max_acomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_acomp_delay_ns)
- max_acomp_delay_ns = time_diff;
-}
-
-void update_max_adecomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_adecomp_delay_ns)
- max_adecomp_delay_ns = time_diff;
-}
-
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
@@ -151,8 +129,6 @@ static void reset_iaa_crypto_stats(void)
total_sw_decomp_calls = 0;
max_comp_delay_ns = 0;
max_decomp_delay_ns = 0;
- max_acomp_delay_ns = 0;
- max_adecomp_delay_ns = 0;
total_comp_bytes_out = 0;
total_decomp_bytes_in = 0;
total_completion_einval_errors = 0;
@@ -275,17 +251,11 @@ int __init iaa_crypto_debugfs_init(void)
return -ENODEV;
iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
- if (!iaa_crypto_debugfs_root)
- return -ENOMEM;
debugfs_create_u64("max_comp_delay_ns", 0644,
iaa_crypto_debugfs_root, &max_comp_delay_ns);
debugfs_create_u64("max_decomp_delay_ns", 0644,
iaa_crypto_debugfs_root, &max_decomp_delay_ns);
- debugfs_create_u64("max_acomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_comp_delay_ns);
- debugfs_create_u64("max_adecomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_decomp_delay_ns);
debugfs_create_u64("total_comp_calls", 0644,
iaa_crypto_debugfs_root, &total_comp_calls);
debugfs_create_u64("total_decomp_calls", 0644,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c10b87b86f..c916ca83f0 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -15,8 +15,6 @@ void update_total_sw_decomp_calls(void);
void update_total_decomp_bytes_in(int n);
void update_max_comp_delay_ns(u64 start_time_ns);
void update_max_decomp_delay_ns(u64 start_time_ns);
-void update_max_acomp_delay_ns(u64 start_time_ns);
-void update_max_adecomp_delay_ns(u64 start_time_ns);
void update_completion_einval_errs(void);
void update_completion_timeout_errs(void);
void update_completion_comp_buf_overflow_errs(void);
@@ -26,6 +24,8 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
+static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
+
#else
static inline int iaa_crypto_debugfs_init(void) { return 0; }
static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -37,8 +37,6 @@ static inline void update_total_sw_decomp_calls(void) {}
static inline void update_total_decomp_bytes_in(int n) {}
static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_acomp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {}
static inline void update_completion_einval_errs(void) {}
static inline void update_completion_timeout_errs(void) {}
static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -48,6 +46,8 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
+static inline u64 iaa_get_ts(void) { return 0; }
+
#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
#endif
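
The iaa_get_ts() helper lets every call site use one unconditional pattern; when CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS is off, it and the update_max_*_delay_ns() calls are empty inlines the compiler discards. A sketch of the pattern, where do_hw_op() is a stand-in for iaa_compress()/iaa_decompress():

	u64 start_time_ns = iaa_get_ts();	/* ktime_get_ns(), or 0 without stats */

	ret = do_hw_op();			/* stand-in for the real operation */
	update_max_comp_delay_ns(start_time_ns); /* no-op without stats */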
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index c120f6715a..02fb8abe4e 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF
To compile this as a module, choose M here: the module
will be called qat_c62xvf.
+
+config CRYPTO_DEV_QAT_ERROR_INJECTION
+ bool "Support for Intel(R) QAT Devices Heartbeat Error Injection"
+ depends on CRYPTO_DEV_QAT
+ depends on DEBUG_FS
+ help
+   Enables a mechanism that allows injecting a heartbeat error on
+   Intel(R) QuickAssist devices for testing purposes.
+
+   This functionality is available via a debugfs entry of the Intel(R)
+   QuickAssist device.
+
+   This is intended for developer use only.
+   If unsure, say N.
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 7b8abfb797..1d0ef47a9f 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -361,61 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- /* If dcc, all rings handle compression requests */
- if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
- for (i = 0; i < RP_GROUP_COUNT; i++)
- rps[i] = COMP;
- goto set_mask;
- }
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
-set_mask:
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -441,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -504,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->fw_mmp_name = ADF_420XX_MMP;
hw_data->uof_get_name = uof_get_name_420xx;
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 7a5c5f9711..fb34fd7f03 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -320,61 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT];
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- /* If dcc, all rings handle compression requests */
- if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
- for (i = 0; i < RP_GROUP_COUNT; i++)
- rps[i] = COMP;
- goto set_mask;
- }
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
-set_mask:
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -407,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -487,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6908727bff..5915cde8a7 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
adf_gen2_pfvf.o adf_gen4_pfvf.o
+
+intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index a16c7e6edc..08658c3a01 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -248,6 +248,7 @@ struct adf_hw_device_data {
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
+ int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -332,6 +333,7 @@ struct adf_accel_vf_info {
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
+ bool restarting;
u8 vf_compat_ver;
};
@@ -401,6 +403,7 @@ struct adf_accel_dev {
struct adf_error_counters ras_errors;
struct mutex state_lock; /* protect state of the device */
bool is_vf;
+ bool autoreset_on_error;
u32 accel_id;
};
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 621d14ea3b..04260f61d0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -7,8 +7,15 @@
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_pfvf_pf_msg.h"
+
+struct adf_fatal_error_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
static struct workqueue_struct *device_reset_wq;
+static struct workqueue_struct *device_sriov_wq;
static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -26,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ if (accel_dev->hw_device->exit_arb) {
+ dev_dbg(&pdev->dev, "Disabling arbitration\n");
+ accel_dev->hw_device->exit_arb(accel_dev);
+ }
+ adf_error_notifier(accel_dev);
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_restarting_notify(accel_dev);
+ adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
+ pci_clear_master(pdev);
+ adf_dev_down(accel_dev, false);
+
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -37,6 +57,13 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
+/* sriov dev data */
+struct adf_sriov_dev_data {
+ struct adf_accel_dev *accel_dev;
+ struct completion compl;
+ struct work_struct sriov_work;
+};
+
void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -82,35 +109,45 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
}
}
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+ struct adf_sriov_dev_data *sriov_data =
+ container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+ adf_reenable_sriov(sriov_data->accel_dev);
+ complete(&sriov_data->compl);
+}
+
static void adf_device_reset_worker(struct work_struct *work)
{
struct adf_reset_dev_data *reset_data =
container_of(work, struct adf_reset_dev_data, reset_work);
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+ unsigned long wait_jiffies = msecs_to_jiffies(10000);
+ struct adf_sriov_dev_data sriov_data;
adf_dev_restarting_notify(accel_dev);
if (adf_dev_restart(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
+
+ sriov_data.accel_dev = accel_dev;
+ init_completion(&sriov_data.compl);
+ INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+ queue_work(device_sriov_wq, &sriov_data.sriov_work);
+ if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+ adf_pf2vf_notify_restarted(accel_dev);
+
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /*
- * The dev is back alive. Notify the caller if in sync mode
- *
- * If device restart will take a more time than expected,
- * the schedule_reset() function can timeout and exit. This can be
- * detected by calling the completion_done() function. In this case
- * the reset_data structure needs to be freed here.
- */
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
else
complete(&reset_data->compl);
@@ -145,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
if (!timeout) {
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
+ cancel_work_sync(&reset_data->reset_work);
ret = -EFAULT;
- } else {
- kfree(reset_data);
}
+ kfree(reset_data);
return ret;
}
return 0;
@@ -157,14 +194,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int res = 0;
if (!accel_dev) {
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+
+ if (!pdev->is_busmaster)
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ res = adf_dev_up(accel_dev, false);
+ if (res && res != -EALREADY)
return PCI_ERS_RESULT_DISCONNECT;
+ adf_reenable_sriov(accel_dev);
+ adf_pf2vf_notify_restarted(accel_dev);
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -181,11 +229,62 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->autoreset_on_error)
+ return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
+
+ return 0;
+}
+
+static void adf_notify_fatal_error_worker(struct work_struct *work)
+{
+ struct adf_fatal_error_data *wq_data =
+ container_of(work, struct adf_fatal_error_data, work);
+ struct adf_accel_dev *accel_dev = wq_data->accel_dev;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ adf_error_notifier(accel_dev);
+
+ if (!accel_dev->is_vf) {
+ /* Disable arbitration to stop processing of new requests */
+ if (accel_dev->autoreset_on_error && hw_device->exit_arb)
+ hw_device->exit_arb(accel_dev);
+ if (accel_dev->pf.vf_info)
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_autoreset(accel_dev);
+ }
+
+ kfree(wq_data);
+}
+
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fatal_error_data *wq_data;
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->accel_dev = accel_dev;
+ INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker);
+ adf_misc_wq_queue_work(&wq_data->work);
+
+ return 0;
+}
+
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
WQ_MEM_RECLAIM, 0);
- return !device_reset_wq ? -EFAULT : 0;
+ if (!device_reset_wq)
+ return -EFAULT;
+
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ if (!device_sriov_wq)
+ return -EFAULT;
+
+ return 0;
}
void adf_exit_aer(void)
@@ -193,4 +292,8 @@ void adf_exit_aer(void)
if (device_reset_wq)
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
+
+ if (device_sriov_wq)
+ destroy_workqueue(device_sriov_wq);
+ device_sriov_wq = NULL;
}
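
A note on the reset_data lifetime changes above: the old code used completion_done() to guess whether the synchronous waiter had timed out, which raced with the worker. Ownership is now unambiguous: for ADF_DEV_RESET_ASYNC the worker always frees reset_data, while in sync mode the waiter always frees it, first calling cancel_work_sync() on timeout so the worker can no longer touch the allocation.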
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 322b76903a..e015ad6cac 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -49,5 +49,6 @@
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f06188033a..57328249c8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -40,6 +40,7 @@ enum adf_event {
ADF_EVENT_SHUTDOWN,
ADF_EVENT_RESTARTING,
ADF_EVENT_RESTARTED,
+ ADF_EVENT_FATAL_ERROR,
};
struct service_hndl {
@@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+void adf_error_notifier(struct adf_accel_dev *accel_dev);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -84,12 +87,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -188,6 +193,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
@@ -208,6 +214,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 86ee36feef..f07b748795 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake)
/**
* adf_clean_vf_map() - Cleans VF id mapings
- *
- * Function cleans internal ids for virtual functions.
* @vf: flag indicating whether mappings is cleaned
* for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
*/
void adf_clean_vf_map(bool vf)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index f752653ccb..d28e192194 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -4,6 +4,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
+#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
@@ -433,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
+
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ unsigned int ae_mask, start_id, worker_obj_cnt, i;
+ u16 ring_to_svc_map;
+ int rp_group;
+
+ if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
+ !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
+ return 0;
+
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+ start_id = worker_obj_cnt - RP_GROUP_COUNT;
+
+ for (i = start_id; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return 0;
+
+ switch (hw_data->uof_get_obj_type(accel_dev, i)) {
+ case ADF_FW_SYM_OBJ:
+ rps[rp_group] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[rp_group] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[rp_group] = COMP;
+ break;
+ default:
+ rps[rp_group] = 0;
+ break;
+ }
+ }
+
+set_mask:
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
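
The final packing interleaves the two ring-pair groups across the four ring-pair slots of the 16-bit map. A worked example, assuming the per-slot shifts from the driver's config headers are 3 bits apart (0, 3, 6, 9):

	/* rps[RP_GROUP_0] = SYM, rps[RP_GROUP_1] = ASYM yields the classic
	 * "sym;asym" layout: ring pairs 0 and 2 serve SYM, 1 and 3 ASYM. */
	u16 map = SYM  << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
		  ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
		  SYM  << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
		  ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT;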
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 7d8a774cad..c6e80df5a8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index 13f48d2f6d..b19aa1ef8e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -23,12 +23,6 @@
#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
-/* Heartbeat counter pair */
-struct hb_cnt_pair {
- __u16 resp_heartbeat_cnt;
- __u16 req_heartbeat_cnt;
-};
-
static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
{
u64 curr_time = adf_clock_get_current_time();
@@ -211,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time;
+
+ if (time_since_reset < ADF_CFG_HB_RESET_MS)
+ return;
+
+ accel_dev->heartbeat->last_hb_reset_time = curr_time;
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n");
+}
+
void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status)
{
@@ -235,6 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
"Heartbeat ERROR: QAT is not responding.\n");
*hb_status = HB_DEV_UNRESPONSIVE;
hb->hb_failed_counter++;
+ adf_heartbeat_reset(accel_dev);
return;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
index b22e3cb297..16fdfb48b1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -13,17 +13,26 @@ struct dentry;
#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
#define ADF_CFG_HB_COUNT_THRESHOLD 3
+#define ADF_CFG_HB_RESET_MS 5000
+
enum adf_device_heartbeat_status {
HB_DEV_UNRESPONSIVE = 0,
HB_DEV_ALIVE,
HB_DEV_UNSUPPORTED,
};
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
struct adf_heartbeat {
unsigned int hb_sent_counter;
unsigned int hb_failed_counter;
unsigned int hb_timer;
u64 last_hb_check_time;
+ u64 last_hb_reset_time;
bool ctrs_cnt_checked;
struct hb_dma_addr {
dma_addr_t phy_addr;
@@ -35,6 +44,9 @@ struct adf_heartbeat {
struct dentry *cfg;
struct dentry *sent;
struct dentry *failed;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ struct dentry *inject_error;
+#endif
} dbgfs;
};
@@ -51,6 +63,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status);
void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ return -EPERM;
+}
+#endif
+
#else
static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
index 2661af6a2e..cccdff24b4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -155,6 +155,44 @@ static const struct file_operations adf_hb_cfg_fops = {
.write = adf_hb_cfg_write,
};
+static ssize_t adf_hb_error_inject_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct adf_accel_dev *accel_dev = file->private_data;
+ char buf[3];
+ int ret;
+
+ /* last byte left as string termination */
+ if (*ppos != 0 || count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ ret = adf_heartbeat_inject_error(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat error injection failed with status %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n");
+
+ return count;
+}
+
+static const struct file_operations adf_hb_error_inject_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = adf_hb_error_inject_write,
+};
+
void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
@@ -171,6 +209,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
&hb->hb_failed_counter, &adf_hb_stats_fops);
hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
accel_dev, &adf_hb_cfg_fops);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) {
+ struct dentry *inject_error __maybe_unused;
+
+ inject_error = debugfs_create_file("inject_error", 0200,
+ hb->dbgfs.base_dir, accel_dev,
+ &adf_hb_error_inject_fops);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ hb->dbgfs.inject_error = inject_error;
+#endif
+ }
}
EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
@@ -189,6 +238,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
hb->dbgfs.failed = NULL;
debugfs_remove(hb->dbgfs.cfg);
hb->dbgfs.cfg = NULL;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ debugfs_remove(hb->dbgfs.inject_error);
+ hb->dbgfs.inject_error = NULL;
+#endif
debugfs_remove(hb->dbgfs.base_dir);
hb->dbgfs.base_dir = NULL;
}
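
Because the write handler insists on exactly two bytes, `echo 1` into the node is the natural trigger. A hedged userspace sketch; the qat_* directory name below is illustrative, not a fixed path:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Device directory name is illustrative; see /sys/kernel/debug. */
		int fd = open("/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/inject_error",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1\n", 2);	/* handler requires exactly two bytes */
		close(fd);
		return 0;
	}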
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
new file mode 100644
index 0000000000..a3b474bdef
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/random.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+
+#define MAX_HB_TICKS 0xFFFFFFFF
+
+static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ accel_dev->heartbeat->hb_timer = 0;
+
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
+ return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS);
+}
+
+static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae,
+ u32 thr)
+{
+ struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ size_t thr_id = ae * hb_ctrs + thr;
+ u16 num_rsp = stats[thr_id].resp_heartbeat_cnt;
+
+ /*
+ * Inject live.req != live.rsp and live.rsp == last.rsp
+ * to trigger the heartbeat error detection
+ */
+ stats[thr_id].req_heartbeat_cnt++;
+ stats += (max_aes * hb_ctrs);
+ stats[thr_id].resp_heartbeat_cnt = num_rsp;
+}
+
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ u32 rand, rand_ae, rand_thr;
+ unsigned long ae_mask;
+ int ret;
+
+ ae_mask = hw_device->ae_mask;
+
+ do {
+ /* Ensure we have a valid ae */
+ get_random_bytes(&rand, sizeof(rand));
+ rand_ae = rand % max_aes;
+ } while (!test_bit(rand_ae, &ae_mask));
+
+ get_random_bytes(&rand, sizeof(rand));
+ rand_thr = rand % hb_ctrs;
+
+ /* Increase the heartbeat timer to prevent FW updating HB counters */
+ ret = adf_hb_set_timer_to_max(accel_dev);
+ if (ret)
+ return ret;
+
+ /* Configure worker threads to stop processing any packet */
+ ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr);
+ if (ret)
+ return ret;
+
+ /* Change HB counters memory to simulate a hang */
+ adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr);
+
+ return 0;
+}
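
adf_set_hb_counters_fail() leans on the heartbeat DMA buffer holding two back-to-back counter arrays; a sketch of the assumed layout and indexing:

	/* heartbeat->dma.virt_addr, as assumed above:
	 *
	 *   struct hb_cnt_pair live[max_aes * hb_ctrs];  - updated by firmware
	 *   struct hb_cnt_pair last[max_aes * hb_ctrs];  - previous-check snapshot
	 */
	size_t thr_id = ae * hb_ctrs + thr;
	struct hb_cnt_pair *live = &stats[thr_id];
	struct hb_cnt_pair *last = &stats[max_aes * hb_ctrs + thr_id];

	live->req_heartbeat_cnt++;			      /* live.req != live.rsp */
	last->resp_heartbeat_cnt = live->resp_heartbeat_cnt; /* live.rsp == last.rsp */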
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index da69566992..65bd26b25a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);
+
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr)
+{
+ void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ const u32 *thd_2_arb_cfg;
+ struct arb_info info;
+ u32 ae_thr_map;
+
+ if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr)
+ thr = ADF_AE_ADMIN_THREAD;
+
+ hw_data->get_arb_info(&info);
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+ if (!thd_2_arb_cfg)
+ return -EFAULT;
+
+ /* Disable scheduling for this particular AE and thread */
+ ae_thr_map = *(thd_2_arb_cfg + ae);
+ ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2)));
+
+ WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae,
+ ae_thr_map);
+ return 0;
+}
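
In adf_disable_arb_thd() each thread owns a 4-bit field of the per-AE WT2SAM word, and thr * BIT(2) is simply thr * 4. Worked example for thr = 2:

	u32 ae_thr_map = 0xFFFF;

	/* GENMASK(3, 0) << (2 * 4) == 0x0F00, so only thread 2's nibble
	 * is cleared; the other threads keep their arbitration bits. */
	ae_thr_map &= ~(GENMASK(3, 0) << (2 * 4));	/* -> 0xF0FF */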
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f43ae91115..74f0818c07 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
return 0;
}
+void adf_error_notifier(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+
+ list_for_each_entry(service, &service_table, list) {
+ if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send error event to %s.\n",
+ service->name);
+ }
+}
+
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 3557a0d6de..cae1aee547 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
if (ras_ops->handle_interrupt &&
ras_ops->handle_interrupt(accel_dev, &reset_required)) {
- if (reset_required)
+ if (reset_required) {
dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to notify fatal error\n");
+ }
+
return true;
}
@@ -272,7 +277,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
- irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ irqs = kcalloc_node(msix_num_entries, sizeof(*irqs),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!irqs)
return -ENOMEM;
@@ -375,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
/**
* adf_init_misc_wq() - Init misc workqueue
*
- * Function init workqueue 'qat_misc_wq' for general purpose.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_misc_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a424389..d1b3ef9cad 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
@@ -99,6 +99,8 @@ enum pf2vf_msgtype {
ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+ ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04,
+ ADF_PF2VF_MSGTYPE_RESTARTED = 0x05,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
};
@@ -112,6 +114,7 @@ enum vf2pf_msgtype {
ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+ ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
};
@@ -124,8 +127,10 @@ enum pfvf_compatibility_version {
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
/* Ring to service mapping support for non-standard mappings */
ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Fallback compat */
+ ADF_PFVF_COMPAT_FALLBACK = 0x05,
/* Reference to the latest version */
- ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x05,
};
/* PF->VF Version Response */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d7..0e31f4b418 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -1,21 +1,83 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/delay.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"
+#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100
+#define ADF_VF_SHUTDOWN_RETRY 100
+
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
struct adf_accel_vf_info *vf;
struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ vf->restarting = false;
+ if (!vf->init)
+ continue;
+ if (adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
+ else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+ vf->restarting = true;
+ }
+}
+
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+ int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ int i, retries = ADF_VF_SHUTDOWN_RETRY;
+ struct adf_accel_vf_info *vf;
+ bool vf_running;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");
+ do {
+ vf_running = false;
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)
+ if (vf->restarting)
+ vf_running = true;
+ if (!vf_running)
+ break;
+ msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY);
+ } while (--retries);
+
+ if (vf_running)
+ dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n");
+}
+
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarted msg to VF%d\n", i);
+ }
+}
+
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send fatal error msg to VF%d\n", i);
}
}
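
The wait loop above polls up to ADF_VF_SHUTDOWN_RETRY times with a 100 ms sleep, i.e. roughly ten seconds before warning that some VFs are still running. A VF that negotiated ADF_PFVF_COMPAT_FALLBACK is expected to acknowledge RESTARTING with RESTARTING_COMPLETE once it has quiesced; a hedged sketch of that reply (the real helper lives in the VF-side message code, outside this diff):

	void adf_vf2pf_notify_restarting_complete(struct adf_accel_dev *accel_dev)
	{
		struct pfvf_message msg = {
			.type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE,
		};

		if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK)
			return;

		if (adf_send_vf2pf_msg(accel_dev, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send Restarting complete event to PF\n");
	}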
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac8..f203d88c91 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
@@ -5,7 +5,28 @@
#include "adf_accel_devices.h"
+#if defined(CONFIG_PCI_IOV)
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 compat);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbc..9ab93fbfef 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
vf_info->init = false;
}
break;
+ case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting Complete received from VF%d\n", vf_nr);
+ vf_info->restarting = false;
+ vf_info->init = false;
+ }
+ break;
case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b63..dc284a089c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
@@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
adf_pf2vf_handle_pf_restarting(accel_dev);
return false;
+ case ADF_PF2VF_MSGTYPE_RESTARTED:
+ dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n");
+ return true;
+ case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
+ dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n");
+ return true;
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index f44025bb6f..87a70c00c4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
/* This ptr will be populated when VFs will be created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
- vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
@@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
return pci_enable_sriov(pdev, totalvfs);
}
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ unsigned long val = 0;
+
+ if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SRIOV_ENABLED, cfg))
+ return;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC))
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC))
+ return;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+ adf_enable_sriov(accel_dev);
+}
+
/**
* adf_disable_sriov() - Disable SRIOV for the device
* @accel_dev: Pointer to accel device.
@@ -103,6 +128,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
return;
adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
@@ -115,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
mutex_destroy(&vf->pf2vf_lock);
- kfree(accel_dev->pf.vf_info);
- accel_dev->pf.vf_info = NULL;
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+ }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -194,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (ret)
return ret;
+ val = 1;
+ adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+ &val, ADF_DEC);
+
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index d450dad32c..4e7f70d404 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute
}
static DEVICE_ATTR_RW(pm_idle_enabled);
+static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char *auto_reset;
+ struct adf_accel_dev *accel_dev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
+
+ return sysfs_emit(buf, "%s\n", auto_reset);
+}
+
+static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ bool enabled = false;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ accel_dev->autoreset_on_error = enabled;
+
+ return count;
+}
+static DEVICE_ATTR_RW(auto_reset);
+
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
@@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = {
&dev_attr_pm_idle_enabled.attr,
&dev_attr_rp2srv.attr,
&dev_attr_num_rps.attr,
+ &dev_attr_auto_reset.attr,
NULL,
};
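
Since auto_reset_store() parses its input with kstrtobool(), the attribute accepts "1"/"0", "y"/"n", and "on"/"off". Assuming the attribute lands in the driver's existing "qat" sysfs group, enabling it would look like:

	echo on > /sys/bus/pci/devices/<BDF>/qat/auto_reset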
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index b05c3957a1..cdbb2d687b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
/**
* adf_init_vf_wq() - Init workqueue for VF
*
- * Function init workqueue 'adf_vf_stop_wq' for VF.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_vf_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
index 40c8e74d1c..101c6ea416 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
@@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
/**
- * qat_crypto_vf_dev_config()
- * create dev config required to create crypto inst.
+ * qat_crypto_vf_dev_config() - create dev config required to create
+ * crypto inst.
*
* @accel_dev: Pointer to acceleration device.
*