Diffstat (limited to 'drivers/crypto/intel/qat')
-rw-r--r--  drivers/crypto/intel/qat/Kconfig                             |  14
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c       |  72
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c         |  72
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile                 |   2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h      |   3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_aer.c                | 135
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h        |   1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h         |  10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c            |   4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c       |  56
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h       |   1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.c          |  20
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.h          |  21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c    |  53
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c   |  76
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c         |  25
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c               |  12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c                |  11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h           |   7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c        |  64
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h        |  21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c      |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c      |   6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c              |  38
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c              |  37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_vf_isr.c             |   2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.c             |   4
27 files changed, 626 insertions, 149 deletions
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index c120f6715a..02fb8abe4e 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF
To compile this as a module, choose M here: the module
will be called qat_c62xvf.
+
+config CRYPTO_DEV_QAT_ERROR_INJECTION
+ bool "Support for Intel(R) QAT Devices Heartbeat Error Injection"
+ depends on CRYPTO_DEV_QAT
+ depends on DEBUG_FS
+ help
+ Enables a mechanism that allows injecting a heartbeat error on
+ Intel(R) QuickAssist devices for testing purposes.
+
+ This is intended for developer use only.
+ This functionality is available via a debugfs entry of the
+ Intel(R) QuickAssist device.
+
+ If unsure, say N.
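
For context, the debugfs attribute added by this series is write-only and accepts a single "1". A minimal user-space sketch that arms the injector might look as follows; the device directory name (qat_4xxx_0000:6b:00.0) is a hypothetical example, as the actual name depends on the device type and PCI address:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; substitute the real qat_<device>_<BDF> directory */
	const char *node =
		"/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/inject_error";
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler accepts exactly two bytes with a leading '1', e.g. "1\n" */
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}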
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 7b8abfb797..1d0ef47a9f 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -361,61 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- /* If dcc, all rings handle compression requests */
- if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
- for (i = 0; i < RP_GROUP_COUNT; i++)
- rps[i] = COMP;
- goto set_mask;
- }
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
-set_mask:
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -441,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -504,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->fw_mmp_name = ADF_420XX_MMP;
hw_data->uof_get_name = uof_get_name_420xx;
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 7a5c5f9711..fb34fd7f03 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -320,61 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
}
}
-static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-{
- enum adf_cfg_service_type rps[RP_GROUP_COUNT];
- const struct adf_fw_config *fw_config;
- u16 ring_to_svc_map;
- int i, j;
-
- fw_config = get_fw_config(accel_dev);
- if (!fw_config)
- return 0;
-
- /* If dcc, all rings handle compression requests */
- if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
- for (i = 0; i < RP_GROUP_COUNT; i++)
- rps[i] = COMP;
- goto set_mask;
- }
-
- for (i = 0; i < RP_GROUP_COUNT; i++) {
- switch (fw_config[i].ae_mask) {
- case ADF_AE_GROUP_0:
- j = RP_GROUP_0;
- break;
- case ADF_AE_GROUP_1:
- j = RP_GROUP_1;
- break;
- default:
- return 0;
- }
-
- switch (fw_config[i].obj) {
- case ADF_FW_SYM_OBJ:
- rps[j] = SYM;
- break;
- case ADF_FW_ASYM_OBJ:
- rps[j] = ASYM;
- break;
- case ADF_FW_DC_OBJ:
- rps[j] = COMP;
- break;
- default:
- rps[j] = 0;
- break;
- }
- }
-
-set_mask:
- ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
- rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
- rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-
- return ring_to_svc_map;
-}
-
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
const char * const fw_objs[], int num_objs)
{
@@ -407,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return -EINVAL;
+
+ return fw_config[obj_num].obj;
+}
+
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
const struct adf_fw_config *fw_config;
@@ -487,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->get_rp_group = get_rp_group;
hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->enable_pm = adf_gen4_enable_pm;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 6908727bff..5915cde8a7 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
adf_gen2_pfvf.o adf_gen4_pfvf.o
+
+intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index a16c7e6edc..08658c3a01 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -248,6 +248,7 @@ struct adf_hw_device_data {
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
+ int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -332,6 +333,7 @@ struct adf_accel_vf_info {
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
+ bool restarting;
u8 vf_compat_ver;
};
@@ -401,6 +403,7 @@ struct adf_accel_dev {
struct adf_error_counters ras_errors;
struct mutex state_lock; /* protect state of the device */
bool is_vf;
+ bool autoreset_on_error;
u32 accel_id;
};
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 621d14ea3b..04260f61d0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -7,8 +7,15 @@
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_pfvf_pf_msg.h"
+
+struct adf_fatal_error_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
static struct workqueue_struct *device_reset_wq;
+static struct workqueue_struct *device_sriov_wq;
static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -26,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ if (accel_dev->hw_device->exit_arb) {
+ dev_dbg(&pdev->dev, "Disabling arbitration\n");
+ accel_dev->hw_device->exit_arb(accel_dev);
+ }
+ adf_error_notifier(accel_dev);
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_restarting_notify(accel_dev);
+ adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
+ pci_clear_master(pdev);
+ adf_dev_down(accel_dev, false);
+
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -37,6 +57,13 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
+/* sriov dev data */
+struct adf_sriov_dev_data {
+ struct adf_accel_dev *accel_dev;
+ struct completion compl;
+ struct work_struct sriov_work;
+};
+
void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -82,35 +109,45 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
}
}
+static void adf_device_sriov_worker(struct work_struct *work)
+{
+ struct adf_sriov_dev_data *sriov_data =
+ container_of(work, struct adf_sriov_dev_data, sriov_work);
+
+ adf_reenable_sriov(sriov_data->accel_dev);
+ complete(&sriov_data->compl);
+}
+
static void adf_device_reset_worker(struct work_struct *work)
{
struct adf_reset_dev_data *reset_data =
container_of(work, struct adf_reset_dev_data, reset_work);
struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+ unsigned long wait_jiffies = msecs_to_jiffies(10000);
+ struct adf_sriov_dev_data sriov_data;
adf_dev_restarting_notify(accel_dev);
if (adf_dev_restart(accel_dev)) {
/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
+
+ sriov_data.accel_dev = accel_dev;
+ init_completion(&sriov_data.compl);
+ INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker);
+ queue_work(device_sriov_wq, &sriov_data.sriov_work);
+ if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies))
+ adf_pf2vf_notify_restarted(accel_dev);
+
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /*
- * The dev is back alive. Notify the caller if in sync mode
- *
- * If device restart will take a more time than expected,
- * the schedule_reset() function can timeout and exit. This can be
- * detected by calling the completion_done() function. In this case
- * the reset_data structure needs to be freed here.
- */
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
else
complete(&reset_data->compl);
@@ -145,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
if (!timeout) {
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
+ cancel_work_sync(&reset_data->reset_work);
ret = -EFAULT;
- } else {
- kfree(reset_data);
}
+ kfree(reset_data);
return ret;
}
return 0;
@@ -157,14 +194,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int res = 0;
if (!accel_dev) {
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+
+ if (!pdev->is_busmaster)
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ res = adf_dev_up(accel_dev, false);
+ if (res && res != -EALREADY)
return PCI_ERS_RESULT_DISCONNECT;
+ adf_reenable_sriov(accel_dev);
+ adf_pf2vf_notify_restarted(accel_dev);
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -181,11 +229,62 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->autoreset_on_error)
+ return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
+
+ return 0;
+}
+
+static void adf_notify_fatal_error_worker(struct work_struct *work)
+{
+ struct adf_fatal_error_data *wq_data =
+ container_of(work, struct adf_fatal_error_data, work);
+ struct adf_accel_dev *accel_dev = wq_data->accel_dev;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ adf_error_notifier(accel_dev);
+
+ if (!accel_dev->is_vf) {
+ /* Disable arbitration to stop processing of new requests */
+ if (accel_dev->autoreset_on_error && hw_device->exit_arb)
+ hw_device->exit_arb(accel_dev);
+ if (accel_dev->pf.vf_info)
+ adf_pf2vf_notify_fatal_error(accel_dev);
+ adf_dev_autoreset(accel_dev);
+ }
+
+ kfree(wq_data);
+}
+
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fatal_error_data *wq_data;
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->accel_dev = accel_dev;
+ INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker);
+ adf_misc_wq_queue_work(&wq_data->work);
+
+ return 0;
+}
+
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
WQ_MEM_RECLAIM, 0);
- return !device_reset_wq ? -EFAULT : 0;
+ if (!device_reset_wq)
+ return -EFAULT;
+
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ if (!device_sriov_wq)
+ return -EFAULT;
+
+ return 0;
}
void adf_exit_aer(void)
@@ -193,4 +292,8 @@ void adf_exit_aer(void)
if (device_reset_wq)
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
+
+ if (device_sriov_wq)
+ destroy_workqueue(device_sriov_wq);
+ device_sriov_wq = NULL;
}
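
The reworked sync-reset path above drops the completion_done() heuristic: on timeout the waiter now cancels the work and always frees reset_data itself, so exactly one side owns the allocation. A minimal sketch of that ownership pattern, with illustrative names rather than the driver's:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct reset_ctx {
	struct completion compl;
	struct work_struct work;	/* worker calls complete(&ctx->compl) */
};

static int sync_reset(struct workqueue_struct *wq, struct reset_ctx *ctx)
{
	int ret = 0;

	init_completion(&ctx->compl);
	queue_work(wq, &ctx->work);
	if (!wait_for_completion_timeout(&ctx->compl, msecs_to_jiffies(10000))) {
		/* Make sure the worker can no longer touch ctx ... */
		cancel_work_sync(&ctx->work);
		ret = -EFAULT;
	}
	kfree(ctx);	/* ... then free unconditionally: single owner */
	return ret;
}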
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 322b76903a..e015ad6cac 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -49,5 +49,6 @@
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+#define ADF_SRIOV_ENABLED "SriovEnabled"
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f06188033a..57328249c8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -40,6 +40,7 @@ enum adf_event {
ADF_EVENT_SHUTDOWN,
ADF_EVENT_RESTARTING,
ADF_EVENT_RESTARTED,
+ ADF_EVENT_FATAL_ERROR,
};
struct service_hndl {
@@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
+int adf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+void adf_error_notifier(struct adf_accel_dev *accel_dev);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -84,12 +87,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
+int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -188,6 +193,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
@@ -208,6 +214,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
+static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 86ee36feef..f07b748795 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake)
/**
* adf_clean_vf_map() - Cleans VF id mappings
- *
- * Function cleans internal ids for virtual functions.
@vf: flag indicating whether mappings are cleaned
* for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
*/
void adf_clean_vf_map(bool vf)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index f752653ccb..d28e192194 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -4,6 +4,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
+#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
@@ -433,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
+
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ unsigned int ae_mask, start_id, worker_obj_cnt, i;
+ u16 ring_to_svc_map;
+ int rp_group;
+
+ if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
+ !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
+ return 0;
+
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+ start_id = worker_obj_cnt - RP_GROUP_COUNT;
+
+ for (i = start_id; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return 0;
+
+ switch (hw_data->uof_get_obj_type(accel_dev, i)) {
+ case ADF_FW_SYM_OBJ:
+ rps[rp_group] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[rp_group] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[rp_group] = COMP;
+ break;
+ default:
+ rps[rp_group] = 0;
+ break;
+ }
+ }
+
+set_mask:
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
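
The resulting map packs one service type per ring pair into a u16. A small decoding sketch; the 3-bit field width and the 0/3/6/9 shift values follow from the ADF_CFG_SERV_RING_PAIR_n_SHIFT definitions, which this patch relies on but does not restate, so treat them as assumptions here:

#include <linux/types.h>

#define RP_SVC_WIDTH	3			/* assumed field width */
#define RP_SVC_MASK	((1U << RP_SVC_WIDTH) - 1)

/* Service type configured for ring pair `rp` (0..3) */
static inline unsigned int rp_service(u16 ring_to_svc_map, unsigned int rp)
{
	return (ring_to_svc_map >> (rp * RP_SVC_WIDTH)) & RP_SVC_MASK;
}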
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 7d8a774cad..c6e80df5a8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index 13f48d2f6d..b19aa1ef8e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -23,12 +23,6 @@
#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
-/* Heartbeat counter pair */
-struct hb_cnt_pair {
- __u16 resp_heartbeat_cnt;
- __u16 req_heartbeat_cnt;
-};
-
static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
{
u64 curr_time = adf_clock_get_current_time();
@@ -211,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time;
+
+ if (time_since_reset < ADF_CFG_HB_RESET_MS)
+ return;
+
+ accel_dev->heartbeat->last_hb_reset_time = curr_time;
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n");
+}
+
void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status)
{
@@ -235,6 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
"Heartbeat ERROR: QAT is not responding.\n");
*hb_status = HB_DEV_UNRESPONSIVE;
hb->hb_failed_counter++;
+ adf_heartbeat_reset(accel_dev);
return;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
index b22e3cb297..16fdfb48b1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -13,17 +13,26 @@ struct dentry;
#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
#define ADF_CFG_HB_COUNT_THRESHOLD 3
+#define ADF_CFG_HB_RESET_MS 5000
+
enum adf_device_heartbeat_status {
HB_DEV_UNRESPONSIVE = 0,
HB_DEV_ALIVE,
HB_DEV_UNSUPPORTED,
};
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
struct adf_heartbeat {
unsigned int hb_sent_counter;
unsigned int hb_failed_counter;
unsigned int hb_timer;
u64 last_hb_check_time;
+ u64 last_hb_reset_time;
bool ctrs_cnt_checked;
struct hb_dma_addr {
dma_addr_t phy_addr;
@@ -35,6 +44,9 @@ struct adf_heartbeat {
struct dentry *cfg;
struct dentry *sent;
struct dentry *failed;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ struct dentry *inject_error;
+#endif
} dbgfs;
};
@@ -51,6 +63,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
enum adf_device_heartbeat_status *hb_status);
void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ return -EPERM;
+}
+#endif
+
#else
static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
index 2661af6a2e..cccdff24b4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -155,6 +155,44 @@ static const struct file_operations adf_hb_cfg_fops = {
.write = adf_hb_cfg_write,
};
+static ssize_t adf_hb_error_inject_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct adf_accel_dev *accel_dev = file->private_data;
+ char buf[3];
+ int ret;
+
+ /* last byte left as string termination */
+ if (*ppos != 0 || count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ if (buf[0] != '1')
+ return -EINVAL;
+
+ ret = adf_heartbeat_inject_error(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat error injection failed with status %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n");
+
+ return count;
+}
+
+static const struct file_operations adf_hb_error_inject_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = adf_hb_error_inject_write,
+};
+
void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
{
struct adf_heartbeat *hb = accel_dev->heartbeat;
@@ -171,6 +209,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
&hb->hb_failed_counter, &adf_hb_stats_fops);
hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
accel_dev, &adf_hb_cfg_fops);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) {
+ struct dentry *inject_error __maybe_unused;
+
+ inject_error = debugfs_create_file("inject_error", 0200,
+ hb->dbgfs.base_dir, accel_dev,
+ &adf_hb_error_inject_fops);
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ hb->dbgfs.inject_error = inject_error;
+#endif
+ }
}
EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
@@ -189,6 +238,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
hb->dbgfs.failed = NULL;
debugfs_remove(hb->dbgfs.cfg);
hb->dbgfs.cfg = NULL;
+#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION
+ debugfs_remove(hb->dbgfs.inject_error);
+ hb->dbgfs.inject_error = NULL;
+#endif
debugfs_remove(hb->dbgfs.base_dir);
hb->dbgfs.base_dir = NULL;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
new file mode 100644
index 0000000000..a3b474bdef
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/random.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+
+#define MAX_HB_TICKS 0xFFFFFFFF
+
+static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ accel_dev->heartbeat->hb_timer = 0;
+
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
+ return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS);
+}
+
+static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae,
+ u32 thr)
+{
+ struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ size_t thr_id = ae * hb_ctrs + thr;
+ u16 num_rsp = stats[thr_id].resp_heartbeat_cnt;
+
+ /*
+ * Inject live.req != live.rsp and live.rsp == last.rsp
+ * to trigger the heartbeat error detection
+ */
+ stats[thr_id].req_heartbeat_cnt++;
+ stats += (max_aes * hb_ctrs);
+ stats[thr_id].resp_heartbeat_cnt = num_rsp;
+}
+
+int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ const size_t max_aes = hw_device->get_num_aes(hw_device);
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ u32 rand, rand_ae, rand_thr;
+ unsigned long ae_mask;
+ int ret;
+
+ ae_mask = hw_device->ae_mask;
+
+ do {
+ /* Ensure we have a valid ae */
+ get_random_bytes(&rand, sizeof(rand));
+ rand_ae = rand % max_aes;
+ } while (!test_bit(rand_ae, &ae_mask));
+
+ get_random_bytes(&rand, sizeof(rand));
+ rand_thr = rand % hb_ctrs;
+
+ /* Increase the heartbeat timer to prevent the FW from updating HB counters */
+ ret = adf_hb_set_timer_to_max(accel_dev);
+ if (ret)
+ return ret;
+
+ /* Configure worker threads to stop processing any packet */
+ ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr);
+ if (ret)
+ return ret;
+
+ /* Change HB counters memory to simulate a hang */
+ adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr);
+
+ return 0;
+}
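
The counters written by adf_set_hb_counters_fail() are shaped to trip the detector's hang condition. Inferred from the comment in that function (the actual check lives in the existing heartbeat status code, not in this patch), the predicate amounts to:

#include <linux/types.h>

#include "adf_heartbeat.h"	/* struct hb_cnt_pair */

/*
 * A thread is considered hung when it has outstanding requests and its
 * response count has not moved since the previous poll.
 */
static bool hb_thread_hung(const struct hb_cnt_pair *live,
			   const struct hb_cnt_pair *last)
{
	return live->req_heartbeat_cnt != live->resp_heartbeat_cnt &&
	       live->resp_heartbeat_cnt == last->resp_heartbeat_cnt;
}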
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index da69566992..65bd26b25a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
EXPORT_SYMBOL_GPL(adf_exit_arb);
+
+int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr)
+{
+ void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ const u32 *thd_2_arb_cfg;
+ struct arb_info info;
+ u32 ae_thr_map;
+
+ if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr)
+ thr = ADF_AE_ADMIN_THREAD;
+
+ hw_data->get_arb_info(&info);
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+ if (!thd_2_arb_cfg)
+ return -EFAULT;
+
+ /* Disable scheduling for this particular AE and thread */
+ ae_thr_map = *(thd_2_arb_cfg + ae);
+ ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2)));
+
+ WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae,
+ ae_thr_map);
+ return 0;
+}
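
In adf_disable_arb_thd(), each accel engine's arbiter word carries a 4-bit enable nibble per thread, and BIT(2) simply evaluates to 4, so thr * BIT(2) is the nibble's start bit. A worked example with illustrative values:

#include <linux/bits.h>
#include <linux/types.h>

/* Clearing thread `thr` removes its 4-bit nibble at bit thr * 4 */
static u32 disable_thread(u32 ae_thr_map, u32 thr)
{
	return ae_thr_map & ~(GENMASK(3, 0) << (thr * 4));
}

/* disable_thread(0xFFFF, 2) == 0xF0FF: bits 11:8 cleared, thread 2 idle */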
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f43ae91115..74f0818c07 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
return 0;
}
+void adf_error_notifier(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+
+ list_for_each_entry(service, &service_table, list) {
+ if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send error event to %s.\n",
+ service->name);
+ }
+}
+
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 3557a0d6de..cae1aee547 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
if (ras_ops->handle_interrupt &&
ras_ops->handle_interrupt(accel_dev, &reset_required)) {
- if (reset_required)
+ if (reset_required) {
dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
+ if (adf_notify_fatal_error(accel_dev))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to notify fatal error\n");
+ }
+
return true;
}
@@ -272,7 +277,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
- irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ irqs = kcalloc_node(msix_num_entries, sizeof(*irqs),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!irqs)
return -ENOMEM;
@@ -375,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
/**
* adf_init_misc_wq() - Init misc workqueue
*
- * Function init workqueue 'qat_misc_wq' for general purpose.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_misc_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
index 204a424389..d1b3ef9cad 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
@@ -99,6 +99,8 @@ enum pf2vf_msgtype {
ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+ ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04,
+ ADF_PF2VF_MSGTYPE_RESTARTED = 0x05,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
};
@@ -112,6 +114,7 @@ enum vf2pf_msgtype {
ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+ ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
};
@@ -124,8 +127,10 @@ enum pfvf_compatibility_version {
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
/* Ring to service mapping support for non-standard mappings */
ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Fallback compat */
+ ADF_PFVF_COMPAT_FALLBACK = 0x05,
/* Reference to the latest version */
- ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x05,
};
/* PF->VF Version Response */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
index 14c069f0d7..0e31f4b418 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -1,21 +1,83 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/delay.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_pf_msg.h"
#include "adf_pfvf_pf_proto.h"
+#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100
+#define ADF_VF_SHUTDOWN_RETRY 100
+
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
struct adf_accel_vf_info *vf;
struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ vf->restarting = false;
+ if (!vf->init)
+ continue;
+ if (adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
+ else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+ vf->restarting = true;
+ }
+}
+
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+ int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ int i, retries = ADF_VF_SHUTDOWN_RETRY;
+ struct adf_accel_vf_info *vf;
+ bool vf_running;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");
+ do {
+ vf_running = false;
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)
+ if (vf->restarting)
+ vf_running = true;
+ if (!vf_running)
+ break;
+ msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY);
+ } while (--retries);
+
+ if (vf_running)
+ dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n");
+}
+
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarted msg to VF%d\n", i);
+ }
+}
+
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+
+ dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n");
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK &&
+ adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send fatal error msg to VF%d\n", i);
}
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
index e8982d1ac8..f203d88c91 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
@@ -5,7 +5,28 @@
#include "adf_accel_devices.h"
+#if defined(CONFIG_PCI_IOV)
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev);
+void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
u8 *buffer, u8 compat);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 388e58bcbc..9ab93fbfef 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
vf_info->init = false;
}
break;
+ case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting Complete received from VF%d\n", vf_nr);
+ vf_info->restarting = false;
+ vf_info->init = false;
+ }
+ break;
case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
index 1015155b63..dc284a089c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
@@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
adf_pf2vf_handle_pf_restarting(accel_dev);
return false;
+ case ADF_PF2VF_MSGTYPE_RESTARTED:
+ dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n");
+ return true;
+ case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
+ dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n");
+ return true;
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index f44025bb6f..87a70c00c4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
/* This ptr will be populated when VFs will be created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
- vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
@@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
return pci_enable_sriov(pdev, totalvfs);
}
+void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ unsigned long val = 0;
+
+ if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SRIOV_ENABLED, cfg))
+ return;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC))
+ return;
+
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC))
+ return;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
+ adf_enable_sriov(accel_dev);
+}
+
/**
* adf_disable_sriov() - Disable SRIOV for the device
* @accel_dev: Pointer to accel device.
@@ -103,6 +128,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
return;
adf_pf2vf_notify_restarting(accel_dev);
+ adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
@@ -115,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
mutex_destroy(&vf->pf2vf_lock);
- kfree(accel_dev->pf.vf_info);
- accel_dev->pf.vf_info = NULL;
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+ }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -194,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
if (ret)
return ret;
+ val = 1;
+ adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
+ &val, ADF_DEC);
+
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index d450dad32c..4e7f70d404 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute
}
static DEVICE_ATTR_RW(pm_idle_enabled);
+static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char *auto_reset;
+ struct adf_accel_dev *accel_dev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
+
+ return sysfs_emit(buf, "%s\n", auto_reset);
+}
+
+static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ bool enabled = false;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ accel_dev->autoreset_on_error = enabled;
+
+ return count;
+}
+static DEVICE_ATTR_RW(auto_reset);
+
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
@@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = {
&dev_attr_pm_idle_enabled.attr,
&dev_attr_rp2srv.attr,
&dev_attr_num_rps.attr,
+ &dev_attr_auto_reset.attr,
NULL,
};
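
The new auto_reset attribute is parsed with kstrtobool(), so "1"/"0", "y"/"n" and "on"/"off" are all accepted. A user-space sketch follows; the qat/ attribute-group path and the PCI address are assumptions for illustration:

#include <stdio.h>

/* Hypothetical BDF; substitute the device's real sysfs path */
#define AUTO_RESET "/sys/bus/pci/devices/0000:6b:00.0/qat/auto_reset"

int main(void)
{
	FILE *f = fopen(AUTO_RESET, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Enable automatic reset on fatal error */
	fputs("on", f);
	fclose(f);
	return 0;
}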
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index b05c3957a1..cdbb2d687b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
/**
* adf_init_vf_wq() - Init workqueue for VF
*
- * Function init workqueue 'adf_vf_stop_wq' for VF.
- *
* Return: 0 on success, error code otherwise.
*/
int __init adf_init_vf_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
index 40c8e74d1c..101c6ea416 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
@@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
/**
- * qat_crypto_vf_dev_config()
- * create dev config required to create crypto inst.
+ * qat_crypto_vf_dev_config() - create dev config required to create
+ * crypto inst.
*
* @accel_dev: Pointer to acceleration device.
*