author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit    638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree      fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /drivers/ufs/core
parent    Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/ufs/core')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c    10
-rw-r--r--  drivers/ufs/core/ufs_bsg.c     3
-rw-r--r--  drivers/ufs/core/ufshcd.c    364
3 files changed, 95 insertions, 282 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index c532416aec..408fef9c6f 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -230,8 +230,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
-#define MCQ_OPR_OFFSET_n(p, i) \
- (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
@@ -342,10 +340,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
MCQ_CFG_n(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
MCQ_CFG_n(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
MCQ_CFG_n(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
@@ -355,10 +353,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
MCQ_CFG_n(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
MCQ_CFG_n(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
MCQ_CFG_n(REG_CQISAO, i));
/* Save the base addresses for quicker access */
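
The MCQ_OPR_OFFSET_n() macro deleted above is replaced by calls to ufshcd_mcq_opr_offset(). The helper's definition is outside this diff; judging from the removed macro body, it presumably computes the same per-queue offset, just with hba passed explicitly instead of captured from the caller's scope:

static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
						 enum ufshcd_mcq_opr opr, int idx)
{
	/* base of the operation/runtime register group plus one stride per queue */
	return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
}

Taking hba as a parameter is what lets the helper live in a shared header and be used by host drivers, not just this file.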
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 374e5aae4e..433d048039 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -253,7 +253,8 @@ int ufs_bsg_probe(struct ufs_hba *hba)
if (ret)
goto out;
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request,
+ NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out;
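
The new NULL is a third argument inserted ahead of the job function: in this kernel generation bsg_setup_queue() grew a queue-limits parameter so callers can constrain the queue at allocation time. The prototype the updated call site matches is presumably along these lines:

struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
				      struct queue_limits *lim,
				      bsg_job_fn *job_fn, bsg_timeout_fn *timeout,
				      int dd_job_size);

UFS passes NULL to keep the default limits, so the BSG queue behaves exactly as before.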
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ad192b7453..46433ecf0c 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -748,8 +748,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == ufshci_version(1, 0))
- return INTERRUPT_MASK_ALL_VER_10;
if (hba->ufs_version <= ufshci_version(2, 0))
return INTERRUPT_MASK_ALL_VER_11;
@@ -990,30 +988,6 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
-{
- /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
- if (hba->ufs_version <= ufshci_version(1, 1))
- return UFS_UNIPRO_VER_1_41;
- else
- return UFS_UNIPRO_VER_1_6;
-}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
-{
- /*
- * If both host and device support UniPro ver1.6 or later, PA layer
- * parameters tuning happens during link startup itself.
- *
- * We can manually tune PA layer parameters if either host or device
- * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
- * logic simple, we will only do manual tuning if local unipro version
- * doesn't support ver1.6 or later.
- */
- return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
-}
-
/**
* ufshcd_pm_qos_init - initialize PM QoS request
* @hba: per adapter instance
@@ -2674,14 +2648,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = set & INTERRUPT_MASK_RW_VER_10;
- set = rw | ((set ^ intrs) & intrs);
- } else {
- set |= intrs;
- }
-
+ set |= intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
@@ -2694,34 +2661,30 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = (set & INTERRUPT_MASK_RW_VER_10) &
- ~(intrs & INTERRUPT_MASK_RW_VER_10);
- set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
- } else {
- set &= ~intrs;
- }
-
+ set &= ~intrs;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor header
* according to the request
+ * @hba: per adapter instance
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
* @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
- enum dma_data_direction cmd_dir, int ehs_length)
+static void
+ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ u8 *upiu_flags, enum dma_data_direction cmd_dir,
+ int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
struct request_desc_header *h = &req_desc->header;
enum utp_data_direction data_direction;
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
*h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
@@ -2854,12 +2817,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
u8 upiu_flags;
int ret = 0;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2882,13 +2841,7 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
@@ -3061,15 +3014,21 @@ out:
return err;
}
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ enum dev_cmd_type cmd_type, u8 lun, int tag)
{
lrbp->cmd = NULL;
lrbp->task_tag = tag;
- lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->lun = lun;
lrbp->intr_cmd = true; /* No interrupt aggregation */
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
+}
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
return ufshcd_compose_devman_upiu(hba, lrbp);
}
@@ -3082,16 +3041,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
- struct request *rq;
-
- if (!cmd)
- return false;
-
- rq = scsi_cmd_to_rq(cmd);
- if (!blk_mq_request_started(rq))
- return false;
-
- return true;
+ return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT;
}
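
This is more than a style rewrite. blk_mq_request_started() is true for any request that has left the idle state, including one that has already completed, while the new test is satisfied only while the request is actually in flight. The relevant definitions in include/linux/blk-mq.h read:

enum mq_rq_state {
	MQ_RQ_IDLE	= 0,
	MQ_RQ_IN_FLIGHT	= 1,
	MQ_RQ_COMPLETE	= 2,
};

static inline bool blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

So after this change a command sitting in MQ_RQ_COMPLETE no longer counts as inflight.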
/*
@@ -3276,6 +3226,39 @@ retry:
return err;
}
+static void ufshcd_dev_man_lock(struct ufs_hba *hba)
+{
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
+{
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+}
+
+static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ const u32 tag, int timeout)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int err;
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+
+ return err;
+}
+
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
@@ -3290,34 +3273,18 @@ retry:
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
-
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out;
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
+ return err;
-out:
- up_read(&hba->clk_scaling_lock);
- return err;
+ return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
}
/**
@@ -3387,8 +3354,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3430,8 +3397,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
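
ufshcd_dev_man_lock()/ufshcd_dev_man_unlock(), added earlier in this patch, fold together the ufshcd_hold()/mutex_lock() pair that every device-management path used to open-code, and also absorb the clk_scaling_lock read lock that ufshcd_exec_dev_cmd() previously took on its own. Every converted call site now reduces to the same bracket, e.g. (sketch matching the ufshcd_verify_dev_init() hunk below):

	ufshcd_dev_man_lock(hba);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, hba->nop_out_timeout);
	ufshcd_dev_man_unlock(hba);

The lock ordering (hold, then dev_cmd mutex, then clk_scaling_lock) becomes a single point of truth instead of being repeated per caller.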
@@ -3461,9 +3427,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3493,8 +3458,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
*attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -3557,9 +3521,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);
- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
hba->dev_cmd.query.descriptor = desc_buf;
@@ -3592,8 +3555,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}
@@ -4772,12 +4734,6 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
- * Make sure base address and interrupt setup are updated before
- * enabling the run/stop registers below.
- */
- wmb();
-
- /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
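
The wmb() dropped here, like the one dropped from __ufshcd_issue_tm_cmd() further down, is redundant rather than relocated: ufshcd_writel() is a thin wrapper around writel(), and writel() already guarantees that MMIO writes to the same device become visible in program order, so an extra barrier between two register writes buys nothing. For reference, the accessor is essentially:

#define ufshcd_writel(hba, val, reg) \
	writel((val), (hba)->mmio_base + (reg))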
@@ -5074,8 +5030,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
hba->nop_out_timeout);
@@ -5085,8 +5041,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+
+ ufshcd_dev_man_unlock(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -5264,9 +5220,6 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
*/
sdev->silence_suspend = 1;
- if (hba->vops && hba->vops->config_scsi_dev)
- hba->vops->config_scsi_dev(sdev);
-
ufshcd_crypto_register(hba, q);
return 0;
@@ -5549,15 +5502,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- if (cqe) {
- ocs = le32_to_cpu(cqe->status) & MASK_OCS;
- lrbp->utr_descriptor_ptr->header.ocs = ocs;
- }
- complete(hba->dev_cmd.complete);
+ } else if (hba->dev_cmd.complete) {
+ if (cqe) {
+ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+ lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
+ complete(hba->dev_cmd.complete);
}
}
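
The command_type checks can go because, with UFSHCI 1.x support removed, ufshcd_prepare_req_desc_hdr() now stamps every request with UTP_CMD_TYPE_UFS_STORAGE (see the lrbp->command_type assignment added to it earlier in this patch). A completion that is not a SCSI command and has dev_cmd.complete set can therefore only be a device-management command, so testing the completion pointer alone is sufficient.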
@@ -7094,10 +7044,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
-
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -7205,35 +7152,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
u8 upiu_flags;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
-
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0;
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = cmd_type;
+ ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
/* update the task tag in the request upiu */
req_upiu->header.task_tag = tag;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
-
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
@@ -7247,17 +7180,12 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the return value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7280,7 +7208,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
- up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7319,13 +7246,11 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
break;
case UPIU_TRANSACTION_TASK_REQ:
@@ -7375,41 +7300,21 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
- DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
int result;
u8 upiu_flags;
u8 *ehs_data;
u16 ehs_len;
+ int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
+ ufshcd_dev_man_lock(hba);
- lrbp = &hba->lrb[tag];
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = UFS_UPIU_RPMB_WLUN;
-
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
-
- /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
- /*
- * According to UFSHCI 4.0 specification page 24, if EHSLUTRDS is 0, host controller takes
- * EHS length from CMD UPIU, and SW driver use EHS Length field in CMD UPIU. if it is 1,
- * HW controller takes EHS length from UTRD.
- */
- if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
- else
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
+ ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
/* update the task tag */
req_upiu->header.task_tag = tag;
@@ -7424,11 +7329,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
- hba->dev_cmd.complete = &wait;
-
- ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
-
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+ err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
if (!err) {
/* Just copy the upiu response as it is */
@@ -7453,9 +7354,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}
- up_read(&hba->clk_scaling_lock);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
+
return err ? : result;
}
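
The deleted UFSHCI 4.0 comment survives as the ehs ternary declared at the top of the function: when the controller advertises MASK_EHSLUTRD_SUPPORTED it takes the EHS length from the UTRD, so the driver passes 2 (presumably the two 32-byte units of the Advanced RPMB EHS); otherwise it passes 0 and the length is taken from the CMD UPIU. Note in passing that the descriptor header is now built with DMA_NONE where the old code forwarded the caller's dir.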
@@ -8402,83 +8302,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
}
/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * PA_TActivate parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
- * the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(
- RX_MIN_ACTIVATETIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_min_activatetime);
- if (ret)
- goto out;
-
- /* make sure proper unit conversion is applied */
- tuned_pa_tactivate =
- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
- / PA_TACTIVATE_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- tuned_pa_tactivate);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
- * This optimal value can help reduce the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
- u32 max_hibern8_time, tuned_pa_hibern8time;
-
- ret = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &local_tx_hibern8_time_cap);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_hibern8_time_cap);
- if (ret)
- goto out;
-
- max_hibern8_time = max(local_tx_hibern8_time_cap,
- peer_rx_hibern8_time_cap);
- /* make sure proper unit conversion is applied */
- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
- / PA_HIBERN8_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- tuned_pa_hibern8time);
-out:
- return ret;
-}
-
-/**
* ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
* less than device PA_TACTIVATE time.
* @hba: per-adapter instance
@@ -8550,11 +8373,6 @@ out:
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
- if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
- ufshcd_tune_pa_tactivate(hba);
- ufshcd_tune_pa_hibern8time(hba);
- }
-
ufshcd_vops_apply_dev_quirks(hba);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
@@ -8718,9 +8536,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
if (dev_info->wspecversion < 0x400)
return;
- ufshcd_hold(hba);
-
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
ufshcd_init_query(hba, &request, &response,
UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -8738,8 +8554,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
__func__, err);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
}
/**
@@ -10904,7 +10719,6 @@ static void ufshcd_check_header_layout(void)
static struct scsi_driver ufs_dev_wlun_template = {
.gendrv = {
.name = "ufs_device_wlun",
- .owner = THIS_MODULE,
.probe = ufshcd_wl_probe,
.remove = ufshcd_wl_remove,
.pm = &ufshcd_wl_pm_ops,
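
The explicit .owner assignment appears to fall out of this release's tree-wide cleanup in which the driver registration helpers were changed to fill in the module owner themselves; if so, keeping THIS_MODULE in the initializer had become redundant rather than wrong.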