author    Yongqin Liu <yongqin.liu@linaro.org>    2020-04-28 00:49:45 +0800
committer YongQin Liu <yongqin.liu@linaro.org>    2020-04-28 03:23:39 +0000
commit    9d52ec64ef44b20151bc9a8b57db4982b6969723 (patch)
tree      d4586d2eeb7fa6d8418c097188628501844e1769 /drivers/scsi/ufs/ufshcd.c
parent    eca3a1de761510076ef78a8921345ad166609c31 (diff)
parent    fdd560ec460793a02cf226cc4f2aac6d0e637e5c (diff)
Merge branch 'mirror-android-4.14-stable' into android-hikey-linaro-4.14
* mirror-android-4.14-stable: (1307 commits)
  ANDROID: Incremental fs: Use simple compression in log buffer
  ANDROID: dm-bow: Fix not to skip trim at framented range
  ANDROID: Remove VLA from uid_sys_stats.c
  ANDROID: cuttlefish_defconfig: enable CONFIG_DEBUG_LIST
  Linux 4.14.177
  KEYS: Don't write out to userspace while holding key semaphore
  KEYS: Use individual pages in big_key for crypto buffers
  mtd: phram: fix a double free issue in error path
  mtd: lpddr: Fix a double free in probe()
  locktorture: Print ratio of acquisitions, not failures
  tty: evh_bytechan: Fix out of bounds accesses
  fbdev: potential information leak in do_fb_ioctl()
  net: dsa: bcm_sf2: Fix overflow checks
  iommu/amd: Fix the configuration of GCR3 table root pointer
  libnvdimm: Out of bounds read in __nd_ioctl()
  ext2: fix debug reference to ext2_xattr_cache
  ext2: fix empty body warnings when -Wextra is used
  iommu/vt-d: Fix mm reference leak
  NFS: Fix memory leaks in nfs_pageio_stop_mirroring()
  drm/amdkfd: kfree the wrong pointer
  ...

Test: boot tested with hikey/hikey960 aosp-master-throttled-copped@6345223
Change-Id: Idba6936f4948affaed68cbcf99b9598a038567a2
Signed-off-by: Yongqin Liu <yongqin.liu@linaro.org>
Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r--  drivers/scsi/ufs/ufshcd.c  101
1 file changed, 61 insertions(+), 40 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index bc335df7d491..5a773f4b590a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -405,8 +405,11 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
sizeof(struct utp_upiu_rsp));
- prdt_length = le16_to_cpu(
- lrbp->utr_descriptor_ptr->prd_table_length);
+ prdt_length =
+ le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ prdt_length /= hba->sg_entry_size;
+
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
@@ -414,7 +417,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
+ hba->sg_entry_size * prdt_length);
}
}
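
These two hunks make the debug dump follow the runtime entry size: on hosts with UFSHCD_QUIRK_PRDT_BYTE_GRAN the descriptor's prd_table_length field holds a byte count rather than an entry count, so it is scaled by hba->sg_entry_size before being printed and before sizing the hex dump. A minimal sketch of the decoding rule, with the helper name and parameters invented for illustration:

    /* Illustrative helper (not in the driver): number of PRDT entries
     * described by a UTRD prd_table_length field. */
    static inline u16 prdt_entry_count(u16 raw, bool byte_gran, size_t sg_entry_size)
    {
            return byte_gran ? raw / sg_entry_size : raw;
    }
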
@@ -1460,6 +1463,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
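
The new early return lets an asynchronous hold request fail fast instead of sleeping in flush_work() while the link leaves hibern8; the gating reference taken earlier in the function is dropped before bailing out. A hedged sketch of how an atomic caller would consume the -EAGAIN (the mainline queuecommand path behaves roughly like this; the variable name is illustrative):

    /* Sketch: atomic context asks for an async hold and backs off on -EAGAIN. */
    err = ufshcd_hold(hba, true /* async */);
    if (err) {
            /* Ungating is still in flight; let the SCSI midlayer retry later. */
            return SCSI_MLQUEUE_HOST_BUSY;
    }
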
@@ -1981,7 +1989,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
- struct ufshcd_sg_entry *prd_table;
+ struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
struct scsi_cmnd *cmd;
int sg_segments;
@@ -1996,27 +2004,28 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16)(sg_segments *
- sizeof(struct ufshcd_sg_entry)));
+ hba->sg_entry_size));
else
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16) (sg_segments));
- prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+ prd = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
scsi_for_each_sg(cmd, sg, sg_segments, i) {
- prd_table[i].size =
+ prd->size =
cpu_to_le32(((u32) sg_dma_len(sg))-1);
- prd_table[i].base_addr =
+ prd->base_addr =
cpu_to_le32(lower_32_bits(sg->dma_address));
- prd_table[i].upper_addr =
+ prd->upper_addr =
cpu_to_le32(upper_32_bits(sg->dma_address));
- prd_table[i].reserved = 0;
+ prd->reserved = 0;
+ prd = (void *)prd + hba->sg_entry_size;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
- return 0;
+ return ufshcd_map_sg_crypto(hba, lrbp);
}
/**
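
Because the on-bus PRDT entry may be larger than struct ufshcd_sg_entry on hosts that append vendor-specific words (hba->sg_entry_size is the runtime stride), the loop can no longer index prd_table[i]; it advances a byte pointer by the stride instead, and the function now chains into the crypto hook added in this tree. Reduced to a hedged addressing sketch:

    /* Sketch: entry i of a table whose element size is only known at runtime. */
    struct ufshcd_sg_entry *prd =
            (void *)lrbp->ucd_prdt_ptr + i * hba->sg_entry_size;
    /* Only the standard leading fields are filled here; any vendor tail
     * bytes belong to the host driver that enlarged sg_entry_size. */
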
@@ -3246,7 +3255,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs);
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3342,7 +3351,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -3374,17 +3383,17 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
(i * sizeof(struct utp_transfer_req_desc));
- hba->lrb[i].ucd_req_ptr =
- (struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
hba->lrb[i].ucd_rsp_ptr =
- (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ (struct utp_upiu_rsp *)cmd_descp->response_upiu;
hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
response_offset;
hba->lrb[i].ucd_prdt_ptr =
- (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ (struct ufshcd_sg_entry *)cmd_descp->prd_table;
hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
prdt_offset;
+ cmd_descp = (void *)cmd_descp + cmd_desc_size;
}
}
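
The same reasoning applies to the command descriptor array: struct utp_transfer_cmd_desc ends in the PRDT, so its real size is sizeof_utp_transfer_cmd_desc(hba), and cmd_descp[i] indexing would step by the wrong amount. The per-tag addresses reduce to the following sketch (names taken from the hunk above):

    /* Sketch: CPU and DMA addresses of command descriptor i with a dynamic element size. */
    size_t cdesc_size = sizeof_utp_transfer_cmd_desc(hba);
    struct utp_transfer_cmd_desc *cdesc =
            (void *)hba->ucdl_base_addr + i * cdesc_size;
    dma_addr_t cdesc_dma = hba->ucdl_dma_addr + i * cdesc_size;
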
@@ -4624,7 +4633,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if (host_byte(result) != DID_OK)
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4857,6 +4866,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
@@ -4881,6 +4891,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
+ hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -4927,6 +4938,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
+ hba->urgent_bkops_lvl = curr_status;
out:
return err;
}
@@ -5151,8 +5163,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -5408,19 +5420,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
spin_unlock(hba->host->host_lock);
return retval;
}
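
The rationale is spelled out in the new comment: up to hba->nutrs requests can complete one by one right after the status register has been sampled, so the handler re-reads and re-acks the status in a bounded loop instead of acking once and returning. Stripped of the UFS specifics, the shape of the loop is (helper names hypothetical):

    /* Sketch: bounded read / ack / handle / re-read interrupt loop. */
    status = read_intr_status(dev);
    do {
            pending = status & read_intr_enable(dev);
            if (status)
                    ack_intr(dev, status);          /* clear what was sampled */
            if (pending) {
                    handle_intr(dev, pending);
                    ret = IRQ_HANDLED;
            }
            status = read_intr_status(dev);         /* pick up late completions */
    } while (status && --retries);
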
@@ -5791,9 +5814,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete the requests
+ * cleared by h/w
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
@@ -5827,22 +5856,12 @@ out:
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
- unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
do {
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
- /*
- * After reset the door-bell might be cleared, complete
- * outstanding requests in s/w here.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
return err;
}
@@ -6457,7 +6476,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_icc_levels(hba);
/* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
goto out;
/* Initialize devfreq after UFS device is detected */
@@ -7920,6 +7940,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->host = host;
hba->dev = dev;
*hba_handle = hba;
+ hba->sg_entry_size = sizeof(struct ufshcd_sg_entry);
INIT_LIST_HEAD(&hba->clk_list_head);
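
ufshcd_alloc_host() seeds hba->sg_entry_size with the standard sizeof(struct ufshcd_sg_entry), so existing hosts see no behavioural change; a variant driver that needs extra per-entry words (for example inline-crypto metadata, as the ufshcd_map_sg_crypto() hook above suggests) would be expected to enlarge it before ufshcd_init() sizes the descriptor pools. A hypothetical override, with the vendor structure invented for illustration:

    /* Hypothetical vendor host driver: widen the PRDT entry before init. */
    struct my_vendor_sg_entry {
            struct ufshcd_sg_entry std;     /* standard entry laid out first */
            __le32 vendor_meta[4];          /* e.g. inline-crypto metadata */
    };

    hba->sg_entry_size = sizeof(struct my_vendor_sg_entry);
    err = ufshcd_init(hba, mmio_base, irq);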