author		Chintan Pandya <cpandya@codeaurora.org>	2014-05-30 14:20:20 +0530
committer	Chintan Pandya <cpandya@codeaurora.org>	2014-05-31 14:48:18 +0530
commit		b14e12340d47d0af531b242bde61c1723bfa1acc (patch)
tree		3ba54ce190095d7dd8e9e56878deacb532dc3560 /drivers/iommu
parent		3118c155acadeea98af7774f8d98940fcba1dc8a (diff)
iommu: msm: Fix for cache invalidation variance
On some architectures (observed at least on the Cortex-A53), a CPU cache invalidate is not a pure invalidate but a clean + invalidate: if the cache lines are dirty, they are first cleaned, which updates DDR with the cached content, and only then invalidated. Consequently, we cannot hand a buffer over to a non-Linux entity before cleaning its CPU cache lines; otherwise the later invalidation would write stale cached data back over the DDR content written by the non-Linux entity. Fix this by doing a clean, with the proper range, before the handover.

Change-Id: I8b3c6d13961e9e966a2241d5372584f59bdfbcf0
Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
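For illustration, here is a minimal sketch of the clean-before / invalidate-after pattern this patch establishes. It mirrors the shape of the patched msm_iommu_dump_fault_regs(); the function name dump_fault_regs_sketch and the simplified request struct fault_regs_dump_req are hypothetical stand-ins, while dmac_clean_range(), dmac_inv_range(), scm_call(), SCM_SVC_UTIL and IOMMU_DUMP_SMMU_FAULT_REGS are the kernel symbols visible in the diff below:

/*
 * Sketch only, not the patch itself: request layout and function shape
 * are simplified from drivers/iommu/msm_iommu_sec.c.
 */
struct fault_regs_dump_req {	/* hypothetical, simplified request */
	uint32_t buff;		/* physical address of the shared buffer */
	uint32_t len;		/* buffer length in bytes */
};

static int dump_fault_regs_sketch(struct msm_scm_fault_regs_dump *regs)
{
	struct fault_regs_dump_req req_info;
	int resp, ret;

	req_info.buff = virt_to_phys(regs);
	req_info.len = sizeof(*regs);

	/*
	 * Clean (write back) any dirty CPU cache lines covering *regs
	 * BEFORE the secure side owns the buffer. Without this, a later
	 * clean + invalidate would push stale CPU data into DDR on top
	 * of whatever the secure side wrote there.
	 */
	dmac_clean_range(regs, regs + 1);

	ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
		       &req_info, sizeof(req_info), &resp, 1);

	/*
	 * Invalidate AFTER the call so subsequent CPU reads fetch the
	 * secure side's data from DDR rather than stale cached copies.
	 * regs + 1 is one whole struct past regs, i.e. exactly
	 * sizeof(*regs) bytes.
	 */
	dmac_inv_range(regs, regs + 1);

	return ret;
}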
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/msm_iommu_sec.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 7ae762e51237..f730e4346e86 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -110,7 +110,7 @@ struct msm_cp_pool_size {
 struct msm_scm_fault_regs_dump {
 	uint32_t dump_size;
 	uint32_t dump_data[SEC_DUMP_SIZE];
-} __packed;
+} __aligned(cache_line_size());
 
 void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
 {
@@ -135,10 +135,11 @@ static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
 	req_info.buff = virt_to_phys(regs);
 	req_info.len = sizeof(*regs);
 
+	dmac_clean_range(regs, regs + 1);
 	ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
 			&req_info, sizeof(req_info), &resp, 1);
 
-	dmac_inv_range(regs, regs + sizeof(*regs));
+	dmac_inv_range(regs, regs + 1);
 
 	return ret;
 }
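Note that the second hunk also corrects the range passed to dmac_inv_range(): since regs is a struct pointer, pointer arithmetic scales by the element size, so the old end pointer regs + sizeof(*regs) pointed sizeof(*regs) whole structs past the buffer, while regs + 1 is the correct one-past-the-end bound. A standalone sketch of the difference, using a hypothetical struct dump as a stand-in for msm_scm_fault_regs_dump:

#include <stdint.h>
#include <stdio.h>

struct dump {			/* stand-in for msm_scm_fault_regs_dump */
	uint32_t words[16];	/* 64 bytes */
};

int main(void)
{
	struct dump buf;
	struct dump *regs = &buf;

	/* Pointer arithmetic scales by sizeof(struct dump): */
	printf("regs + 1 spans %zu bytes\n",
	       (size_t)((char *)(regs + 1) - (char *)regs));	/* 64 */
	printf("regs + sizeof(*regs) spans %zu bytes\n",
	       (size_t)((char *)(regs + sizeof(*regs)) - (char *)regs));	/* 64 * 64 = 4096 */
	return 0;
}

With the old bound, the invalidate would have covered 4096 bytes instead of the 64-byte buffer, hitting unrelated cache lines.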