From b14e12340d47d0af531b242bde61c1723bfa1acc Mon Sep 17 00:00:00 2001
From: Chintan Pandya
Date: Fri, 30 May 2014 14:20:20 +0530
Subject: iommu: msm: Fix for cache invalidation variance

On some architectures (observed at least on Cortex-A53), a CPU cache
invalidate operation is not a pure invalidate but a clean + invalidate.
If the cache lines are *dirty*, the clean is performed first, updating
DDR with the cached content, and only then are the lines invalidated.

Because of this behaviour, we must not hand a buffer over to a
non-Linux entity without first cleaning its CPU cache lines: a later
invalidation would write the stale dirty lines back and overwrite the
DDR content written by the non-Linux entity.

Fix this by cleaning the buffer before the SCM call, and pass the
proper range to both the clean and the subsequent invalidate.

Change-Id: I8b3c6d13961e9e966a2241d5372584f59bdfbcf0
Signed-off-by: Chintan Pandya
---
 drivers/iommu/msm_iommu_sec.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/iommu')

diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 7ae762e51237..f730e4346e86 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -110,7 +110,7 @@ struct msm_cp_pool_size {
 struct msm_scm_fault_regs_dump {
        uint32_t dump_size;
        uint32_t dump_data[SEC_DUMP_SIZE];
-} __packed;
+} __aligned(cache_line_size());
 
 void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
 {
@@ -135,10 +135,11 @@ static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
        req_info.buff = virt_to_phys(regs);
        req_info.len = sizeof(*regs);
 
+       dmac_clean_range(regs, regs + 1);
        ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
                &req_info, sizeof(req_info), &resp, 1);
 
-       dmac_inv_range(regs, regs + sizeof(*regs));
+       dmac_inv_range(regs, regs + 1);
 
        return ret;
 }
-- 
cgit v1.2.3
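
For readers who want the pattern spelled out, below is a minimal sketch
(not part of the patch) of the clean-before-handover / invalidate-after
sequence the fix establishes. It reuses the names visible in the diff
(dmac_clean_range(), dmac_inv_range(), scm_call(), virt_to_phys(),
struct msm_scm_fault_regs_dump); the request-structure type, the local
function name, and the include choices are assumptions for illustration,
not the driver's exact code.

/*
 * Sketch of the buffer-handover pattern, modelled on
 * msm_iommu_dump_fault_regs() in msm_iommu_sec.c. The includes and the
 * request struct below are assumptions; the cache-maintenance calls
 * and their ranges match the patched code. SCM_SVC_UTIL and
 * IOMMU_DUMP_SMMU_FAULT_REGS come from the driver context.
 */
#include <linux/types.h>        /* uint32_t */
#include <asm/cacheflush.h>     /* dmac_clean_range(), dmac_inv_range() (MSM/ARM) */
#include <asm/memory.h>         /* virt_to_phys() on ARM */
#include <mach/scm.h>           /* scm_call() in msm kernels of this era (assumed path) */

struct fault_dump_req {         /* hypothetical stand-in for the real request type */
        uint32_t buff;
        uint32_t len;
        /* other fields (SMMU id, context bank) elided */
};

static int dump_fault_regs_sketch(struct msm_scm_fault_regs_dump *regs)
{
        struct fault_dump_req req_info = { 0 };
        int resp, ret;

        req_info.buff = virt_to_phys(regs);
        req_info.len = sizeof(*regs);

        /*
         * Clean first: push any dirty CPU cache lines covering *regs
         * out to DDR. On cores where "invalidate" is really
         * clean+invalidate (e.g. Cortex-A53), skipping this step lets
         * the invalidate below write stale dirty lines over whatever
         * the secure world put in DDR.
         */
        dmac_clean_range(regs, regs + 1);

        ret = scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
                       &req_info, sizeof(req_info), &resp, 1);

        /*
         * Invalidate afterwards so subsequent CPU reads fetch what the
         * secure world wrote to DDR. Note the bounds: regs is a struct
         * pointer, so regs + 1 already spans sizeof(*regs) bytes. The
         * old regs + sizeof(*regs) advanced by sizeof(*regs) *structs*
         * and invalidated far past the end of the buffer.
         */
        dmac_inv_range(regs, regs + 1);

        return ret;
}

The attribute change in the first hunk serves the same goal: aligning
struct msm_scm_fault_regs_dump to a cache-line boundary keeps the
clean/invalidate operations on it from also hitting unrelated data that
would otherwise share its first or last cache line.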