author    Jason Gunthorpe <jgg@nvidia.com>  2020-10-26 15:19:31 +0200
committer Jason Gunthorpe <jgg@nvidia.com>  2020-11-02 14:31:40 -0400
commit    fc3325701a6353594083f08e297d4c1965c601aa (patch)
tree      40b15b7294d5b61a7eced753cb2913f8c7ce7d29 /drivers/infiniband
parent    b4d031cdae1301a8e5e9dba2a862ef028717cb17 (diff)
RDMA/mlx5: Fix corruption of reg_pages in mlx5_ib_rereg_user_mr()
reg_pages should always contain mr->npages, since when the MR is finally de-reg'd that value is always subtracted out. If there were any error exits then mlx5_ib_rereg_user_mr() would leave reg_pages adjusted, and this would eventually cause it to be double subtracted.

The manipulation of reg_pages is inherently connected to the umem, so lift it out of set_mr_fields() and only adjust it around creating/destroying a umem. reg_pages is only used for diagnostics in sysfs.

Fixes: 7d0cc6edcc70 ("IB/mlx5: Add MR cache for large UMR regions")
Link: https://lore.kernel.org/r/20201026131936.1335664-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
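The invariant the patch enforces is that the diagnostic counter is adjusted only in lockstep with umem creation and destruction, so any error exit in between leaves it balanced. A minimal userspace sketch of that pattern, using hypothetical stand-ins (reg_pages, fake_umem_get()/fake_umem_put()) rather than the kernel's actual types:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for dev->mdev->priv.reg_pages. */
static atomic_int reg_pages;

struct fake_umem { int npages; };

/* Add to the counter only where the umem is created... */
static void fake_umem_get(struct fake_umem *u, int npages)
{
	u->npages = npages;
	atomic_fetch_add(&reg_pages, u->npages);
}

/* ...and subtract only where it is destroyed, so an early return
 * anywhere in between cannot lead to a double subtraction. */
static void fake_umem_put(struct fake_umem *u)
{
	atomic_fetch_sub(&reg_pages, u->npages);
	u->npages = 0;
}

int main(void)
{
	struct fake_umem u;

	fake_umem_get(&u, 16);
	/* Any failure between get and put simply returns; the counter
	 * stays consistent because only fake_umem_put() subtracts. */
	fake_umem_put(&u);
	printf("reg_pages = %d\n", atomic_load(&reg_pages)); /* prints 0 */
	return 0;
}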
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c  20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 50bbd07b9747..f3a28119d145 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1248,10 +1248,8 @@ err_1:
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- int npages, u64 length, int access_flags)
+ u64 length, int access_flags)
{
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
@@ -1291,8 +1289,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- mr->umem = NULL;
- set_mr_fields(dev, mr, 0, length, acc);
+ set_mr_fields(dev, mr, length, acc);
return &mr->ibmr;
@@ -1420,7 +1417,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
mr->umem = umem;
- set_mr_fields(dev, mr, npages, length, access_flags);
+ mr->npages = npages;
+ atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+ set_mr_fields(dev, mr, length, access_flags);
if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
/*
@@ -1532,8 +1531,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-
if (!mr->umem)
return -EINVAL;
@@ -1554,12 +1551,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* used.
*/
flags |= IB_MR_REREG_TRANS;
+ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ mr->npages = 0;
ib_umem_release(mr->umem);
mr->umem = NULL;
+
err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err)
goto err;
+ mr->npages = ncont;
+ atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
}
if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
@@ -1610,7 +1612,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- set_mr_fields(dev, mr, npages, len, access_flags);
+ set_mr_fields(dev, mr, len, access_flags);
return 0;