author    Gerd Bayer <gbayer@linux.ibm.com>    2024-04-09 13:37:53 +0200
committer Paolo Abeni <pabeni@redhat.com>      2024-04-11 09:18:18 +0200
commit    d51dc8dd6ab6f93a894ff8b38d3b8d02c98eb9fb (patch)
tree      02f6d46ae6a036df8841e25fa8e5ca22420207dd /drivers
parent    33623113a48ea906f1955cbf71094f6aa4462e8f (diff)
Revert "s390/ism: fix receive message buffer allocation"
This reverts commit 58effa3476536215530c9ec4910ffc981613b413.

Review was not finished on this patch, so it is not ready for upstreaming.

Signed-off-by: Gerd Bayer <gbayer@linux.ibm.com>
Link: https://lore.kernel.org/r/20240409113753.2181368-1-gbayer@linux.ibm.com
Fixes: 58effa347653 ("s390/ism: fix receive message buffer allocation")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/net/ism_drv.c  38
1 file changed, 9 insertions, 29 deletions
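For reference, a minimal sketch of the two allocation strategies involved in this revert, drawn from the diff below. The helper names alloc_dmb_coherent() and alloc_dmb_streaming() are hypothetical and do not exist in ism_drv.c; only the dma_alloc_coherent() call the revert restores and the folio_alloc()/dma_map_page() pairing the reverted patch had introduced are taken from the source. Error handling is abbreviated.

/*
 * Sketch only, not driver code: contrasts the coherent allocation that
 * this revert restores with the streaming mapping the reverted patch
 * had introduced for the ISM receive (DMB) buffers.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Restored approach: one call returns both the CPU and the DMA address. */
static void *alloc_dmb_coherent(struct device *dev, size_t len,
				dma_addr_t *dma_addr)
{
	return dma_alloc_coherent(dev, len, dma_addr,
				  GFP_KERNEL | __GFP_NOWARN |
				  __GFP_NOMEMALLOC | __GFP_NORETRY);
}

/* Reverted approach: allocate pages, then map them for device writes. */
static void *alloc_dmb_streaming(struct device *dev, size_t len,
				 dma_addr_t *dma_addr)
{
	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN |
					  __GFP_NOMEMALLOC | __GFP_NORETRY,
					  get_order(len));
	void *cpu_addr;

	if (!folio)
		return NULL;

	cpu_addr = folio_address(folio);
	*dma_addr = dma_map_page(dev, virt_to_page(cpu_addr), 0, len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		folio_put(folio);
		return NULL;
	}
	return cpu_addr;
}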
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index affb05521e14..2c8e964425dc 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -14,8 +14,6 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
#include "ism.h"
@@ -294,15 +292,13 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
- dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
- DMA_FROM_DEVICE);
- folio_put(virt_to_folio(dmb->cpu_addr));
+ dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+ dmb->cpu_addr, dmb->dma_addr);
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
unsigned long bit;
- int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -319,30 +315,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr =
- folio_address(folio_alloc(GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_NORETRY,
- get_order(dmb->dmb_len)));
+ dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NOMEMALLOC | __GFP_NORETRY);
+ if (!dmb->cpu_addr)
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
- if (!dmb->cpu_addr) {
- rc = -ENOMEM;
- goto out_bit;
- }
- dmb->dma_addr = dma_map_page(&ism->pdev->dev,
- virt_to_page(dmb->cpu_addr), 0,
- dmb->dmb_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- return 0;
-
-out_free:
- kfree(dmb->cpu_addr);
-out_bit:
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
- return rc;
+ return dmb->cpu_addr ? 0 : -ENOMEM;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,