author     Loic Poulain <loic.poulain@linaro.org>    2021-01-11 19:07:42 +0100
committer  Loic Poulain <loic.poulain@linaro.org>    2021-01-28 09:04:09 +0100
commit     c139afda7f355724dc885921fbe94404786a6908 (patch)
tree       6e97f574e8407d090011e4031b4b8de263c72e36
parent     04cf04db9400fac60231cf3a24c4a3cf2b4830ad (diff)
net: mhi: Get rid of local rx queue count (mhi-for-net-next-2020-02, mhi-net-immutable, mhi-net)
Use the new mhi_get_free_desc_count helper to track queue usage instead
of relying on the locally maintained rx_queued count.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
-rw-r--r--  drivers/net/mhi_net.c  13
1 file changed, 5 insertions(+), 8 deletions(-)
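For illustration, here is a minimal user-space C model of the refill decision this patch switches to: rather than decrementing a locally maintained rx_queued counter on every completion, the driver asks the transport how many descriptors are free and schedules a refill once that count reaches half of rx_queue_sz. mock_free_desc_count() and RX_QUEUE_SZ below are stand-ins invented for this sketch; the real driver calls mhi_get_free_desc_count() from its DL callback, as the diff below shows.

/* Standalone sketch of the post-patch refill threshold (not driver code).
 * mock_free_desc_count() plays the role of mhi_get_free_desc_count();
 * RX_QUEUE_SZ is an assumed ring size chosen only for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define RX_QUEUE_SZ 128

static int queued = RX_QUEUE_SZ;       /* buffers currently posted to hardware */

static int mock_free_desc_count(void)  /* stand-in for mhi_get_free_desc_count() */
{
	return RX_QUEUE_SZ - queued;
}

static bool need_refill(void)
{
	/* Mirrors: free_desc_count >= mhi_netdev->rx_queue_sz / 2 */
	return mock_free_desc_count() >= RX_QUEUE_SZ / 2;
}

int main(void)
{
	int completions;

	/* Consume buffers one by one, as completions would arrive, and report
	 * when the refill work would be scheduled.
	 */
	for (completions = 0; completions < 3 * RX_QUEUE_SZ; completions++) {
		queued--;
		if (need_refill()) {
			printf("refill after %d completions, %d descriptors free\n",
			       completions + 1, mock_free_desc_count());
			queued = RX_QUEUE_SZ;  /* refill work repopulates the ring */
		}
	}
	return 0;
}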
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index d23c216eabfc6..bfa874f4a489c 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -25,7 +25,6 @@ struct mhi_net_stats {
u64_stats_t tx_bytes;
u64_stats_t tx_errors;
u64_stats_t tx_dropped;
- atomic_t rx_queued;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync rx_syncp;
};
@@ -138,9 +137,9 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = mhi_res->buf_addr;
- int remaining;
+ int free_desc_count;
- remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+ free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
if (unlikely(mhi_res->transaction_status)) {
dev_kfree_skb_any(skb);
@@ -164,7 +163,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
}
/* Refill if RX buffers queue becomes low */
- if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
@@ -211,7 +210,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
struct sk_buff *skb;
int err;
- while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+ while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
skb = netdev_alloc_skb(ndev, size);
if (unlikely(!skb))
break;
@@ -224,8 +223,6 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
break;
}
- atomic_inc(&mhi_netdev->stats.rx_queued);
-
/* Do not hog the CPU if rx buffers are consumed faster than
* queued (unlikely).
*/
@@ -233,7 +230,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
}
/* If we're still starved of rx buffers, reschedule later */
- if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}