author     Loic Poulain <loic.poulain@linaro.org>    2020-11-19 18:51:30 +0100
committer  Loic Poulain <loic.poulain@linaro.org>    2020-11-20 11:49:30 +0100
commit     0e9ad8c11372b18284d173da626b76a0318f1232 (patch)
tree       9d93e3210d30697e7d2c84f4132dec47a0ad7a55
parent     a13080cdf51ed0f19d40fb6d1b5d746250ef2c25 (diff)
[REWORK] net: mhi: Add dedicated alloc thread (sdx55-v72)
Moreover, testing with loopback mode shows a slight improvement in the
maximum download throughput. powertop stats also show lower CPU usage
with the new solution:

older:
    Usage       Events/s    Category    Description
    63,2 ms/s   134,0       kWork       mhi_net_rx_refill_work
    62,8 ms/s   134,3       kWork       mhi_net_rx_refill_work
    60,8 ms/s   141,4       kWork       mhi_net_rx_refill_work

newer:
    Usage       Events/s    Category    Description
    20,7 ms/s   155,6       Process     [PID 3360] [mhi_rx_alloc]
    22,2 ms/s   169,6       Process     [PID 3360] [mhi_rx_alloc]
    22,3 ms/s   150,2       Process     [PID 3360] [mhi_rx_alloc]

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
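For context, the refill path in this patch follows the classic kthread sleep/wake pattern: the thread marks itself TASK_INTERRUPTIBLE, re-checks its condition, and calls schedule() until a completion handler wakes it with wake_up_process(). The stand-alone module below is only an illustrative sketch of that pattern under invented names: the "budget" counter, the refill_demo_* symbols and the module itself are not part of the mhi_net driver.

// SPDX-License-Identifier: GPL-2.0-or-later
/* Illustrative sketch of the sleep/wake pattern used by the dedicated
 * alloc thread. The "work available" condition is faked with an atomic
 * counter standing in for free RX descriptors.
 */
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *refill_task;
static atomic_t budget = ATOMIC_INIT(0);	/* stand-in for free RX descriptors */

static int refill_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* Publish the sleep intent before checking the condition so a
		 * concurrent wake_up_process() cannot be missed.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&budget))
			schedule();
		__set_current_state(TASK_RUNNING);

		/* "Refill": consume one unit of budget per iteration. */
		if (atomic_read(&budget) > 0)
			atomic_dec(&budget);

		/* Do not hog the CPU while budget remains. */
		cond_resched();
	}
	return 0;
}

static int __init refill_demo_init(void)
{
	refill_task = kthread_run(refill_thread, NULL, "refill_demo");
	if (IS_ERR(refill_task))
		return PTR_ERR(refill_task);

	/* Simulate a completion handler signalling that buffers ran low. */
	atomic_set(&budget, 8);
	wake_up_process(refill_task);
	return 0;
}

static void __exit refill_demo_exit(void)
{
	kthread_stop(refill_task);
}

module_init(refill_demo_init);
module_exit(refill_demo_exit);
MODULE_LICENSE("GPL");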
-rw-r--r--  drivers/net/mhi_net.c  108
1 file changed, 54 insertions, 54 deletions
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 0333e075e262..56a2441680f2 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -5,6 +5,7 @@
*/
#include <linux/if_arp.h>
+#include <linux/kthread.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -25,7 +26,6 @@ struct mhi_net_stats {
u64_stats_t tx_bytes;
u64_stats_t tx_errors;
u64_stats_t tx_dropped;
- atomic_t rx_queued;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync rx_syncp;
};
@@ -33,33 +33,63 @@ struct mhi_net_stats {
struct mhi_net_dev {
struct mhi_device *mdev;
struct net_device *ndev;
- struct delayed_work rx_refill;
+ struct task_struct *alloc_task;
struct mhi_net_stats stats;
u32 rx_queue_sz;
};
+int mhi_net_alloc_thread(void *data)
+{
+	struct mhi_net_dev *mhi_netdev = data;
+	struct net_device *ndev = mhi_netdev->ndev;
+	struct mhi_device *mdev = mhi_netdev->mdev;
+	int size = READ_ONCE(ndev->mtu);
+	struct sk_buff *skb;
+	int err;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (mhi_queue_is_full(mdev, DMA_FROM_DEVICE))
+			schedule();
+		__set_current_state(TASK_RUNNING);
+
+		skb = netdev_alloc_skb(ndev, size);
+		if (unlikely(!skb))
+			break;
+
+		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+		if (unlikely(err)) {
+			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+					    ndev->name, err);
+			kfree_skb(skb);
+			break;
+		}
+
+		/* Do not hog the CPU */
+		cond_resched();
+	}
+
+	return 0;
+}
+
static int mhi_ndo_open(struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
- /* Feed the rx buffer pool */
- schedule_delayed_work(&mhi_netdev->rx_refill, 0);
-
/* Carrier is established via out-of-band channel (e.g. qmi) */
netif_carrier_on(ndev);
netif_start_queue(ndev);
+ wake_up_process(mhi_netdev->alloc_task);
+
return 0;
}
static int mhi_ndo_stop(struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
-
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- cancel_delayed_work_sync(&mhi_netdev->rx_refill);
return 0;
}
@@ -138,9 +168,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = mhi_res->buf_addr;
- int remaining;
-
- remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
if (unlikely(mhi_res->transaction_status)) {
dev_kfree_skb_any(skb);
@@ -163,9 +190,8 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
netif_rx(skb);
}
- /* Refill if RX buffers queue becomes low */
- if (remaining <= mhi_netdev->rx_queue_sz / 2)
- schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+ if (mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE) >= mhi_netdev->rx_queue_sz / 3)
+ wake_up_process(mhi_netdev->alloc_task);
}
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
@@ -200,42 +226,6 @@ static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
netif_wake_queue(ndev);
}
-static void mhi_net_rx_refill_work(struct work_struct *work)
-{
- struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
- rx_refill.work);
- struct net_device *ndev = mhi_netdev->ndev;
- struct mhi_device *mdev = mhi_netdev->mdev;
- int size = READ_ONCE(ndev->mtu);
- struct sk_buff *skb;
- int err;
-
- while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
- skb = netdev_alloc_skb(ndev, size);
- if (unlikely(!skb))
- break;
-
- err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
- if (unlikely(err)) {
- net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
- ndev->name, err);
- kfree_skb(skb);
- break;
- }
-
- atomic_inc(&mhi_netdev->stats.rx_queued);
-
- /* Do not hog the CPU if rx buffers are consumed faster than
- * queued (unlikely).
- */
- cond_resched();
- }
-
- /* If we're still starved of rx buffers, reschedule later */
- if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
- schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
-}
-
static int mhi_net_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
@@ -256,25 +246,33 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
mhi_netdev->mdev = mhi_dev;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
u64_stats_init(&mhi_netdev->stats.tx_syncp);
+ mhi_netdev->alloc_task = kthread_create(mhi_net_alloc_thread, mhi_netdev,
+ "mhi_rx_alloc");
+ if (IS_ERR(mhi_netdev->alloc_task)) {
+ err = PTR_ERR(mhi_netdev->alloc_task);
+ goto out_free_netdev;
+ }
+
/* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev, 0);
if (err)
- goto out_err;
+ goto out_stop_kthread;
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
err = register_netdev(ndev);
if (err)
- goto out_err;
+ goto out_stop_kthread;
return 0;
-out_err:
+out_stop_kthread:
+ kthread_stop(mhi_netdev->alloc_task);
+out_free_netdev:
free_netdev(ndev);
return err;
}
@@ -283,6 +281,8 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ kthread_stop(mhi_netdev->alloc_task);
+
unregister_netdev(mhi_netdev->ndev);
mhi_unprepare_from_transfer(mhi_netdev->mdev);