author     Loic Poulain <loic.poulain@linaro.org>   2021-02-25 19:15:21 +0100
committer  Loic Poulain <loic.poulain@linaro.org>   2021-03-02 19:34:04 +0100
commit     502283fff2b9dabc3d63ed8da43ff00ae41a326f (patch)
tree       30bad47d764165eb6dd7b517ce54e8da732f674e
parent     cba737479ae03bb8d15f8816fb69306730c33c16 (diff)
mhi: core: Fix runtime_pm call
This change ensures that a PM reference is always taken during packet queueing and released either after queueing completes (RX) or once the buffer has been consumed (TX).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
-rw-r--r--  drivers/bus/mhi/core/main.c  20
1 file changed, 16 insertions(+), 4 deletions(-)
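For context, a minimal sketch of the get/put pairing this patch introduces, written against the existing MHI controller hooks. This is illustrative only: example_queue_path() is a hypothetical stand-in for mhi_queue(), locking and error handling are omitted, and only the runtime_get()/runtime_put() callbacks, the dir argument, and the mhi_* types come from the real code.

#include <linux/dma-direction.h>
#include <linux/mhi.h>

/* Hypothetical sketch of the reference pairing; not part of the patch. */
static void example_queue_path(struct mhi_controller *mhi_cntrl,
			       enum dma_data_direction dir)
{
	/* One usage reference per queued packet (exits/blocks M3) */
	mhi_cntrl->runtime_get(mhi_cntrl);

	/* ... generate the transfer ring element and ring the doorbell ... */

	if (dir == DMA_FROM_DEVICE)
		/* RX: drop the reference right after ringing the doorbell */
		mhi_cntrl->runtime_put(mhi_cntrl);
	/*
	 * TX (DMA_TO_DEVICE): the matching put happens later, once the
	 * transfer completion is processed in parse_xfer_event() or the
	 * channel is torn down in mhi_reset_data_chan().
	 */
}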
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index c780234c9ef0..16b9640465af 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -584,8 +584,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 			/* notify client */
 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
-			if (mhi_chan->dir == DMA_TO_DEVICE)
+			if (mhi_chan->dir == DMA_TO_DEVICE) {
 				atomic_dec(&mhi_cntrl->pending_pkts);
+				/* Release the reference got from mhi_queue() */
+				mhi_cntrl->runtime_put(mhi_cntrl);
+			}
 
 			/*
 			 * Recycle the buffer if buffer is pre-allocated,
@@ -1021,9 +1024,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(ret))
 		goto exit_unlock;
 
-	/* trigger M3 exit if necessary */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	/* Packet is queued, take a usage ref to exit M3 if necessary
+	 * for host->device buffer, balanced put is done on buffer completion
+	 * for device->host buffer, balanced put is after ringing the DB
+	 */
+	mhi_cntrl->runtime_get(mhi_cntrl);
 
 	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1034,6 +1039,9 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
+	if (dir == DMA_FROM_DEVICE)
+		mhi_cntrl->runtime_put(mhi_cntrl);
+
 exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
@@ -1431,6 +1439,10 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
 			result.buf_addr = buf_info->cb_buf;
 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 		}
+
+		/* Release the reference got from mhi_queue() */
+		if (mhi_chan->dir == DMA_TO_DEVICE)
+			mhi_cntrl->runtime_put(mhi_cntrl);
 	}
 }
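For completeness, one plausible way a controller driver could back the runtime_get()/runtime_put() hooks used above with the kernel's runtime PM core. This is a sketch under assumptions, not taken from this patch: the example_* names are hypothetical and autosuspend is only one possible policy; the cntrl_dev member and the pm_runtime_* helpers are the real kernel APIs.

#include <linux/mhi.h>
#include <linux/pm_runtime.h>

/* Hypothetical controller-side hooks; not part of this patch. */
static int example_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* Asynchronous get against the parent controller device */
	return pm_runtime_get(mhi_cntrl->cntrl_dev);
}

static void example_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* Defer suspend via autosuspend rather than dropping immediately */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put_autosuspend(mhi_cntrl->cntrl_dev);
}

/*
 * Wired up by the controller driver before mhi_register_controller():
 *
 *	mhi_cntrl->runtime_get = example_runtime_get;
 *	mhi_cntrl->runtime_put = example_runtime_put;
 */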