author    Linus Torvalds <torvalds@linux-foundation.org>  2024-01-17 16:47:17 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-01-17 16:47:17 -0800
commit    296455ade1fdcf5f8f8c033201633b60946c589a (patch)
tree      6058ed978b2787009b1c25c2c0ad5326e2e77130 /drivers/bus
parent    e1aa9df440186af73a9e690244eb49cbc99f36ac (diff)
parent    5850edccec30325707f953bc088497b3b9041231 (diff)
Merge tag 'char-misc-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc and other driver updates from Greg KH:
 "Here is the big set of char/misc and other driver subsystem changes
  for 6.8-rc1.

  Other than lots of binder driver changes (as you can see by the merge
  conflicts) included in here are:

   - lots of iio driver updates and additions
   - spmi driver updates
   - eeprom driver updates
   - firmware driver updates
   - ocxl driver updates
   - mhi driver updates
   - w1 driver updates
   - nvmem driver updates
   - coresight driver updates
   - platform driver remove callback api changes
   - tags.sh script updates
   - bus_type constant marking cleanups
   - lots of other small driver updates

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (341 commits)
  android: removed duplicate linux/errno
  uio: Fix use-after-free in uio_open
  drivers: soc: xilinx: add check for platform
  firmware: xilinx: Export function to use in other module
  scripts/tags.sh: remove find_sources
  scripts/tags.sh: use -n to test archinclude
  scripts/tags.sh: add local annotation
  scripts/tags.sh: use more portable -path instead of -wholename
  scripts/tags.sh: Update comment (addition of gtags)
  firmware: zynqmp: Convert to platform remove callback returning void
  firmware: turris-mox-rwtm: Convert to platform remove callback returning void
  firmware: stratix10-svc: Convert to platform remove callback returning void
  firmware: stratix10-rsu: Convert to platform remove callback returning void
  firmware: raspberrypi: Convert to platform remove callback returning void
  firmware: qemu_fw_cfg: Convert to platform remove callback returning void
  firmware: mtk-adsp-ipc: Convert to platform remove callback returning void
  firmware: imx-dsp: Convert to platform remove callback returning void
  firmware: coreboot_table: Convert to platform remove callback returning void
  firmware: arm_scpi: Convert to platform remove callback returning void
  firmware: arm_scmi: Convert to platform remove callback returning void
  ...
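A note on the "platform driver remove callback api changes" and the many "Convert to platform remove callback returning void" commits listed above: the pattern is a mechanical conversion from a .remove() callback that returns an int (which the driver core largely ignored) to one that returns void. The sketch below is an illustration only, not code from any driver in this pull; the hypothetical driver name "foo" and its callbacks are made up, and at the time of this merge the conversion went through the transitional .remove_new member of struct platform_driver.

/* Hypothetical driver "foo" -- illustration only, not part of this merge. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/*
 * Before the conversion, the remove callback returned an int that the
 * driver core largely ignored:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 * After the conversion it returns void and, during the transition period
 * around v6.8, is registered through the .remove_new member.
 */
static void foo_remove(struct platform_device *pdev)
{
	/* release resources; there is nothing meaningful to return */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,
	.driver     = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Illustrative platform remove-callback conversion");
MODULE_LICENSE("GPL");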
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/mhi/ep/internal.h         4
-rw-r--r--  drivers/bus/mhi/ep/main.c           387
-rw-r--r--  drivers/bus/mhi/ep/ring.c            60
-rw-r--r--  drivers/bus/mhi/host/init.c           1
-rw-r--r--  drivers/bus/mhi/host/internal.h       2
-rw-r--r--  drivers/bus/mhi/host/main.c          34
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c   22
-rw-r--r--  drivers/bus/mhi/host/pm.c            24
-rw-r--r--  drivers/bus/moxtet.c                  2
9 files changed, 370 insertions, 166 deletions
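The bulk of the MHI endpoint diff below replaces the controller's direct read_from_host()/write_to_host() hooks with a struct mhi_ep_buf_info descriptor and split read_sync/write_sync and read_async/write_async callbacks, where the async variants complete through buf_info->cb(). As a minimal sketch of what the controller side might look like -- assuming a hypothetical "foo" controller whose foo_ep_map_host_addr()/foo_ep_unmap_host_addr() helpers stand in for the real outbound-window or DMA plumbing, which is not shown in this diff:

static int foo_ep_read_sync(struct mhi_ep_cntrl *mhi_cntrl,
			    struct mhi_ep_buf_info *buf_info)
{
	void __iomem *src;

	/* Map buf_info->host_addr into endpoint address space; the helper
	 * is a placeholder for the controller's iATU/outbound-window setup. */
	src = foo_ep_map_host_addr(mhi_cntrl, buf_info->host_addr, buf_info->size);
	if (!src)
		return -ENOMEM;

	memcpy_fromio(buf_info->dev_addr, src, buf_info->size);
	foo_ep_unmap_host_addr(mhi_cntrl, src);

	return 0;
}

static int foo_ep_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			     struct mhi_ep_buf_info *buf_info)
{
	int ret;

	/* A real controller would queue a DMA transfer here and invoke the
	 * completion callback from its "transfer done" interrupt; falling
	 * back to the synchronous path keeps the sketch self-contained. */
	ret = foo_ep_read_sync(mhi_cntrl, buf_info);
	if (ret)
		return ret;

	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

A controller driver would assign these (plus the corresponding write paths) to mhi_cntrl->read_sync/read_async and write_sync/write_async before calling mhi_ep_register_controller(), which the diff below now requires all four hooks to be populated.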
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index a2125fa5fe2f9..577965f95fda9 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -126,6 +126,7 @@ struct mhi_ep_ring {
union mhi_ep_ring_ctx *ring_ctx;
struct mhi_ring_element *ring_cache;
enum mhi_ep_ring_type type;
+ struct delayed_work intmodt_work;
u64 rbase;
size_t rd_offset;
size_t wr_offset;
@@ -135,7 +136,9 @@ struct mhi_ep_ring {
u32 ch_id;
u32 er_index;
u32 irq_vector;
+ u32 intmodt;
bool started;
+ bool irq_pending;
};
struct mhi_ep_cmd {
@@ -159,6 +162,7 @@ struct mhi_ep_chan {
void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
enum mhi_ch_state state;
enum dma_data_direction dir;
+ size_t rd_offset;
u64 tre_loc;
u32 tre_size;
u32 tre_bytes_left;
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 600881808982a..65fc1d738bec2 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -54,11 +54,27 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
mutex_unlock(&mhi_cntrl->event_lock);
/*
- * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
- * set this flag for interrupt moderation as per MHI protocol.
+ * As per the MHI specification, section 4.3, Interrupt moderation:
+ *
+ * 1. If BEI flag is not set, cancel any pending intmodt work if started
+ * for the event ring and raise IRQ immediately.
+ *
+ * 2. If both BEI and intmodt are set, and if no IRQ is pending for the
+ * same event ring, start the IRQ delayed work as per the value of
+ * intmodt. If previous IRQ is pending, then do nothing as the pending
+ * IRQ is enough for the host to process the current event ring element.
+ *
+ * 3. If BEI is set and intmodt is not set, no need to raise IRQ.
*/
- if (!bei)
+ if (!bei) {
+ if (READ_ONCE(ring->irq_pending))
+ cancel_delayed_work(&ring->intmodt_work);
+
mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+ } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) {
+ WRITE_ONCE(ring->irq_pending, true);
+ schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt));
+ }
return 0;
@@ -71,45 +87,77 @@ err_unlock:
static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
- event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
- event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
- return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+ ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
}
int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
- event.dword[0] = MHI_SC_EV_DWORD0(state);
- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ event->dword[0] = MHI_SC_EV_DWORD0(state);
+ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
}
int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
- event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
}
static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event->dword[0] = MHI_CC_EV_DWORD0(code);
+ event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
- event.dword[0] = MHI_CC_EV_DWORD0(code);
- event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ return ret;
}
static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
@@ -151,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
goto err_unlock;
}
+
+ mhi_chan->rd_offset = ch_ring->rd_offset;
}
/* Set channel state to RUNNING */
@@ -280,22 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
- return !!(ring->rd_offset == ring->wr_offset);
+ return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ }
+ }
+
+ mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
- struct mhi_ep_ring *ring,
- struct mhi_result *result,
- u32 len)
+ struct mhi_ep_ring *ring)
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
+ struct mhi_ep_buf_info buf_info = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
bool tr_done = false;
- void *write_addr;
- u64 read_addr;
+ void *buf_addr;
u32 buf_left;
int ret;
@@ -308,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
return -ENODEV;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
@@ -324,81 +437,51 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
- read_addr = mhi_chan->tre_loc + read_offset;
- write_addr = result->buf_addr + write_offset;
+
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ if (!buf_addr)
+ return -ENOMEM;
+
+ buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+ buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_read_completion;
+ buf_info.cb_buf = buf_addr;
+ buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_chan->tre_bytes_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
- return ret;
+ goto err_free_buf_addr;
}
buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- /*
- * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
- * read completely:
- *
- * 1. Send completion event to the host based on the flags set in TRE.
- * 2. Increment the local read offset of the transfer ring.
- */
if (!mhi_chan->tre_bytes_left) {
- /*
- * The host will split the data packet into multiple TREs if it can't fit
- * the packet in a single TRE. In that case, CHAIN flag will be set by the
- * host for all TREs except the last one.
- */
- if (MHI_TRE_DATA_GET_CHAIN(el)) {
- /*
- * IEOB (Interrupt on End of Block) flag will be set by the host if
- * it expects the completion event for all TREs of a TD.
- */
- if (MHI_TRE_DATA_GET_IEOB(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOB);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
- } else {
- /*
- * IEOT (Interrupt on End of Transfer) flag will be set by the host
- * for the last TRE of the TD and expects the completion event for
- * the same.
- */
- if (MHI_TRE_DATA_GET_IEOT(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOT);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
-
+ if (MHI_TRE_DATA_GET_IEOT(el))
tr_done = true;
- }
- mhi_ep_ring_inc_index(ring);
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
}
-
- result->bytes_xferd += tr_len;
} while (buf_left && !tr_done);
return 0;
+
+err_free_buf_addr:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+ return ret;
}
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct mhi_result result = {};
- u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ep_chan *mhi_chan;
int ret;
@@ -419,44 +502,59 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- result.buf_addr = kzalloc(len, GFP_KERNEL);
- if (!result.buf_addr)
- return -ENOMEM;
-
do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- kfree(result.buf_addr);
return ret;
}
- result.dir = mhi_chan->dir;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- result.bytes_xferd = 0;
- memset(result.buf_addr, 0, len);
-
/* Read until the ring becomes empty */
} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
- kfree(result.buf_addr);
}
return 0;
}
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+ buf_info->code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ return;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+}
+
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
- enum mhi_ev_ccs code;
- void *read_addr;
- u64 write_addr;
size_t tr_len;
u32 tre_len;
int ret;
@@ -480,40 +578,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
goto err_exit;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
tre_len = MHI_TRE_DATA_GET_LEN(el);
tr_len = min(buf_left, tre_len);
read_offset = skb->len - buf_left;
- read_addr = skb->data + read_offset;
- write_addr = MHI_TRE_DATA_GET_PTR(el);
- dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
- if (ret < 0) {
- dev_err(dev, "Error writing to the channel\n");
- goto err_exit;
- }
+ buf_info.dev_addr = skb->data + read_offset;
+ buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
+ buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_skb_completion;
+ buf_info.cb_buf = skb;
+ buf_info.mhi_dev = mhi_dev;
- buf_left -= tr_len;
/*
* For all TREs queued by the host for DL channel, only the EOT flag will be set.
* If the packet doesn't fit into a single TRE, send the OVERFLOW event to
* the host so that the host can adjust the packet boundary to next TREs. Else send
* the EOT event to the host indicating the packet boundary.
*/
- if (buf_left)
- code = MHI_EV_CC_OVERFLOW;
+ if (buf_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
else
- code = MHI_EV_CC_EOT;
+ buf_info.code = MHI_EV_CC_EOT;
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
- if (ret) {
- dev_err(dev, "Error sending transfer completion event\n");
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
goto err_exit;
}
- mhi_ep_ring_inc_index(ring);
+ buf_left -= tr_len;
+
+ /*
+ * Update the read offset cached in mhi_chan. Actual read offset
+ * will be updated by the completion handler.
+ */
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
} while (buf_left);
mutex_unlock(&mhi_chan->lock);
@@ -714,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ep_ring_item *itr, *tmp;
- struct mhi_ring_element *el;
struct mhi_ep_ring *ring;
struct mhi_ep_chan *chan;
unsigned long flags;
@@ -748,31 +849,29 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}
/* Sanity check to make sure there are elements in the ring */
- if (ring->rd_offset == ring->wr_offset) {
+ if (chan->rd_offset == ring->wr_offset) {
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}
- el = &ring->ring_cache[ring->rd_offset];
-
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
- ret = mhi_ep_process_ch_ring(ring, el);
+ ret = mhi_ep_process_ch_ring(ring);
if (ret) {
dev_err(dev, "Error processing ring for channel (%u): %d\n",
ring->ch_id, ret);
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
}
}
@@ -828,7 +927,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon
u32 ch_id = ch_idx + i;
ring = &mhi_cntrl->mhi_chan[ch_id].ring;
- item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
if (!item)
return;
@@ -1365,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
return -EINVAL;
+ if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync ||
+ !mhi_cntrl->read_async || !mhi_cntrl->write_async)
+ return -EINVAL;
+
ret = mhi_ep_chan_init(mhi_cntrl, config);
if (ret)
return ret;
@@ -1375,6 +1478,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_ch;
}
+ mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
+ sizeof(struct mhi_ring_element), 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->ev_ring_el_cache) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->tre_buf_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_ev_ring_el_cache;
+ }
+
+ mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
+ sizeof(struct mhi_ep_ring_item), 0,
+ 0, NULL);
+ if (!mhi_cntrl->ring_item_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_tre_buf_cache;
+ }
+
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
@@ -1383,7 +1509,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
- goto err_free_cmd;
+ goto err_destroy_ring_item_cache;
}
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
@@ -1442,6 +1568,12 @@ err_ida_free:
ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
destroy_workqueue(mhi_cntrl->wq);
+err_destroy_ring_item_cache:
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+err_destroy_tre_buf_cache:
+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+err_destroy_ev_ring_el_cache:
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_free_cmd:
kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
@@ -1463,6 +1595,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
free_irq(mhi_cntrl->irq, mhi_cntrl);
+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_chan);
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index 115518ec76a43..aeb53b2c34a8c 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t start, copy_size;
+ struct mhi_ep_buf_info buf_info = {};
+ size_t start;
int ret;
/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
start = ring->wr_offset;
if (start < end) {
- copy_size = (end - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
} else {
- copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
if (end) {
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
- &ring->ring_cache[0],
- end * sizeof(struct mhi_ring_element));
+ buf_info.host_addr = ring->rbase;
+ buf_info.dev_addr = &ring->ring_cache[0];
+ buf_info.size = end * sizeof(struct mhi_ring_element);
+
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
}
}
- dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
return 0;
}
@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
size_t old_offset = 0;
u32 num_free_elem;
__le64 rp;
@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
- sizeof(*el));
- if (ret < 0)
- return ret;
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
- return 0;
+ return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
@@ -157,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32
}
}
+static void mhi_ep_raise_irq(struct work_struct *work)
+{
+ struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+ WRITE_ONCE(ring->irq_pending, false);
+}
+
int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
union mhi_ep_ring_ctx *ctx)
{
@@ -173,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
if (ring->type == RING_TYPE_CH)
ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
- if (ring->type == RING_TYPE_ER)
+ if (ring->type == RING_TYPE_ER) {
ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+ ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK,
+ le32_to_cpu(ring->ring_ctx->ev.intmod));
+
+ INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq);
+ }
/* During ring init, both rp and wp are equal */
memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
@@ -201,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
+ if (ring->type == RING_TYPE_ER)
+ cancel_delayed_work_sync(&ring->intmodt_work);
+
ring->started = false;
kfree(ring->ring_cache);
ring->ring_cache = NULL;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index f78aefd2d7a36..65ceac1837f9a 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -881,6 +881,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl,
if (!mhi_cntrl->timeout_ms)
mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+ mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
mhi_cntrl->bounce_buf = config->use_bounce_buf;
mhi_cntrl->buffer_len = config->buf_len;
if (!mhi_cntrl->buffer_len)
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 2e139e76de4c0..30ac415a3000f 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -321,7 +321,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 mask,
- u32 val, u32 delayus);
+ u32 val, u32 delayus, u32 timeout_ms);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index dcf627b36e829..abb561db9ae1d 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset,
- u32 mask, u32 val, u32 delayus)
+ u32 mask, u32 val, u32 delayus,
+ u32 timeout_ms)
{
int ret;
- u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ u32 out, retry = (timeout_ms * 1000) / delayus;
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
@@ -268,7 +269,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
- return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
+ !(addr & (sizeof(struct mhi_ring_element) - 1));
}
int mhi_destroy_device(struct device *dev, void *data)
@@ -642,6 +644,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
+ read_unlock_bh(&mhi_chan->lock);
+
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
@@ -667,6 +671,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
kfree(buf_info->cb_buf);
}
}
+
+ read_lock_bh(&mhi_chan->lock);
}
break;
} /* CC_EOT */
@@ -1122,17 +1128,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
- if (unlikely(ret)) {
- ret = -EAGAIN;
- goto exit_unlock;
- }
+ if (unlikely(ret))
+ return -EAGAIN;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
if (unlikely(ret))
- goto exit_unlock;
+ return ret;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* Packet is queued, take a usage ref to exit M3 if necessary
* for host->device buffer, balanced put is done on buffer completion
@@ -1152,7 +1156,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
if (dir == DMA_FROM_DEVICE)
mhi_cntrl->runtime_put(mhi_cntrl);
-exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
return ret;
@@ -1204,6 +1207,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int eot, eob, chain, bei;
int ret;
+ /* Protect accesses for reading and incrementing WP */
+ write_lock_bh(&mhi_chan->lock);
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1221,8 +1227,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
+ if (ret) {
+ write_unlock_bh(&mhi_chan->lock);
return ret;
+ }
}
eob = !!(flags & MHI_EOB);
@@ -1239,6 +1247,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+ write_unlock_bh(&mhi_chan->lock);
+
return 0;
}
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 08f3f039dbddc..cd6cd14b3d29b 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .ready_timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+ .ch_cfg = modem_qcom_v1_mhi_channels,
+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+ .event_cfg = modem_qcom_v1_mhi_events,
+};
+
static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
+ .name = "qcom-sdx75m",
+ .fw = "qcom/sdx75m/xbl.elf",
+ .edl = "qcom/sdx75m/edl.mbn",
+ .config = &modem_qcom_v2_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.name = "qcom-sdx65m",
.fw = "qcom/sdx65m/xbl.elf",
@@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 8a4362d75fc43..a2f2feef14768 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -163,6 +163,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
enum mhi_pm_state cur_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ u32 timeout_ms;
int ret, i;
/* Check if device entered error state */
@@ -173,14 +174,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
/* Wait for RESET to be cleared and READY bit to be set by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, interval_us);
+ MHICTRL_RESET_MASK, 0, interval_us,
+ mhi_cntrl->timeout_ms);
if (ret) {
dev_err(dev, "Device failed to clear MHI Reset\n");
return ret;
}
+ timeout_ms = mhi_cntrl->ready_timeout_ms ?
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
- MHISTATUS_READY_MASK, 1, interval_us);
+ MHISTATUS_READY_MASK, 1, interval_us,
+ timeout_ms);
if (ret) {
dev_err(dev, "Device failed to enter MHI Ready\n");
return ret;
@@ -479,7 +484,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
/* Wait for the reset bit to be cleared by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, 25000);
+ MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
if (ret)
dev_err(dev, "Device failed to clear MHI Reset\n");
@@ -492,8 +497,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
/* wait for ready to be set */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
- MHISTATUS,
- MHISTATUS_READY_MASK, 1, 25000);
+ MHISTATUS, MHISTATUS_READY_MASK,
+ 1, 25000, mhi_cntrl->timeout_ms);
if (ret)
dev_err(dev, "Device failed to enter READY state\n");
}
@@ -1111,7 +1116,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, interval_us);
+ MHICTRL_RESET_MASK, 0, interval_us,
+ mhi_cntrl->timeout_ms);
if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
goto error_exit;
@@ -1202,14 +1208,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
int ret = mhi_async_power_up(mhi_cntrl);
+ u32 timeout_ms;
if (ret)
return ret;
+ /* Some devices need more time to set ready during power up */
+ timeout_ms = mhi_cntrl->ready_timeout_ms ?
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
if (ret)
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index e384fbc6c1d93..641c1a6adc8ae 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static struct bus_type moxtet_bus_type = {
+static const struct bus_type moxtet_bus_type = {
.name = "moxtet",
.dev_groups = moxtet_dev_groups,
.match = moxtet_match,