author      Richard Fitzgerald <rf@opensource.cirrus.com>  2022-12-02 16:18:12 +0000
committer   Vinod Koul <vkoul@kernel.org>  2023-01-09 21:36:08 +0530
commit      0603a47bd3a8f439d7844b841eee1819353063e0 (patch)
tree        0017f93cc5e236d039d481ce8cd1222bf7e40704 /drivers/soundwire
parent      827c32d0df4bbe0d1c47d79f6a5eabfe9ac75216 (diff)
download    linux-0603a47bd3a8f439d7844b841eee1819353063e0.tar.gz
soundwire: cadence: Drain the RX FIFO after an IO timeout
If wait_for_completion_timeout() times out in _cdns_xfer_msg() it is possible that something could have been written to the RX FIFO. In this case, we should drain the RX FIFO so that anything in it doesn't carry over and mess up the next transfer.

Obviously, if we got to this state something went wrong, and we don't really know the state of everything. The cleanup in this situation cannot be bullet-proof, but we should attempt to avoid breaking future transactions, if only to reduce the amount of error noise when debugging the failure from a kernel log.

Note that this patch only implements the draining for blocking (non-deferred) transfers. The deferred API doesn't have any proper handling of error conditions and would need some re-design before implementing cleanup. That is a task for a separate patch...

Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
Link: https://lore.kernel.org/r/20221202161812.4186897-4-rf@opensource.cirrus.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
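As an illustration of the pattern, here is a minimal, self-contained user-space sketch (not kernel code; the names toy_fifo, fifo_drain and do_transfer are hypothetical and only model the behaviour described above): when a transfer times out, read out and discard whatever is sitting in the response FIFO so a stale entry cannot be mistaken for the reply to the next command.

  #include <stdio.h>

  #define FIFO_DEPTH 8

  /* Toy model of a response FIFO: 'avail' entries are waiting to be read. */
  struct toy_fifo {
      unsigned int avail;
      unsigned int data[FIFO_DEPTH];
  };

  /* Read out and discard every pending entry, analogous to draining the RX FIFO. */
  static void fifo_drain(struct toy_fifo *fifo)
  {
      unsigned int i;

      for (i = 0; i < fifo->avail; i++)
          printf("discarding stale response 0x%x\n", fifo->data[i]);

      fifo->avail = 0;
  }

  /* Simulated transfer: on a timeout a late response may still land in the
   * FIFO, so it is drained before the error is returned to the caller.
   */
  static int do_transfer(struct toy_fifo *fifo, int will_timeout)
  {
      if (will_timeout) {
          fifo->data[fifo->avail++] = 0xdead;  /* late reply from the device */
          fifo_drain(fifo);                    /* keep the FIFO clean for the next transfer */
          return -1;                           /* report the timeout */
      }
      return 0;
  }

  int main(void)
  {
      struct toy_fifo fifo = { 0 };

      do_transfer(&fifo, 1);        /* times out, drains the stale entry */
      return do_transfer(&fifo, 0); /* next transfer starts from a clean FIFO */
  }

The kernel change below applies the same idea by moving the existing IRQ-path helper cdns_read_response() earlier in the file and calling it from the timeout path of _cdns_xfer_msg(), so the drain uses the same code that normally empties the RX FIFO.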
Diffstat (limited to 'drivers/soundwire')
-rw-r--r--  drivers/soundwire/cadence_master.c  50
1 file changed, 27 insertions, 23 deletions
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index a6635f7f350ef..5213873221458 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -555,6 +555,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
return SDW_CMD_OK;
}
+static void cdns_read_response(struct sdw_cdns *cdns)
+{
+ u32 num_resp, cmd_base;
+ int i;
+
+ /* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
+ BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
+
+ num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+ num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+ if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
+ dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
+ num_resp = ARRAY_SIZE(cdns->response_buf);
+ }
+
+ cmd_base = CDNS_MCP_CMD_BASE;
+
+ for (i = 0; i < num_resp; i++) {
+ cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+ cmd_base += CDNS_MCP_CMD_WORD_LEN;
+ }
+}
+
static enum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
int offset, int count, bool defer)
@@ -596,6 +619,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
cmd, msg->dev_num, msg->addr, msg->len);
msg->len = 0;
+
+ /* Drain anything in the RX_FIFO */
+ cdns_read_response(cdns);
+
return SDW_CMD_TIMEOUT;
}
@@ -769,29 +796,6 @@ EXPORT_SYMBOL(cdns_read_ping_status);
* IRQ handling
*/
-static void cdns_read_response(struct sdw_cdns *cdns)
-{
- u32 num_resp, cmd_base;
- int i;
-
- /* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
- BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
-
- num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
- num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
- if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
- dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
- num_resp = ARRAY_SIZE(cdns->response_buf);
- }
-
- cmd_base = CDNS_MCP_CMD_BASE;
-
- for (i = 0; i < num_resp; i++) {
- cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
- cmd_base += CDNS_MCP_CMD_WORD_LEN;
- }
-}
-
static int cdns_update_slave_status(struct sdw_cdns *cdns,
u64 slave_intstat)
{