Diffstat (limited to 'queue-6.6/eth-bnxt-fix-counting-packets-discarded-due-to-oom-a.patch')
-rw-r--r--  queue-6.6/eth-bnxt-fix-counting-packets-discarded-due-to-oom-a.patch  150
1 file changed, 150 insertions, 0 deletions
diff --git a/queue-6.6/eth-bnxt-fix-counting-packets-discarded-due-to-oom-a.patch b/queue-6.6/eth-bnxt-fix-counting-packets-discarded-due-to-oom-a.patch
new file mode 100644
index 0000000000..abe20d5eb1
--- /dev/null
+++ b/queue-6.6/eth-bnxt-fix-counting-packets-discarded-due-to-oom-a.patch
@@ -0,0 +1,150 @@
+From 41c80ca897ba17a08df35e5526a5187159f96cea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Apr 2024 17:21:48 -0700
+Subject: eth: bnxt: fix counting packets discarded due to OOM and netpoll
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 730117730709992c9f6535dd7b47638ee561ec45 ]
+
+I added OOM and netpoll discard counters, naively assuming that
+the cpr pointer points to a common completion ring.
+It turns out that it is usually *a* completion ring, but not *the*
+completion ring which bnapi->cp_ring points to. bnapi->cp_ring
+is where the stats are read from, so we end up reporting 0
+through ethtool -S and qstat even though the drop events have happened.
+Make 100% sure we're recording statistics in the correct structure.
+
+Fixes: 907fd4a294db ("bnxt: count discards due to memory allocation errors")
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://lore.kernel.org/r/20240424002148.3937059-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 44 ++++++++++-------------
+ 1 file changed, 18 insertions(+), 26 deletions(-)
+
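+Note (illustration only, not part of the applied diff): the driver keeps
+per-ring software stats, but ethtool -S and qstat only read the stats hanging
+off bnapi->cp_ring. The old code bumped the counters on whichever
+completion-ring pointer happened to be in hand, which is often a different
+structure, so the reader saw 0. Below is a minimal stand-alone C model of that
+mismatch, using made-up, simplified struct names rather than the real driver
+layout:
+
+	#include <stdio.h>
+
+	struct sw_stats { unsigned long rx_oom_discards; };
+	struct cp_ring  { struct sw_stats sw_stats; };
+	struct bnapi    { struct cp_ring cp_ring; };	/* ring the stats reader looks at */
+	struct cp_sub   { struct bnapi *bnapi;		/* back-pointer to the owning bnapi */
+			  struct sw_stats sw_stats; };	/* ring the RX path happens to hold */
+
+	int main(void)
+	{
+		struct bnapi napi = { 0 };
+		struct cp_sub cpr = { .bnapi = &napi };
+
+		/* old code: bump the ring in hand; the reader never sees it */
+		cpr.sw_stats.rx_oom_discards += 1;
+		printf("reported (old): %lu\n", napi.cp_ring.sw_stats.rx_oom_discards);
+
+		/* fixed code: bump the same structure the reader uses */
+		cpr.bnapi->cp_ring.sw_stats.rx_oom_discards += 1;
+		printf("reported (new): %lu\n", napi.cp_ring.sw_stats.rx_oom_discards);
+		return 0;
+	}
+
+The real patch applies the same idea by routing every rx_oom_discards and
+rx_netpoll_discards increment through cpr->bnapi->cp_ring, and by folding the
+repeated OOM error paths in bnxt_rx_pkt() into a single oom_next_rx label.
+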
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index d40b91719b79b..724624737d095 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1659,7 +1659,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ } else {
+@@ -1669,7 +1669,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+
+@@ -1685,7 +1685,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ if (!skb) {
+ skb_free_frag(data);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ skb_reserve(skb, bp->rx_offset);
+@@ -1696,7 +1696,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ }
+@@ -1914,11 +1914,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+- if (!frag_len) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!frag_len)
++ goto oom_next_rx;
+ }
+ xdp_active = true;
+ }
+@@ -1941,9 +1938,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
++ goto oom_next_rx;
+ }
+ } else {
+ u32 payload;
+@@ -1954,29 +1949,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ payload = 0;
+ skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ payload | len);
+- if (!skb) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!skb)
++ goto oom_next_rx;
+ }
+
+ if (agg_bufs) {
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+- if (!skb) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!skb)
++ goto oom_next_rx;
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
++ goto oom_next_rx;
+ }
+ }
+ }
+@@ -2054,6 +2041,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
++
++oom_next_rx:
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
++ rc = -ENOMEM;
++ goto next_rx;
+ }
+
+ /* In netpoll mode, if we are using a combined completion ring, we need to
+@@ -2099,7 +2091,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
+ }
+ rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
+ if (rc && rc != -EBUSY)
+- cpr->sw_stats.rx.rx_netpoll_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
+ return rc;
+ }
+
+--
+2.43.0
+