author		Linus Torvalds <torvalds@linux-foundation.org>	2024-01-22 09:40:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-01-22 09:40:05 -0800
commit		0f0d819aef6feb711dfd773ef906b9cbd02a2419 (patch)
tree		d6b8e0ae4dcd3acf1a3b6da067a94c3b04c1307e
parent		6613476e225e090cc9aad49be7fa504e290dd33d (diff)
parent		c7ec4f2d684e17d69bbdd7c4324db0ef5daac26a (diff)
download	linux-bcache-0f0d819aef6feb711dfd773ef906b9cbd02a2419.tar.gz
Merge tag 'xsa448-6.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen netback fix from Juergen Gross:
 "Transmit requests in Xen's virtual network protocol can consist of
  multiple parts. While not really useful, except for the initial part
  any of them may be of zero length, i.e. carry no data at all.

  Besides a certain initial portion of the to be transferred data, these
  parts are directly translated into what Linux calls SKB fragments.
  Such converted request parts can, when for a particular SKB they are
  all of length zero, lead to a de-reference of NULL in core networking
  code"

* tag 'xsa448-6.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-netback: don't produce zero-size SKB frags
-rw-r--r--	drivers/net/xen-netback/netback.c	44
1 file changed, 38 insertions(+), 6 deletions(-)
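
As background for the diff below, here is a minimal, self-contained userspace C sketch (not kernel code; struct tx_part and parts_to_frags are invented for this illustration) of the rule the patch enforces in xenvif_get_requests(): request parts whose size is zero are completed immediately instead of being turned into SKB fragments, so an SKB whose follow-up parts all carry no data ends up with no fragments at all rather than with zero-size ones.

	#include <stdio.h>
	#include <stddef.h>

	/* Stand-in for one slot of a guest transmit request (hypothetical type). */
	struct tx_part {
		size_t size;
	};

	/*
	 * Count how many parts would become SKB fragments.  Mirroring the fix,
	 * zero-size parts are skipped (in the driver they are completed right
	 * away instead) and never become a fragment.
	 */
	static unsigned int parts_to_frags(const struct tx_part *parts, unsigned int n)
	{
		unsigned int frags = 0;
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (parts[i].size == 0)
				continue;	/* empty part: acknowledge it, no fragment */
			frags++;		/* non-empty part: becomes one fragment */
		}
		return frags;
	}

	int main(void)
	{
		/* All follow-up parts carry no data at all. */
		const struct tx_part parts[] = { { 0 }, { 0 }, { 0 } };

		/* Prints 0: no zero-size fragments reach core networking code. */
		printf("fragments produced: %u\n",
		       parts_to_frags(parts, sizeof(parts) / sizeof(parts[0])));
		return 0;
	}

In the driver itself the skipped parts are still acknowledged to the guest via make_tx_response() and push_tx_responses() under queue->response_lock, as the hunks below show.
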
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 88f760a7cbc354..d7503aef599f04 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	}
 
 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-	     shinfo->nr_frags++, gop++, nr_slots--) {
+	     nr_slots--) {
+		if (unlikely(!txp->size)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&queue->response_lock, flags);
+			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			spin_unlock_irqrestore(&queue->response_lock, flags);
+			++txp;
+			continue;
+		}
+
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
 					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		++shinfo->nr_frags;
+		++gop;
 
 		if (txp == first)
 			txp = txfrags;
@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-		     shinfo->nr_frags++, txp++, gop++) {
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+			if (unlikely(!txp->size)) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&queue->response_lock, flags);
+				make_tx_response(queue, txp, 0,
+						 XEN_NETIF_RSP_OKAY);
+				push_tx_responses(queue);
+				spin_unlock_irqrestore(&queue->response_lock,
+						       flags);
+				continue;
+			}
+
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 						gop);
 			frag_set_pending_idx(&frags[shinfo->nr_frags],
 					     pending_idx);
+			++shinfo->nr_frags;
+			++gop;
 		}
 
-		skb_shinfo(skb)->frag_list = nskb;
-	} else if (nskb) {
+		if (shinfo->nr_frags) {
+			skb_shinfo(skb)->frag_list = nskb;
+			nskb = NULL;
+		}
+	}
+
+	if (nskb) {
 		/* A frag_list skb was allocated but it is no longer needed
-		 * because enough slots were converted to copy ops above.
+		 * because enough slots were converted to copy ops above or some
+		 * were empty.
 		 */
 		kfree_skb(nskb);
 	}
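
The tail of the second hunk also changes how the pre-allocated frag_list skb is handled. Distilled below with the driver's own identifiers (surrounding context assumed): nskb is attached to the main skb only if it actually received fragments, and the former "} else if (nskb)" branch becomes a standalone check that frees a frag_list skb left unused because enough slots were converted to copy ops or were empty.

	if (shinfo->nr_frags) {
		skb_shinfo(skb)->frag_list = nskb;	/* it holds real fragments */
		nskb = NULL;				/* ownership passes to skb */
	}

	if (nskb)
		kfree_skb(nskb);			/* never attached: drop it */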