Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--	drivers/net/virtio_net.c	| 151
1 file changed, 99 insertions(+), 52 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7ce4a1011ea2..c22d1118a1333 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -80,6 +80,11 @@ struct virtnet_stat_desc {
size_t offset;
};
+struct virtnet_sq_free_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64_stats_t packets;
@@ -304,6 +309,12 @@ struct virtnet_info {
/* Work struct for config space updates */
struct work_struct config_work;
+ /* Work struct for setting rx mode */
+ struct work_struct rx_mode_work;
+
+ /* OK to queue work setting RX mode? */
+ bool rx_mode_work_enabled;
+
/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
@@ -366,6 +377,31 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ unsigned int len;
+ void *ptr;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ ++stats->packets;
+
+ if (!is_xdp_frame(ptr)) {
+ struct sk_buff *skb = ptr;
+
+ pr_debug("Sent skb %p\n", skb);
+
+ stats->bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ stats->bytes += xdp_get_frame_len(frame);
+ xdp_return_frame(frame);
+ }
+ }
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
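
For reference, the helpers that implement this numbering scheme sit just below the comment in the file and are untouched by this patch; a sketch of their shape, reconstructed from the mapping described above:

	static int vq2txq(struct virtqueue *vq)
	{
		return (vq->index - 1) / 2;	/* tx vqs are the odd indexes */
	}

	static int txq2vq(int txq)
	{
		return txq * 2 + 1;
	}

	static int vq2rxq(struct virtqueue *vq)
	{
		return vq->index / 2;		/* rx vqs are the even indexes */
	}

	static int rxq2vq(int rxq)
	{
		return rxq * 2;
	}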
@@ -447,6 +483,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
spin_unlock_bh(&vi->refill_lock);
}
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = true;
+ rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = false;
+ rtnl_unlock();
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
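
enable_rx_mode_work() and disable_rx_mode_work() flip the flag under rtnl_lock() so it cannot change underneath the paths that consult it, which also run under RTNL. The point of the pair is safe teardown; a minimal sketch of the ordering, mirroring the freeze/remove hunks further down:

	disable_rx_mode_work(vi);	/* no new work can be queued... */
	flush_work(&vi->rx_mode_work);	/* ...and queued work has completed */
	/* the device can now be detached or reset safely */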
@@ -776,39 +826,21 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
virtnet_rq_free_buf(vi, rq, buf);
}
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
- unsigned int len;
- unsigned int packets = 0;
- unsigned int bytes = 0;
- void *ptr;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(!is_xdp_frame(ptr))) {
- struct sk_buff *skb = ptr;
-
- pr_debug("Sent skb %p\n", skb);
+ struct virtnet_sq_free_stats stats = {0};
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- }
- packets++;
- }
+ __free_old_xmit(sq, in_napi, &stats);
/* Avoid overhead when no packets have been processed;
* this happens when called speculatively from start_xmit.
*/
- if (!packets)
+ if (!stats.packets)
return;
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_update_end(&sq->stats.syncp);
}
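
free_old_xmit() publishes the locally accumulated counters under u64_stats_update_begin/end so that 64-bit readers see tear-free values even on 32-bit hosts. A sketch of the matching reader side, using the standard u64_stats fetch idiom (variable names here are illustrative):

	unsigned int start;
	u64 tx_packets, tx_bytes;

	do {
		start = u64_stats_fetch_begin(&sq->stats.syncp);
		tx_packets = u64_stats_read(&sq->stats.packets);
		tx_bytes   = u64_stats_read(&sq->stats.bytes);
	} while (u64_stats_fetch_retry(&sq->stats.syncp, start));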
@@ -848,7 +880,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -947,15 +979,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
int n, struct xdp_frame **frames, u32 flags)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_sq_free_stats stats = {0};
struct receive_queue *rq = vi->rq;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
- unsigned int len;
- int packets = 0;
- int bytes = 0;
int nxmit = 0;
int kicks = 0;
- void *ptr;
int ret;
int i;
@@ -974,20 +1003,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(is_xdp_frame(ptr))) {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- } else {
- struct sk_buff *skb = ptr;
-
- bytes += skb->len;
- napi_consume_skb(skb, false);
- }
- packets++;
- }
+ __free_old_xmit(sq, false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1007,8 +1023,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_add(&sq->stats.xdp_tx, n);
u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
u64_stats_add(&sq->stats.kicks, kicks);
@@ -2160,7 +2176,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
do {
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
@@ -2308,7 +2324,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
netif_tx_wake_queue(txq);
@@ -2398,7 +2414,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (use_napi)
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
} while (use_napi && kick &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
@@ -2550,8 +2566,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
* into the hypervisor, so the request should be handled immediately.
*/
while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq))
+ !virtqueue_is_broken(vi->cvq)) {
+ cond_resched();
cpu_relax();
+ }
return vi->ctrl->status == VIRTIO_NET_OK;
}
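
The added cond_resched() lets this poll loop yield the CPU between retries, which is only legal because every caller of virtnet_send_command() runs in process context; moving rx-mode handling into a workqueue (below) is what removes the last atomic-context caller. If one wanted to make that requirement explicit, a hypothetical guard at the top of the function (not part of this patch) would be:

	might_sleep();	/* warn if a future caller is atomic */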
@@ -2706,9 +2724,11 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
{
- struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, rx_mode_work);
+ struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
@@ -2721,6 +2741,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
+ rtnl_lock();
+
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
@@ -2738,14 +2760,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
vi->ctrl->allmulti ? "en" : "dis");
+ netif_addr_lock_bh(dev);
+
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf)
+ if (!buf) {
+ netif_addr_unlock_bh(dev);
+ rtnl_unlock();
return;
+ }
sg_init_table(sg, 2);
@@ -2766,6 +2793,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
+ netif_addr_unlock_bh(dev);
+
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -2773,9 +2802,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
+ rtnl_unlock();
+
kfree(buf);
}
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (vi->rx_mode_work_enabled)
+ schedule_work(&vi->rx_mode_work);
+}
+
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
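
The commands issued by the rx-mode handler above all funnel through virtnet_send_command() shown earlier. For reference, the promisc toggle that precedes the allmulti one in that function (elided from the hunk) has this shape; a sketch reconstructed from the surrounding code:

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");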
@@ -3856,6 +3895,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
@@ -3878,6 +3919,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
enable_delayed_refill(vi);
+ enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
@@ -4676,6 +4718,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4798,6 +4841,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ enable_rx_mode_work(vi);
+
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -4895,6 +4940,8 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device. */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
unregister_netdev(vi->dev);