author    Linus Torvalds <torvalds@linux-foundation.org>    2017-03-14 21:31:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-03-14 21:31:23 -0700
commit    ae50dfd61665086e617cc9e554a1285d52765670 (patch)
tree      deed89baf661867ed18005307fc12580c49b32f0
parent    352526f45387cb96671f13b003bdd5b249e509bd (diff)
parent    1e6a1cd888de06b09d2341d782aadb20c6034210 (diff)
download  linux-ae50dfd61665086e617cc9e554a1285d52765670.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Ensure that mtu is at least IPV6_MIN_MTU in ipv6 VTI tunnel driver, from Steffen Klassert.
 2) Fix crashes when user tries to get_next_key on an LPM bpf map, from Alexei Starovoitov.
 3) Fix detection of VLAN filtering feature for bnx2x VF devices, from Michal Schmidt.
 4) We can get a divide by zero when TCP sockets are morphed into listening state, fix from Eric Dumazet.
 5) Fix socket refcounting bugs in skb_complete_wifi_ack() and skb_complete_tx_timestamp(). From Eric Dumazet.
 6) Use after free in dccp_feat_activate_values(), also from Eric Dumazet.
 7) Like bonding, team needs to use ETH_MAX_MTU as netdev->max_mtu, from Jarod Wilson.
 8) Fix use after free in vrf_xmit(), from David Ahern.
 9) Don't do UDP Fragmentation Offload on IPComp ipsec packets, from Alexey Kodanev.
10) Properly check napi_complete_done() return value in order to decide whether to re-enable IRQs or not in amd-xgbe driver, from Thomas Lendacky.
11) Fix double free of hwmon device in marvell phy driver, from Andrew Lunn.
12) Don't crash on malformed netlink attributes in act_connmark, from Etienne Noss.
13) Don't remove routes with a higher metric in ipv6 ECMP route replace, from Sabrina Dubroca.
14) Don't write into a cloned SKB in ipv6 fragmentation handling, from Florian Westphal.
15) Fix routing redirect races in dccp and tcp: the ICMP handler must not modify the socket's cached route if it's locked by the user at that moment. From Jon Maxwell.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (108 commits)
  qed: Enable iSCSI Out-of-Order
  qed: Correct out-of-bound access in OOO history
  qed: Fix interrupt flags on Rx LL2
  qed: Free previous connections when releasing iSCSI
  qed: Fix mapping leak on LL2 rx flow
  qed: Prevent creation of too-big u32-chains
  qed: Align CIDs according to DORQ requirement
  mlxsw: reg: Fix SPVMLR max record count
  mlxsw: reg: Fix SPVM max record count
  net: Resend IGMP memberships upon peer notification.
  dccp: fix memory leak during tear-down of unsuccessful connection request
  tun: fix premature POLLOUT notification on tun devices
  dccp/tcp: fix routing redirect race
  ucc/hdlc: fix two little issue
  vxlan: fix ovs support
  net: use net->count to check whether a netns is alive or not
  bridge: drop netfilter fake rtable unconditionally
  ipv6: avoid write to a possibly cloned skb
  net: wimax/i2400m: fix NULL-deref at probe
  isdn/gigaset: fix NULL-deref at probe
  ...
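Fix 2 is worth a closer look, since only its diffstat entry (kernel/bpf/lpm_trie.c) appears below: BPF_MAP_GET_NEXT_KEY was invoked through a ->map_get_next_key callback that the LPM trie never provided, so the syscall path jumped through a NULL pointer. A hedged sketch of what such a fix amounts to (names approximate, not the verbatim hunk):

#include <linux/bpf.h>

/* Ordered iteration over an LPM trie isn't implemented; failing
 * cleanly instead of leaving the ops slot NULL stops the crash.
 */
static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static const struct bpf_map_ops lpm_trie_ops = {
	/* ... existing alloc/free/lookup/update callbacks ... */
	.map_get_next_key = trie_get_next_key,
};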
-rw-r--r-- Documentation/devicetree/bindings/powerpc/4xx/emac.txt | 62
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 3
-rw-r--r-- crypto/af_alg.c | 9
-rw-r--r-- crypto/algif_hash.c | 9
-rw-r--r-- drivers/isdn/gigaset/bas-gigaset.c | 3
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 206
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 16
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 110
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 104
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 17
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 42
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 43
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 12
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 184
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 64
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 25
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 43
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 47
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 8
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 11
-rw-r--r-- drivers/net/phy/marvell.c | 15
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/phy/spi_ks8995.c | 3
-rw-r--r-- drivers/net/team/team.c | 1
-rw-r--r-- drivers/net/tun.c | 19
-rw-r--r-- drivers/net/vrf.c | 3
-rw-r--r-- drivers/net/vxlan.c | 73
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 3
-rw-r--r-- drivers/net/xen-netback/interface.c | 26
-rw-r--r-- drivers/net/xen-netback/netback.c | 2
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 20
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-socket.c | 4
-rw-r--r-- fs/dlm/lowcomms.c | 2
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 2
-rw-r--r-- include/crypto/if_alg.h | 2
-rw-r--r-- include/linux/dccp.h | 1
-rw-r--r-- include/linux/filter.h | 16
-rw-r--r-- include/linux/list_nulls.h | 5
-rw-r--r-- include/linux/net.h | 2
-rw-r--r-- include/linux/phy.h | 4
-rw-r--r-- include/linux/rculist_nulls.h | 14
-rw-r--r-- include/net/inet_common.h | 3
-rw-r--r-- include/net/inet_connection_sock.h | 2
-rw-r--r-- include/net/sctp/structs.h | 3
-rw-r--r-- include/net/sock.h | 9
-rw-r--r-- include/uapi/linux/packet_diag.h | 2
-rw-r--r-- kernel/bpf/hashtab.c | 119
-rw-r--r-- kernel/bpf/lpm_trie.c | 6
-rw-r--r-- net/atm/svc.c | 5
-rw-r--r-- net/ax25/af_ax25.c | 3
-rw-r--r-- net/bluetooth/l2cap_sock.c | 2
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 3
-rw-r--r-- net/bluetooth/sco.c | 2
-rw-r--r-- net/bridge/br_input.c | 1
-rw-r--r-- net/bridge/br_netfilter_hooks.c | 21
-rw-r--r-- net/core/dev.c | 1
-rw-r--r-- net/core/net-sysfs.c | 6
-rw-r--r-- net/core/skbuff.c | 30
-rw-r--r-- net/core/sock.c | 106
-rw-r--r-- net/dccp/ccids/ccid2.c | 1
-rw-r--r-- net/dccp/ipv4.c | 3
-rw-r--r-- net/dccp/ipv6.c | 8
-rw-r--r-- net/dccp/minisocks.c | 24
-rw-r--r-- net/decnet/af_decnet.c | 5
-rw-r--r-- net/ipv4/af_inet.c | 9
-rw-r--r-- net/ipv4/inet_connection_sock.c | 2
-rw-r--r-- net/ipv4/ip_output.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 10
-rw-r--r-- net/ipv4/tcp_timer.c | 6
-rw-r--r-- net/ipv6/af_inet6.c | 10
-rw-r--r-- net/ipv6/ip6_fib.c | 2
-rw-r--r-- net/ipv6/ip6_offload.c | 4
-rw-r--r-- net/ipv6/ip6_output.c | 9
-rw-r--r-- net/ipv6/ip6_vti.c | 8
-rw-r--r-- net/ipv6/route.c | 11
-rw-r--r-- net/ipv6/tcp_ipv6.c | 8
-rw-r--r-- net/irda/af_irda.c | 5
-rw-r--r-- net/iucv/af_iucv.c | 2
-rw-r--r-- net/llc/af_llc.c | 4
-rw-r--r-- net/mpls/af_mpls.c | 4
-rw-r--r-- net/netrom/af_netrom.c | 3
-rw-r--r-- net/nfc/llcp_sock.c | 2
-rw-r--r-- net/phonet/pep.c | 6
-rw-r--r-- net/phonet/socket.c | 4
-rw-r--r-- net/rds/connection.c | 1
-rw-r--r-- net/rds/ib_cm.c | 47
-rw-r--r-- net/rds/rds.h | 6
-rw-r--r-- net/rds/tcp.c | 38
-rw-r--r-- net/rds/tcp.h | 2
-rw-r--r-- net/rds/tcp_listen.c | 11
-rw-r--r-- net/rose/af_rose.c | 3
-rw-r--r-- net/rxrpc/input.c | 27
-rw-r--r-- net/rxrpc/recvmsg.c | 4
-rw-r--r-- net/rxrpc/sendmsg.c | 49
-rw-r--r-- net/sched/act_connmark.c | 3
-rw-r--r-- net/sched/act_skbmod.c | 1
-rw-r--r-- net/sctp/ipv6.c | 5
-rw-r--r-- net/sctp/protocol.c | 5
-rw-r--r-- net/sctp/socket.c | 4
-rw-r--r-- net/smc/af_smc.c | 2
-rw-r--r-- net/socket.c | 5
-rw-r--r-- net/tipc/socket.c | 8
-rw-r--r-- net/unix/af_unix.c | 5
-rw-r--r-- net/vmw_vsock/af_vsock.c | 3
-rw-r--r-- net/x25/af_x25.c | 3
-rw-r--r-- net/xfrm/xfrm_policy.c | 19
-rw-r--r-- tools/include/uapi/linux/bpf_perf_event.h | 18
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 4
-rw-r--r-- tools/testing/selftests/bpf/test_verifier.c | 4
139 files changed, 1447 insertions, 826 deletions
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
index 712baf6c3e246f..44b842b6ca154d 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
@@ -71,6 +71,9 @@
For Axon it can be absent, though my current driver
doesn't handle phy-address yet so for now, keep
0x00ffffff in it.
+ - phy-handle : Used to describe configurations where an external PHY
+ is used. Please refer to:
+ Documentation/devicetree/bindings/net/ethernet.txt
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
operations (if absent the value is the same as
rx-fifo-size). For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
offload, phandle of the TAH device node.
- tah-channel : 1 cell, optional. If appropriate, channel used on the
TAH engine.
+ - fixed-link : Fixed-link subnode describing a link to a non-MDIO
+ managed entity. See
+ Documentation/devicetree/bindings/net/fixed-link.txt
+ for details.
+ - mdio subnode : When the EMAC has a phy connected to its local
+ mdio, which is supported by the kernel's network
+ PHY library in drivers/net/phy, there must be a device
+ tree subnode with the following required properties:
+ - #address-cells: Must be <1>.
+ - #size-cells: Must be <0>.
- Example:
+ For PHY definitions: Please refer to
+ Documentation/devicetree/bindings/net/phy.txt and
+ Documentation/devicetree/bindings/net/ethernet.txt
+
+ Examples:
EMAC0: ethernet@40000800 {
device_type = "network";
@@ -104,6 +121,48 @@
zmii-channel = <0>;
};
+ EMAC1: ethernet@ef600c00 {
+ device_type = "network";
+ compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+ interrupt-parent = <&EMAC1>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+ 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+ reg = <0xef600c00 0x000000c4>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <16384>;
+ tx-fifo-size = <2048>;
+ fifo-entry-size = <10>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <0>;
+ tah-device = <&TAH0>;
+ tah-channel = <0>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+ };
+
+
ii) McMAL node
Required properties:
@@ -145,4 +204,3 @@
- revision : as provided by the RGMII new version register if
available.
For Axon: 0x0000012a
-
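On the driver side, phy-handle, fixed-link and the mdio subnode described above are consumed through the standard OF/phylib helpers. A hedged sketch of the usual probe-time sequence, with illustrative names (example_mii_bus and example_adjust_link are assumptions, not the literal ibm/emac code):

#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_connect_phy(struct net_device *ndev,
			       struct device_node *np)
{
	struct device_node *mdio, *phy_np;
	struct phy_device *phydev;
	int err;

	/* Register the local MDIO bus described by the "mdio" subnode */
	mdio = of_get_child_by_name(np, "mdio");
	if (mdio) {
		err = of_mdiobus_register(example_mii_bus(ndev), mdio);
		of_node_put(mdio);
		if (err)
			return err;
	}

	/* "phy-handle" (or a fixed-link subnode) names the PHY to attach */
	phy_np = of_parse_phandle(np, "phy-handle", 0);
	phydev = of_phy_connect(ndev, phy_np, example_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	of_node_put(phy_np);
	return phydev ? 0 : -ENODEV;
}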
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fc73eeb7b3b8b1..ab02304613771b 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
FALSE (router)
forwarding - BOOLEAN
- Enable IP forwarding on this interface.
+ Enable IP forwarding on this interface. This controls whether packets
+ received _on_ this interface can be forwarded.
mc_forwarding - BOOLEAN
Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f5e18c2a48527b..690deca17c3528 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -266,7 +266,7 @@ unlock:
return err;
}
-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
if (!type)
goto unlock;
- sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
@@ -323,9 +323,10 @@ unlock:
}
EXPORT_SYMBOL_GPL(af_alg_accept);
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
- return af_alg_accept(sock->sk, newsock);
+ return af_alg_accept(sock->sk, newsock, kern);
}
static const struct proto_ops alg_proto_ops = {
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 54fc90e8339ce8..5e92bd275ef38e 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,8 @@ unlock:
return err ?: len;
}
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
if (err)
return err;
- err = af_alg_accept(ask->parent, newsock);
+ err = af_alg_accept(ask->parent, newsock, kern);
if (err)
return err;
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
int err;
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
if (err)
return err;
- return hash_accept(sock, newsock, flags);
+ return hash_accept(sock, newsock, flags, kern);
}
static struct proto_ops algif_hash_ops_nokey = {
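The new bool kern parameter threaded through these accept paths originates in the common socket layer: kernel_accept() passes true, userspace accept(2) passes false, and handing it on to sk_alloc() lets the core give kernel-internal sockets their own treatment (a separate lockdep class and no netns refcount). A minimal sketch of the resulting callback shape, with illustrative names (PF_EXAMPLE and example_proto are assumptions):

#include <net/sock.h>

static int example_accept(struct socket *sock, struct socket *newsock,
			  int flags, bool kern)
{
	/* Propagate 'kern' so the accepted sock inherits kernel-socket
	 * semantics when the accept originated inside the kernel.
	 */
	struct sock *sk2 = sk_alloc(sock_net(sock->sk), PF_EXAMPLE,
				    GFP_KERNEL, &example_proto, kern);

	if (!sk2)
		return -ENOMEM;

	/* ... graft sk2 onto newsock exactly as before ... */
	return 0;
}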
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 11e13c56126fba..2da3ff650e1d55 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 248f60d171a5a0..ffea9859f5a72b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
processed = xgbe_rx_poll(channel, budget);
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
if (pdata->channel_irq_mode)
xgbe_enable_rx_tx_int(pdata, channel);
@@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
} while ((processed < budget) && (processed != last_processed));
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
xgbe_enable_rx_tx_ints(pdata);
}
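The pattern in the hunks above is worth calling out: napi_complete_done() returns false when polling must stay active (for instance while busy polling owns the NAPI context), and unmasking device interrupts in that case would race with the still-running poller. A generic sketch of the corrected idiom, with illustrative names (example_channel, example_rx_poll and example_enable_irqs are assumptions):

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_channel *ch =
		container_of(napi, struct example_channel, napi);
	int processed = example_rx_poll(ch, budget);

	/* Only re-enable device IRQs if NAPI really left polling mode */
	if (processed < budget && napi_complete_done(napi, processed))
		example_enable_irqs(ch);

	return processed;
}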
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 581de71a958a35..4c6c882c6a1c42 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
if (!((1U << i) & self->msix_entry_mask))
continue;
- free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
self->msix_entry_mask &= ~(1U << i);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d8d06fdfc42b9d..ac76fc251d268e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- /* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
if (chip_is_e1x)
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
- } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
}
+ /* For VF we'll know whether to enable VLAN filtering after
+ * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+ */
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
"PTP adjfreq called while the interface is down\n");
- return -EFAULT;
+ return -ENETDOWN;
}
if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjtime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP gettime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timecounter_read(&bp->timecounter);
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP settime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timespec64_to_ns(ts);
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ /* VF with OLD Hypervisor or old PF do not support filtering */
+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+#endif
}
/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fad22adbbb9e7..bdfd53b46bc568 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
/* Add/Remove the filter */
rc = bnx2x_config_vlan_mac(bp, &ramrod);
- if (rc && rc != -EEXIST) {
+ if (rc == -EEXIST)
+ return 0;
+ if (rc) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
(filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
return rc;
}
+ filter->applied = true;
+
return 0;
}
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* Rollback if needed */
if (i != filters->count) {
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
- i, filters->count + 1);
+ i, filters->count);
while (--i >= 0) {
+ if (!filters->filters[i].applied)
+ continue;
filters->filters[i].add = !filters->filters[i].add;
bnx2x_vf_mac_vlan_config(bp, vf, qid,
&filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
continue;
}
- DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "add addresses for vf %d\n", vf->abs_vfid);
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
cpu_to_le32(U64_LO(q_stats_addr));
- DP(BNX2X_MSG_IOV,
- "added address %x %x for vf %d queue %d client %d\n",
- cur_query_entry->address.hi,
- cur_query_entry->address.lo, cur_query_entry->funcID,
- j, cur_query_entry->index);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo,
+ cur_query_entry->funcID,
+ j, cur_query_entry->index);
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406f4c1117..888d0b6632e86f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
bool add;
+ bool applied;
u8 *mac;
u16 vid;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300cf25ff8..76a4668c50fe98 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc, i = 0;
+ int rc = 0, i = 0;
struct netdev_hw_addr *ha;
if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
/* Get Rx mode requested */
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+ /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports not more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ rc = -EINVAL;
+ goto out;
+ }
+
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
i++;
}
- /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
- * addresses tops
- */
- if (i >= PFVF_MAX_MULTICAST_PER_VF) {
- DP(NETIF_MSG_IFUP,
- "VF supports not more than %d multicast MAC addresses\n",
- PFVF_MAX_MULTICAST_PER_VF);
- return -EINVAL;
- }
-
req->n_multicast = i;
req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
+ /* build vlan list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 235733e91c791b..32de4589d16a2c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
+ if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
- link_info->support_auto_speeds =
- le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_speeds_auto_mode)
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
+ int rc;
+
+ if (!silent)
+ bnxt_ulp_stop(bp);
bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ if (!silent && !rc)
+ bnxt_ulp_start(bp);
}
}
@@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_fw_set_time(bp);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc)
- goto init_err_pci_clean;
-
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index faf26a2f726b80..c7a5b84a5cb20e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -993,6 +993,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fdf2d8caf7bfaa..03532061d211b1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
return;
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
else
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a4c..69015fa50f2096 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static int bcmgenet_begin(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn on the clock */
+ return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn off the clock */
+ clk_disable_unprepare(priv->clk);
+}
+
static int bcmgenet_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
/* Misc UniMAC counters */
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
- UMAC_RBUF_OVFL_CNT),
- STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ UMAC_RBUF_OVFL_CNT_V1),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+ UMAC_RBUF_ERR_CNT_V1),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
}
}
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+ u16 new_offset;
+ u32 val;
+
+ switch (offset) {
+ case UMAC_RBUF_OVFL_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_OVFL_CNT_V2;
+ else
+ new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ case UMAC_RBUF_ERR_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_ERR_CNT_V2;
+ else
+ new_offset = RBUF_ERR_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ default:
+ val = bcmgenet_umac_readl(priv, offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, offset);
+ break;
+ }
+
+ return val;
+}
+
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
int i, j = 0;
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
case BCMGENET_STAT_NETDEV:
case BCMGENET_STAT_SOFT:
continue;
- case BCMGENET_STAT_MIB_RX:
- case BCMGENET_STAT_MIB_TX:
case BCMGENET_STAT_RUNT:
- if (s->type != BCMGENET_STAT_MIB_RX)
- offset = BCMGENET_STAT_OFFSET;
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_TX:
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
+ offset = 0; /* Reset Offset */
break;
case BCMGENET_STAT_MISC:
- val = bcmgenet_umac_readl(priv, s->reg_offset);
- /* clear if overflowed */
- if (val == ~0)
- bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ if (GENET_IS_V1(priv)) {
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0,
+ s->reg_offset);
+ } else {
+ val = bcmgenet_update_stat_misc(priv,
+ s->reg_offset);
+ }
break;
}
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .begin = bcmgenet_begin,
+ .complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
- struct netdev_queue *txq;
unsigned int pkts_compl = 0;
unsigned int bytes_compl = 0;
unsigned int c_index;
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
- txq = netdev_get_tx_queue(dev, ring->queue);
- netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
- if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
- }
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ pkts_compl, bytes_compl);
return pkts_compl;
}
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
struct bcmgenet_tx_ring *ring =
container_of(napi, struct bcmgenet_tx_ring, napi);
unsigned int work_done = 0;
+ struct netdev_queue *txq;
+ unsigned long flags;
- work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ spin_lock_irqsave(&ring->lock, flags);
+ work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ netif_tx_wake_queue(txq);
+ }
+ spin_unlock_irqrestore(&ring->lock, flags);
if (work_done == 0) {
napi_complete(napi);
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
+ unsigned long flags;
+ unsigned int status;
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
- if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
- priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->irq0_stat;
+ priv->irq0_stat = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status & UMAC_IRQ_MPD_R) {
netif_dbg(priv, wol, priv->dev,
"magic packet detected, waking up\n");
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
}
/* Link UP/DOWN event */
- if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
- !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
- priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
- }
+ !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
- unsigned int index;
+ unsigned int index, status;
- /* Save irq status for bottom-half processing. */
- priv->irq1_stat =
- bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ "%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
for (index = 0; index < priv->hw_params->rx_queues; index++) {
- if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
/* Check Tx priority queue interrupts */
for (index = 0; index < priv->hw_params->tx_queues; index++) {
- if (!(priv->irq1_stat & BIT(index)))
+ if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
+ unsigned int status;
+ unsigned long flags;
- /* Save irq status for bottom-half processing. */
- priv->irq0_stat =
- bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "IRQ=0x%x\n", priv->irq0_stat);
+ "IRQ=0x%x\n", status);
- if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+ if (status & UMAC_IRQ_RXDMA_DONE) {
rx_ring = &priv->rx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+ if (status & UMAC_IRQ_TXDMA_DONE) {
tx_ring = &priv->tx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
- UMAC_IRQ_PHY_DET_F |
- UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_HFB_SM |
- UMAC_IRQ_HFB_MM |
- UMAC_IRQ_MPD_R)) {
- /* all other interested interrupts handled in bottom half */
- schedule_work(&priv->bcmgenet_irq_work);
- }
-
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
- priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
}
+ /* all other interested interrupts handled in bottom half */
+ status &= (UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_MPD_R);
+ if (status) {
+ /* Save irq status for bottom-half processing. */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->irq0_stat |= status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
return IRQ_HANDLED;
}
@@ -2801,6 +2875,8 @@ err_irq0:
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ /* This is reserved so should require special treatment */
+ if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
+ }
+
/* This is the good old scheme, just GPHY major, no minor nor patch */
if ((gphy_rev & 0xf0) != 0)
priv->gphy_rev = gphy_rev << 8;
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
else if ((gphy_rev & 0xff00) != 0)
priv->gphy_rev = gphy_rev;
- /* This is reserved so should require special treatment */
- else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
- pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
- return;
- }
-
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
const void *macaddr;
struct resource *r;
int err = -EIO;
+ const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->lock);
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_eee = NULL;
}
+ /* If this is an internal GPHY, power it on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+ !strcasecmp(phy_mode_str, "internal"))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
return 0;
out_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
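The bcmgenet restructuring above follows a common ISR/workqueue handoff pattern: the hard interrupt handler accumulates status bits into a field protected by a spinlock, and the bottom half atomically snapshots and clears that field, so events arriving while the work item is already pending are merged rather than lost. A condensed sketch with illustrative names (example_read_clear_hw_status and example_handle_events are assumptions):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_priv {
	spinlock_t lock;		/* guards irq_stat */
	unsigned int irq_stat;		/* bits pending for the bottom half */
	struct work_struct irq_work;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	unsigned int status = example_read_clear_hw_status(priv);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->irq_stat |= status;	/* merge with anything still pending */
	spin_unlock_irqrestore(&priv->lock, flags);

	schedule_work(&priv->irq_work);
	return IRQ_HANDLED;
}

static void example_irq_task(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, irq_work);
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	status = priv->irq_stat;	/* snapshot ... */
	priv->irq_stat = 0;		/* ... and consume atomically */
	spin_unlock_irqrestore(&priv->lock, flags);

	if (status)
		example_handle_events(priv, status);
}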
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a49..db7f289d65ae2a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
-#define UMAC_RBUF_OVFL_CNT 0x61C
+#define UMAC_RBUF_OVFL_CNT_V1 0x61C
+#define RBUF_OVFL_CNT_V2 0x80
+#define RBUF_OVFL_CNT_V3PLUS 0x94
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MPD_PW_MS 0x624
#define UMAC_MPD_PW_LS 0x628
-#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_RBUF_ERR_CNT_V1 0x634
+#define RBUF_ERR_CNT_V2 0x84
+#define RBUF_ERR_CNT_V3PLUS 0x98
#define UMAC_MDF_ERR_CNT 0x638
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
struct work_struct bcmgenet_irq_work;
int irq0;
int irq1;
- unsigned int irq0_stat;
- unsigned int irq1_stat;
int wol_irq;
bool wol_irq_disabled;
+ /* shared status */
+ spinlock_t lock;
+ unsigned int irq0_stat;
+
/* HW descriptors/checksum variables */
bool desc_64b_en;
bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index be9c0e3f5ade7d..92f46b1375c325 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -152,7 +152,7 @@ struct octnic_gather {
*/
struct octeon_sg_entry *sg;
- u64 sg_dma_ptr;
+ dma_addr_t sg_dma_ptr;
};
struct handshake {
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg) {
- dma_unmap_single(&lio->oct_dev->
- pci_dev->dev,
- g->sg_dma_ptr,
- g->sg_size,
- DMA_TO_DEVICE);
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- }
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
- kfree((void *)lio->glist);
- kfree((void *)lio->glist_lock);
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
}
/**
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
GFP_KERNEL);
if (!lio->glist) {
- kfree((void *)lio->glist_lock);
- return 1;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store virtual and dma base address of
+ * per glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc_node(sizeof(*g), GFP_KERNEL,
numa_node);
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc_node(g->sg_size + 8,
- GFP_KERNEL, numa_node);
- if (!g->sg)
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
-
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
- g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev,
- g->sg_dma_ptr)) {
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
- g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
if (OCTEON_CN23XX_PF(oct))
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9d5e03502c76cb..7b83be4ce1fe0c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -108,6 +108,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
};
struct octeon_device_priv {
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
kfree(lio->glist);
- kfree(lio->glist_lock);
+ lio->glist = NULL;
}
/**
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
lio->glist_lock =
kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist =
kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
if (!lio->glist) {
kfree(lio->glist_lock);
- return 1;
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store virtual and dma base address of
+ * per glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- for (j = 1; j <= frags; j++) {
- frag = &skb_shinfo(skb)->frags[j - 1];
- dma_unmap_page(&oct->pci_dev->dev,
- g->sg[j >> 2].ptr[j & 3],
- frag->size, DMA_TO_DEVICE);
- }
- return NETDEV_TX_BUSY;
- }
+ dptr = g->sg_dma_ptr;
ndata.cmd.cmd3.dptr = dptr;
finfo->dptr = dptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3dc2e9651a8e2..d29ebc531151f0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,17 +71,17 @@
#define CN23XX_MAX_RINGS_PER_VF 8
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
#define CN23XX_DB_MIN 1
#define CN23XX_DB_MAX 8
#define CN23XX_DB_TIMEOUT 1
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
#define CN23XX_OQ_BUF_SIZE 1536
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
-#define CN23XX_OQ_REFIL_THRESHOLD 128
+#define CN23XX_OQ_REFIL_THRESHOLD 16
#define CN23XX_OQ_INTR_PKT 64
#define CN23XX_OQ_INTR_TIME 100
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0be87d119a979e..79f809479af6e7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
recv_buffer_destroy(droq->recv_buf_list[i].buffer,
pg_info);
- if (droq->desc_ring && droq->desc_ring[i].info_ptr)
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
droq->recv_buf_list[i].buffer = NULL;
}
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
vfree(droq->recv_buf_list);
if (droq->info_base_addr)
- cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
- droq->info_alloc_size,
- droq->info_base_addr,
- droq->info_list_dma);
+ lio_free_info_buffer(oct, droq);
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list =
- cnnic_numa_alloc_aligned_dma((droq->max_count *
- OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- numa_node);
+ droq->info_list = lio_alloc_info_buffer(oct, droq);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index e62074090681d3..6982c0af5eccb7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -325,10 +325,10 @@ struct octeon_droq {
size_t desc_ring_dma;
/** Info ptr list are allocated at this virtual address. */
- size_t info_base_addr;
+ void *info_base_addr;
/** DMA mapped address of the info list */
- size_t info_list_dma;
+ dma_addr_t info_list_dma;
/** Allocated size of info list. */
u32 info_alloc_size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index aa36e9ae767655..bed9ef17bc26b4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -140,48 +140,6 @@ err_release_region:
return 1;
}
-static inline void *
-cnnic_numa_alloc_aligned_dma(u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- int numa_node)
-{
- int retries = 0;
- void *ptr = NULL;
-
-#define OCTEON_MAX_ALLOC_RETRIES 1
- do {
- struct page *page = NULL;
-
- page = alloc_pages_node(numa_node,
- GFP_KERNEL,
- get_order(size));
- if (!page)
- page = alloc_pages(GFP_KERNEL,
- get_order(size));
- ptr = (void *)page_address(page);
- if ((unsigned long)ptr & 0x07) {
- __free_pages(page, get_order(size));
- ptr = NULL;
- /* Increment the size required if the first
- * attempt failed.
- */
- if (!retries)
- size += 7;
- }
- retries++;
- } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
-
- *alloc_size = size;
- *orig_ptr = (unsigned long)ptr;
- if ((unsigned long)ptr & 0x07)
- ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
- return ptr;
-}
-
-#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
- free_pages(orig_ptr, get_order(size))
-
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 6bb89419006eb5..eef2a1e8a7e3f9 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -62,6 +62,9 @@ struct lio {
/** Array of gather component linked lists */
struct list_head *glist;
+ void **glists_virt_base;
+ dma_addr_t *glists_dma_base;
+ u32 glist_entry_size;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
+static inline void *
+lio_alloc_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ void *virt_ptr;
+
+ virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_list_dma);
+ if (virt_ptr) {
+ droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
+ droq->info_base_addr = virt_ptr;
+ }
+
+ return virt_ptr;
+}
+
+static inline void lio_free_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
+ droq->info_list_dma);
+}
+
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
- dma_addr_t dma_addr;
- struct octeon_device *oct = droq->oct_dev;
-
- dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
- OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
-
- WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
-
- return (u64)dma_addr;
-}
-
-static inline void
-lio_unmap_ring_info(struct pci_dev *pci_dev,
- u64 info_ptr, u32 size)
-{
- dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+ return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
}
static inline u64
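The net effect of the liquidio changes above is to replace per-entry streaming DMA (dma_map_single() plus explicit sync on every use) with a single coherent allocation per ring, after which a device address is plain pointer arithmetic. A condensed sketch of that strategy, with illustrative names (example_ring is an assumption):

#include <linux/dma-mapping.h>

struct example_ring {
	void *virt;	/* CPU view of the coherent region */
	dma_addr_t dma;	/* device view of the same region */
	u32 count;
};

static int example_ring_alloc(struct device *dev, struct example_ring *r,
			      u32 count, size_t entry_size)
{
	r->count = count;
	r->virt = dma_alloc_coherent(dev, count * entry_size,
				     &r->dma, GFP_KERNEL);
	return r->virt ? 0 : -ENOMEM;
}

/* Device address of entry i: no dma_map_single(), no sync calls */
static dma_addr_t example_ring_entry_dma(struct example_ring *r, u32 i,
					 size_t entry_size)
{
	return r->dma + i * entry_size;
}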
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c715356283..2269ff562d9562 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
+ void *iommu_domain;
u8 vf_id;
u8 sqs_id;
bool sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24bcfd42b..24017588f53171 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
+#include <linux/iommu.h>
#include "nic_reg.h"
#include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
/* Get actual TSO descriptors and free them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
int err = 0;
int rq_idx;
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
if (err && !cqe_rx->rb_cnt)
return;
- skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ skb = nicvf_get_rcv_skb(snic, cqe_rx);
if (!skb) {
netdev_dbg(nic->netdev, "Packet not received\n");
return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ /* Get iommu domain for iova to physical addr conversion */
+ nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
if (sdevid == 0xA134)
nic->t88 = true;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390be3b126e..f13289f0d2386d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
@@ -18,6 +19,16 @@
#include "q_struct.h"
#include "nicvf_queues.h"
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
+ int order = NICVF_PAGE_ORDER;
/* Check if request can be accommodated in previously allocated page */
if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
}
nicvf_get_page(nic);
- nic->rb_page = NULL;
/* Allocate a new page */
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
if (!nic->rb_page) {
- nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- order);
- if (!nic->rb_page) {
- this_cpu_inc(nic->pnicvf->drv_stats->
- rcv_buffer_alloc_failures);
- return -ENOMEM;
- }
- nic->rb_page_offset = 0;
+ this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+ return -ENOMEM;
}
-
+ nic->rb_page_offset = 0;
ret:
- *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ nic->rb_page_offset, buf_len,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC));
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, order);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
nic->rb_page_offset += buf_len;
return 0;
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->dma_size = buf_size;
rbdr->enable = true;
rbdr->thresh = RBDR_THRESH;
+ rbdr->head = 0;
+ rbdr->tail = 0;
nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
&rbuf);
- if (err)
+ if (err) {
+ /* Record tail so the free path releases buffers already allocated and mapped */
+ rbdr->tail = idx - 1;
return err;
+ }
desc = GET_RBDR_DESC(rbdr, idx);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
}
nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
- u64 buf_addr;
+ u64 buf_addr, phys_addr;
struct rbdr_entry_t *desc;
if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
head = rbdr->head;
tail = rbdr->tail;
- /* Free SKBs */
+ /* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
- /* Free SKB of tail desc */
+ /* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
break;
desc = GET_RBDR_DESC(rbdr, tail);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
refill_rb_cnt--;
new_rb++;
}
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
return 0;
}
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt)
+{
+ u8 idx;
+ struct sq_gather_subdesc *gather;
+
+ /* Unmap DMA-mapped skb data buffers */
+ for (idx = 0; idx < subdesc_cnt; idx++) {
+ hdr_sqe++;
+ hdr_sqe &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+ gather->size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
+ struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
if (!sq)
return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
- if (skb)
- dev_kfree_skb_any(skb);
+ if (!skb)
+ goto next;
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and unmap them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+ hdr->subdesc_cnt);
+ }
+ dev_kfree_skb_any(skb);
+next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);
if (!nic->sqs_mode && (qidx == 0)) {
- /* Enable checking L3/L4 length and TCP/UDP checksums */
+ /* Enable checking L3/L4 length and TCP/UDP checksums.
+ * Also allow IPv6 pkts with zero UDP checksum.
+ */
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
- (BIT(24) | BIT(23) | BIT(21)));
+ (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
}
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
return qentry;
}
+/* Roll back to the previous tail pointer when descriptors go unused */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+ int qentry, int desc_cnt)
+{
+ sq->tail = qentry;
+ atomic_add(desc_cnt, &sq->free_cnt);
+}
+
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num)
{
int i, size;
- int subdesc_cnt, tso_sqe = 0;
+ int subdesc_cnt, hdr_sqe = 0;
int qentry;
+ u64 dma_addr;
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
- tso_sqe = qentry;
+ hdr_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
- nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+ offset_in_page(skb->data), size,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
/* Check for scattered buffer */
if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
- nicvf_sq_add_gather_subdesc(sq, qentry, size,
- virt_to_phys(
- skb_frag_address(frag)));
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+ skb_frag_page(frag),
+ frag->page_offset, size,
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ /* Free the entire chain of buffers mapped so far;
+ * here 'i' = frags mapped + the skb->data mapping above
+ */
+ nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
}
doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
- nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
}
nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
+ u64 phys_addr;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
* CQE_RX at word6, hence buffer pointers move by a word
*/
if (!nic->hw_tso)
rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
else
rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
- netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
- __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
+ phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+ if (!phys_addr) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
if (!frag) {
/* First fragment */
+ dma_unmap_page_attrs(&nic->pdev->dev,
+ *rb_ptrs - cqe_rx->align_pad,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
skb = nicvf_rb_ptr_to_skb(nic,
- *rb_ptrs - cqe_rx->align_pad,
+ phys_addr - cqe_rx->align_pad,
payload_len);
if (!skb)
return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- page = virt_to_page(phys_to_virt(*rb_ptrs));
- offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page = virt_to_page(phys_to_virt(phys_addr));
+ offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset, payload_len, RCV_FRAG_LEN);
}
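
The recurring sequence in this file is: map with DMA_ATTR_SKIP_CPU_SYNC (the device is cache-coherent, so CPU syncs are skipped), check dma_mapping_error() before posting the descriptor, and on completion translate the IOVA back to a physical address through the IOMMU domain before touching the page. A minimal sketch of the map-and-verify half, with a hypothetical wrapper name:

/* Sketch only: the map/check step used for rx buffers and sq gathers. */
static int nicvf_map_buf(struct nicvf *nic, struct page *page, u32 off,
			 u32 len, dma_addr_t *iova)
{
	*iova = dma_map_page_attrs(&nic->pdev->dev, page, off, len,
				   DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&nic->pdev->dev, *iova))
		return -ENOMEM;	/* caller must not post this descriptor */
	return 0;
}
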
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 5cb84da99a2de5..10cb4b84625b14 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -87,7 +87,7 @@
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
-#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
+#define DMA_BUFFER_LEN 1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -301,6 +301,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf730bbc2..64a1095e4d1495 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
return 1;
}
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+ u16 sdevid;
+
+ if (max_bgx_per_node)
+ return;
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_81XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN81XX;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN83XX;
+ break;
+ case PCI_SUBSYS_DEVID_88XX_BGX:
+ default:
+ max_bgx_per_node = MAX_BGX_PER_CN88XX;
+ break;
+ }
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+ int idx = (node * max_bgx_per_node) + bgx_idx;
+
+ return bgx_vnic[idx];
+}
+
/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_NODE; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+ for (i = 0; i < max_bgx_per_node; i++) {
+ if (bgx_vnic[(node * max_bgx_per_node) + i])
map |= (1 << i);
}
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac_count;
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
dev_info(dev, "%s: 40G_KR4\n", (char *)str);
break;
case BGX_MODE_QSGMII:
- if ((lmacid == 0) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
- return;
- if ((lmacid == 2) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
- return;
dev_info(dev, "%s: QSGMII\n", (char *)str);
break;
case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
+ set_max_bgx_per_node(pdev);
+
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
bgx->bgx_id = (pci_resource_start(pdev,
PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
} else {
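
All of the bgx_vnic[] lookups above share one invariant: the table is indexed as node * stride + bgx_idx, and the stride used for lookups must equal the stride used when bgx->bgx_id was assigned at probe time, which is why it now comes from the probed subsystem ID rather than a compile-time MAX_BGX_PER_NODE. In sketch form:

/* Sketch only: flattened (node, bgx) indexing; per_node must match the
 * value used when the entry was installed in bgx_probe().
 */
static inline int bgx_flat_idx(int node, int bgx_idx, int per_node)
{
	return node * per_node + bgx_idx;
}
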
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189429bb65..c5080f2cead5d0 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
#define MAX_BGX_PER_CN88XX 2
#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
#define MAX_BGX_PER_CN83XX 4
-#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 275c2e2349ad92..c44036d5761a4c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
- int res;
-
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct device_node *phy_handle;
- int res = 0;
+ int res = 1;
phy_handle = of_parse_phandle(np, "phy-handle", 0);
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
int res = emac_dt_phy_probe(dev);
- mutex_unlock(&emac_phy_map_lock);
- if (!res)
+ switch (res) {
+ case 1:
+ /* No phy-handle property configured.
+ * Continue with the existing phy probe
+ * and setup code.
+ */
+ break;
+
+ case 0:
+ mutex_unlock(&emac_phy_map_lock);
goto init_phy;
- dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
- res);
- return res;
+ default:
+ mutex_unlock(&emac_phy_map_lock);
+ dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
+ res);
+ return res;
+ }
}
if (dev->phy_address != 0xffffffff)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9198e6bd5160f9..5f11b4dc95d2d1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
init_rx_pool(adapter, &adapter->rx_pool[i],
- IBMVNIC_BUFFS_PER_POOL, i,
+ adapter->req_rx_add_entries_per_subcrq, i,
be64_to_cpu(size_array[i]), 1);
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
tx_pool->tx_buff =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
if (!tx_pool->tx_buff)
goto tx_pool_alloc_failed;
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- adapter->max_tx_entries_per_subcrq *
+ adapter->req_tx_entries_per_subcrq *
adapter->req_mtu))
goto tx_ltb_alloc_failed;
tx_pool->free_map =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map)
goto tx_fm_alloc_failed;
- for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;
tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
+ struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int ret = 0;
tx_pool = &adapter->tx_pool[queue_num];
+ tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (tx_pool->consumer_index == 0)
tx_pool->consumer_index =
- adapter->max_tx_entries_per_subcrq - 1;
+ adapter->req_tx_entries_per_subcrq - 1;
else
tx_pool->consumer_index--;
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_BUSY;
goto out;
}
+
+ atomic_inc(&tx_scrq->used);
+
+ if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+ netdev_info(netdev, "Stopping queue %d\n", queue_num);
+ netif_stop_subqueue(netdev, queue_num);
+ }
+
tx_packets++;
tx_bytes += skb->len;
txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
+ atomic_set(&scrq->used, 0);
scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
@@ -1355,14 +1366,28 @@ restart_loop:
DMA_TO_DEVICE);
}
- if (txbuff->last_frag)
+ if (txbuff->last_frag) {
+ atomic_dec(&scrq->used);
+
+ if (atomic_read(&scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ netif_subqueue_stopped(adapter->netdev,
+ txbuff->skb)) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+
dev_kfree_skb_any(txbuff->skb);
+ }
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
(adapter->tx_pool[pool].producer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
}
/* remove tx_comp scrq */
next->tx_comp.first = 0;
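
The new used counter gives the ibmvnic transmit path classic stop/wake hysteresis: stop the subqueue when the ring fills, wake it only once completions have drained it to half, so the queue does not flap at the boundary. A minimal sketch with hypothetical helper names, mirroring the thresholds above:

/* Sketch only: stop/wake hysteresis on a per-queue occupancy counter. */
static void tx_enqueue_account(atomic_t *used, int limit, u16 queue,
			       struct net_device *dev)
{
	if (atomic_inc_return(used) >= limit)
		netif_stop_subqueue(dev, queue);	/* ring is full */
}

static void tx_complete_account(atomic_t *used, int limit, u16 queue,
				struct net_device *dev)
{
	if (atomic_dec_return(used) <= limit / 2 &&
	    __netif_subqueue_stopped(dev, queue))
		netif_wake_subqueue(dev, queue);	/* half drained */
}
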
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 422824f1f42a8a..1993b42666f73d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
+ atomic_t used;
};
struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ddb4ca4ff930a7..117170014e8897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0523ed47f597c7..8fa23f6a1f67f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (mode & DCB_CAP_DCBX_LLD_MANAGED)
+ return 1;
+
if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
}
- if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+ if (!(mode & DCB_CAP_DCBX_HOST))
return 1;
- if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
- !(mode & DCB_CAP_DCBX_HOST))
+ if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 31e3cb7ee5feeb..5621dcfda4f186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct iphdr *iph;
/* We are only going to peek, no need to clone the SKB */
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
- lbtp->pt.type = htons(ETH_P_ALL);
+ lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 44406a5ec15d96..79481f4cf26432 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -48,9 +48,14 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5E_TC_FLOW_ESWITCH = BIT(0),
+};
+
struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
+ u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
@@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
}
- if (esw && esw->mode == SRIOV_OFFLOADS) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
@@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
static int parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
@@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && esw->mode == SRIOV_OFFLOADS &&
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
rep->vport != FDB_UPLINK_VPORT) {
if (min_inline > esw->offloads.inline_mode) {
netdev_warn(priv->netdev,
@@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err = 0;
- bool fdb_flow = false;
+ int err, attr_size = 0;
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u8 flow_flags = 0;
- if (esw && esw->mode == SRIOV_OFFLOADS)
- fdb_flow = true;
-
- if (fdb_flow)
- flow = kzalloc(sizeof(*flow) +
- sizeof(struct mlx5_esw_flow_attr),
- GFP_KERNEL);
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ flow_flags = MLX5E_TC_FLOW_ESWITCH;
+ attr_size = sizeof(struct mlx5_esw_flow_attr);
+ }
+ flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
err = -ENOMEM;
@@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}
flow->cookie = f->cookie;
+ flow->flags = flow_flags;
- err = parse_cls_flower(priv, spec, f);
+ err = parse_cls_flower(priv, flow, spec, f);
if (err < 0)
goto err_free;
- if (fdb_flow) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2478516a61e2ea..ded27bb9a3b604 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = ft->node.children.prev;
+ struct list_head *prev = &ft->node.children;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c4242a4e81309f..e2bd600d19de09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto clean_load;
+ pci_save_state(pdev);
return 0;
clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state and drain the health wq */
+ /* In case of a kernel call, drain the health wq */
if (state) {
- pci_save_state(pdev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0899e2d310e262..d9616daf8a7056 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
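
Both record-count fixes stem from the same constraint: the num_rec field in these register payloads is, as I read the register definitions, only 8 bits wide, so the largest count it can express is 255, not 256:

/* Sketch only: an 8-bit num_rec field caps the per-payload record count. */
#define REC_FIELD_BITS	8
#define REC_MAX_COUNT	((1 << REC_FIELD_BITS) - 1)	/* 255, not 256 */
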
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 22ab429253778d..ae6cccc666e461 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
- if (WARN_ON(IS_ERR(ruleset)))
+ if (IS_ERR(ruleset))
return;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!WARN_ON(!rule)) {
+ if (rule) {
mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03df751acb..7e3a6fed3da6d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ u32 align = elems_per_page * DQ_RANGE_ALIGN;
- p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ p_conn->cid_count = roundup(p_conn->cid_count, align);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e2a081ceaf520c..e518f914eab13f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* size/capacity fields are of a u32 type.
*/
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
- chain_size > 0x10000) ||
- (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
- chain_size > 0x100000000ULL)) {
+ chain_size > ((u32)U16_MAX + 1)) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
DP_NOTICE(cdev,
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
chain_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 3a44d6b395fac9..098766f7fe88a6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ooo_enable = p_params->ooo_enable;
+ p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+ kfree(p_conn);
+}
+
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
+ struct qed_iscsi_conn *p_conn = NULL;
+
+ while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_iscsi_free_connection(p_hwfn, p_conn);
+ }
+ }
+
kfree(p_iscsi_info);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af10a572f..0d3cef409c96d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* If need to reuse or there's no replacement buffer, repost this */
if (rc)
goto out_post;
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
skb = build_skb(buffer->data, 0);
if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
- unsigned long lock_flags,
+ unsigned long *p_lock_flags,
bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
- spin_lock_irqsave(&p_rx->lock, lock_flags);
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0;
}
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
- cqe, flags, b_last_cqe);
+ cqe, &flags,
+ b_last_cqe);
break;
default:
rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_conn ll2_info;
+ struct qed_ll2_conn ll2_info = { 0 };
int rc;
ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 7d731c6cb8923d..378afce58b3f0a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;
+ p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
return p_ooo_info;
no_history_mem:
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 65077c77082a2f..91e9bd7159ab37 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
-
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
+ cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+ DUPLEX_FULL : DUPLEX_HALF;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->port = 0;
- cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
ret = 0;
}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->port;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+// lp->port = cmd->base.port;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
// if (netif_running(dev))
// smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
static const struct ethtool_ops smc_ethtool_ops = {
- .get_settings = smc_ethtool_getsettings,
- .set_settings = smc_ethtool_setsettings,
.get_drvinfo = smc_ethtool_getdrvinfo,
.get_msglevel = smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
.get_eeprom_len = smc_ethtool_geteeprom_len,
.get_eeprom = smc_ethtool_geteeprom,
.set_eeprom = smc_ethtool_seteeprom,
+ .get_link_ksettings = smc_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc_ethtool_set_link_ksettings,
};
static const struct net_device_ops smc_netdev_ops = {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d3e73ac158aee6..f9f3dba7a58800 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -700,6 +700,8 @@ struct net_device_context {
u32 tx_checksum_mask;
+ u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
/* Ethtool settings */
u8 duplex;
u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
- u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d35ebd993b3852..4c1d8cca247b92 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
- struct netvsc_device *nvscdev;
struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
int i;
u32 count, *tab;
- nvscdev = get_outbound_net_device(hdev);
- if (!nvscdev)
- return;
-
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);
for (i = 0; i < count; i++)
- nvscdev->send_table[i] = tab[i];
+ net_device_ctx->tx_send_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bc05c895d9589d..5ede87f30463e8 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+ unsigned int num_tx_queues = ndev->real_num_tx_queues;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);
- if (q_idx < 0 || skb->ooo_okay ||
- q_idx >= ndev->real_num_tx_queues) {
+ if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;
- new_idx = nvsc_dev->send_table[hash]
- % nvsc_dev->num_chn;
+ new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
q_idx = new_idx;
}
- if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
- q_idx = 0;
-
return q_idx;
}
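
With the send table moved into net_device_context, queue selection no longer touches netvsc_device state that can be torn down underneath it. The selection itself is an indirection table keyed by a flow hash, reduced modulo the live queue count; roughly, with a hypothetical wrapper:

/* Sketch only: indirection-table tx queue selection as done above. */
static u16 pick_tx_queue(const u32 *tx_send_table, struct net_device *ndev,
			 struct sk_buff *skb)
{
	u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);

	return tx_send_table[hash] % ndev->real_num_tx_queues;
}
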
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f9d0fa315a4762..272b051a019975 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
- struct marvell_priv *priv = phydev->priv;
-
- if (priv && priv->hwmon_dev)
- hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
- .remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index daec6555f3b108..5198ccfa347f8b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedfa299412..1e2d4f1179da31 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- ks->regs_attr.size = ks->chip->regs_size;
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+ ks->regs_attr.size = ks->chip->regs_size;
err = ks8995_reset(ks);
if (err)
return err;
+ sysfs_attr_init(&ks->regs_attr.attr);
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4a24b5d15f5a5d..1b52520715aec6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dc1b1dd9157c16..34cc3c590aa5c5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
+ struct tun_struct *tun = netdev_priv(dev);
+ int i;
+
netif_tx_start_all_queues(dev);
+
+ for (i = 0; i < tun->numqueues; i++) {
+ struct tun_file *tfile;
+
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
+ }
+
return 0;
}
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
- if (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk)))
+ if (tun->dev->flags & IFF_UP &&
+ (sock_writeable(sk) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ sock_writeable(sk))))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
int ret = 0;
pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- pr_info("%s\n", DRV_COPYRIGHT);
ret = rtnl_link_register(&tun_link_ops);
if (ret) {
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 22379da6340077..fea687f35b5ac6 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
- dstats->tx_bytes += skb->len;
+ dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e375560cc74e5f..bdb6ae16d4a85b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ struct vxlan_config *conf)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ err = vxlan_dev_configure(net, dev, conf, false);
+ if (err)
+ return err;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+ return 0;
+}
+
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config conf;
int err;
@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- err = vxlan_dev_configure(src_net, dev, &conf, false);
- if (err)
- return err;
-
- dev->ethtool_ops = &vxlan_ethtool_ops;
-
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_EXCL | NLM_F_CREATE,
- vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
-
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
- return err;
- }
-
- list_add(&vxlan->next, &vn->vxlan_list);
-
- return 0;
+ return __vxlan_dev_create(src_net, dev, &conf);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = vxlan_dev_configure(net, dev, conf, false);
+ err = __vxlan_dev_create(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a5045b5279d70a..6742ae60566045 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
- iowrite16be(bd_status, &bd->status);
iowrite16be(skb->len, &bd->length);
+ iowrite16be(bd_status, &bd->status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
struct sk_buff *skb;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
- u32 bd_status;
+ u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
int i;
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910a65191f..f8eb66ef2944ea 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);
+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 829b26cd4549a4..8397f6c9245158 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
- /* Drop the packet if queues are not set up */
+ /* Drop the packet if queues are not set up.
+ * This handler should be called inside an RCU read section
+ * so we don't need to enter it here explicitly.
+ */
+ num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
+ unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;
- spin_lock(&vif->lock);
- if (vif->queues == NULL)
- goto out;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < vif->num_queues; ++index) {
+ for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
tx_packets += queue->stats.tx_packets;
}
-out:
- spin_unlock(&vif->lock);
+ rcu_read_unlock();
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
int i;
unsigned int queue_index;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
+
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
}
data[i] = accum;
}
+
+ rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
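The xen-netback hunks trade vif->lock for RCU on the reader side: each handler snapshots vif->num_queues exactly once with READ_ONCE() and uses that value consistently, since the teardown path may zero the count mid-loop. A userspace sketch of the snapshot half, with a relaxed atomic load standing in for READ_ONCE (an assumed analogue, not the kernel macro); the matching teardown side is sketched after the xenbus.c hunk below:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int num_queues = 4;
    static unsigned long rx_bytes[8];

    static unsigned long sum_stats(void)
    {
        /* Snapshot the count once; re-reading it on every iteration
         * could race with a writer zeroing it between loop tests. */
        unsigned int n = atomic_load_explicit(&num_queues,
                                              memory_order_relaxed);
        unsigned long total = 0;
        for (unsigned int i = 0; i < n; i++)
            total += rx_bytes[i];
        return total;
    }

    int main(void)
    {
        rx_bytes[0] = 10; rx_bytes[3] = 5;
        printf("%lu\n", sum_stats());   /* 15 */
        return 0;
    }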
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f9bcf4a665bcae..602d408fa25e98 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
/* Disable the vif from queue 0's kthread */
- if (vif->queues)
+ if (vif->num_queues)
xenvif_kick_thread(&vif->queues[0]);
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d2d7cd9145b1c2..a56d3eab35dd65 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
struct xenvif *vif = be->vif;
if (vif) {
+ unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
- struct xenvif_queue *queues;
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
- for (queue_index = 0;
- queue_index < vif->num_queues;
- ++queue_index)
- xenvif_deinit_queue(&vif->queues[queue_index]);
- spin_lock(&vif->lock);
- queues = vif->queues;
+ /* At this point some of the handlers may still be active
+ * so we need to have additional synchronization here.
+ */
vif->num_queues = 0;
- vif->queues = NULL;
- spin_unlock(&vif->lock);
+ synchronize_net();
- vfree(queues);
+ for (queue_index = 0; queue_index < num_queues; ++queue_index)
+ xenvif_deinit_queue(&vif->queues[queue_index]);
+
+ vfree(vif->queues);
+ vif->queues = NULL;
xenvif_disconnect_ctrl(vif);
}
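backend_disconnect is the write side of that protocol: publish num_queues = 0 so new readers see an empty vif, wait out the readers that saw the old count with synchronize_net(), and only then deinitialize and free the queue array. A pthread analogue where a read lock plays rcu_read_lock() and a write-lock round trip plays synchronize_net() — a rough sketch, build with -pthread:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    static pthread_rwlock_t qlock = PTHREAD_RWLOCK_INITIALIZER;
    static _Atomic unsigned int nqueues = 4;
    static unsigned long *queues;

    static unsigned long reader_sum(void)
    {
        unsigned long total = 0;
        pthread_rwlock_rdlock(&qlock);           /* ~ rcu_read_lock() */
        unsigned int n = atomic_load(&nqueues);
        for (unsigned int i = 0; i < n; i++)
            total += queues[i];
        pthread_rwlock_unlock(&qlock);
        return total;
    }

    static void disconnect(void)
    {
        atomic_store(&nqueues, 0);     /* 1. new readers see nothing   */
        pthread_rwlock_wrlock(&qlock); /* 2. old readers have drained  */
        pthread_rwlock_unlock(&qlock);
        free(queues);                  /* 3. nobody can touch it now   */
        queues = NULL;
    }

    int main(void)
    {
        queues = calloc(4, sizeof(*queues));
        (void)reader_sum();
        disconnect();
        return 0;
    }

Freeing before step 2 completes is exactly the use-after-free this hunk closes.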
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index b7b87ecefcdfc7..9fca8d225ee092 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
newsock->ops = sock->ops;
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
if (rc == -EAGAIN) {
/* Nothing ready, so wait for activity */
init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(sk_sleep(sock->sk), &wait);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
}
if (rc)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7d398d300e972c..9382db998ec954 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
newsock->type = con->sock->type;
newsock->ops = con->sock->ops;
- result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+ result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
if (result < 0)
goto accept_err;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 4348027384f5ed..d0ab7e56d0b41a 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
if (ret < 0)
goto out;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a2bfd7843f18f6..e2b9c6fe271496 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock);
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042bbbf6072..68449293c4b623 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@ struct dccp_request_sock {
__u64 dreq_isr;
__u64 dreq_gsr;
__be32 dreq_service;
+ spinlock_t dreq_lock;
struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;
__u32 dreq_timestamp_time;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0c167fdee5f7d1..fbf7b39e810355 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -409,6 +409,7 @@ struct bpf_prog {
u16 pages; /* Number of allocated pages */
kmemcheck_bitfield_begin(meta);
u16 jited:1, /* Is our filter JIT'ed? */
+ locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- set_memory_ro((unsigned long)fp, fp->pages);
+ fp->locked = 1;
+ WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
- set_memory_rw((unsigned long)fp, fp->pages);
+ if (fp->locked) {
+ WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+ /* In case set_memory_rw() fails, we want to be the first
+ * to crash here instead of some random place later on.
+ */
+ fp->locked = 0;
+ }
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- set_memory_ro((unsigned long)hdr, hdr->pages);
+ WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
- set_memory_rw((unsigned long)hdr, hdr->pages);
+ WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
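In filter.h, unlock used to flip pages back to RW unconditionally; the new locked bit makes unlock strictly undo a prior lock, and WARN_ON_ONCE surfaces a failing set_memory_*() immediately instead of at some random later crash. A userspace analogue built on mprotect — a sketch under that analogy only, since the kernel helpers take page counts rather than byte lengths:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    struct prog {
        void  *image;
        size_t len;        /* page-aligned */
        int    locked;     /* did we actually make it read-only? */
    };

    static void prog_lock_ro(struct prog *p)
    {
        p->locked = 1;
        /* Complain loudly if the protection change fails, rather than
         * silently leaving the image writable. */
        if (mprotect(p->image, p->len, PROT_READ) != 0)
            perror("mprotect(RO)");
    }

    static void prog_unlock_ro(struct prog *p)
    {
        if (p->locked) {               /* only undo what lock did */
            if (mprotect(p->image, p->len, PROT_READ | PROT_WRITE) != 0)
                perror("mprotect(RW)");
            p->locked = 0;
        }
    }

    int main(void)
    {
        struct prog p = { .len = 4096, .locked = 0 };
        p.image = mmap(NULL, p.len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(p.image != MAP_FAILED);
        prog_lock_ro(&p);
        prog_unlock_ro(&p);
        return 0;
    }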
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe100908430..87ff4f58a2f018 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+ })
/**
* ptr_is_a_nulls - Test if a ptr is a nulls
* @ptr: ptr to be tested
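hlist_nulls lists end not in NULL but in a tagged "nulls" value whose payload — a bucket index, in the hashtab.c usage further down — survives the traversal. The new hlist_nulls_entry_safe returns NULL when the cursor is such a marker, mirroring hlist_entry_safe. The encoding itself, reduced to a standalone sketch (helper names mirror the kernel's; the rest is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Terminator: odd pointer value carrying a payload in the upper
     * bits, in the spirit of NULLS_MARKER() above. */
    #define MY_NULLS_MARKER(v)  ((void *)(((uintptr_t)(v) << 1) | 1))

    static int is_a_nulls(const void *p)
    {
        return (uintptr_t)p & 1;        /* real nodes are even-aligned */
    }

    static uintptr_t get_nulls_value(const void *p)
    {
        return (uintptr_t)p >> 1;       /* recover the bucket index */
    }

    int main(void)
    {
        void *end = MY_NULLS_MARKER(7); /* terminator of bucket 7 */
        assert(is_a_nulls(end));
        assert(get_nulls_value(end) == 7);
        return 0;
    }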
diff --git a/include/linux/net.h b/include/linux/net.h
index cd0c8bd0a1dec0..0620f5e18c96b7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,7 +146,7 @@ struct proto_ops {
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
- struct socket *newsock, int flags);
+ struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 772476028a6507..43a774873aa96d 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
+static inline int genphy_no_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7e8597b0..a23a3315318048 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
+/**
+ * hlist_nulls_for_each_entry_safe -
+ * iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b7952d55b9c000..f39ae697347f65 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
-int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 826f198374f809..c7a577976bec08 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
return (unsigned long)min_t(u64, when, max_when);
}
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a244db5e5ff7fa..07a0b128625a4e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -476,7 +476,8 @@ struct sctp_pf {
int (*send_verify) (struct sctp_sock *, union sctp_addr *);
int (*supported_addrs)(const struct sctp_sock *, __be16 *);
struct sock *(*create_accept_sk) (struct sock *sk,
- struct sctp_association *asoc);
+ struct sctp_association *asoc,
+ bool kern);
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
diff --git a/include/net/sock.h b/include/net/sock.h
index 5e5997654db645..03252d53975de7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -236,6 +236,7 @@ struct sock_common {
* @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
+ * @sk_kern_sock: True if sock is using kernel lock classes
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
@@ -430,7 +431,8 @@ struct sock {
#endif
kmemcheck_bitfield_begin(flags);
- unsigned int sk_padding : 2,
+ unsigned int sk_padding : 1,
+ sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4,
@@ -1015,7 +1017,8 @@ struct proto {
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
- struct sock * (*accept)(struct sock *sk, int flags, int *err);
+ struct sock * (*accept)(struct sock *sk, int flags, int *err,
+ bool kern);
int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
-int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
unsigned int sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f3dd6ff4..0c5d5dd61b6ab1 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@ struct packet_diag_mclist {
__u32 pdmc_count;
__u16 pdmc_type;
__u16 pdmc_alen;
- __u8 pdmc_addr[MAX_ADDR_LEN];
+ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
};
struct packet_diag_ring {
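packet_diag.h is exported to userspace, so it cannot keep depending on the kernel-internal MAX_ADDR_LEN; the array size becomes the literal 32 with a breadcrumb comment. Drift between the two definitions is typically caught with a build-time assertion on the kernel side — a sketch using the C11 spelling (BUILD_BUG_ON is the in-kernel equivalent):

    #define MAX_ADDR_LEN 32            /* kernel-internal value */

    struct packet_diag_mclist {        /* UAPI: sizes must be literal */
        unsigned char pdmc_addr[32];   /* MAX_ADDR_LEN */
    };

    /* Fails to compile the moment the two definitions disagree. */
    _Static_assert(sizeof(((struct packet_diag_mclist *)0)->pdmc_addr)
                   == MAX_ADDR_LEN,
                   "pdmc_addr out of sync with MAX_ADDR_LEN");

    int main(void) { return 0; }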
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ea87fb19a9416..afe5bab376c981 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,11 +13,12 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
+#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
struct bucket {
- struct hlist_head head;
+ struct hlist_nulls_head head;
raw_spinlock_t lock;
};
@@ -44,9 +45,14 @@ enum extra_elem_state {
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
- struct hlist_node hash_node;
- struct bpf_htab *htab;
- struct pcpu_freelist_node fnode;
+ struct hlist_nulls_node hash_node;
+ struct {
+ void *padding;
+ union {
+ struct bpf_htab *htab;
+ struct pcpu_freelist_node fnode;
+ };
+ };
};
union {
struct rcu_head rcu;
@@ -162,7 +168,8 @@ skip_percpu_elems:
offsetof(struct htab_elem, lru_node),
htab->elem_size, htab->map.max_entries);
else
- pcpu_freelist_populate(&htab->freelist, htab->elems,
+ pcpu_freelist_populate(&htab->freelist,
+ htab->elems + offsetof(struct htab_elem, fnode),
htab->elem_size, htab->map.max_entries);
return 0;
@@ -217,6 +224,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
int err, i;
u64 cost;
+ BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+ BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+
if (lru && !capable(CAP_SYS_ADMIN))
/* LRU implementation is much more complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now.
@@ -326,7 +338,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_HEAD(&htab->buckets[i].head);
+ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
}
@@ -366,20 +378,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
return &htab->buckets[hash & (htab->n_buckets - 1)];
}
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
return &__select_bucket(htab, hash)->head;
}
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* This lookup function may only be called with the bucket lock held. */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
void *key, u32 key_size)
{
+ struct hlist_nulls_node *n;
+ struct htab_elem *l;
+
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+ if (l->hash == hash && !memcmp(&l->key, key, key_size))
+ return l;
+
+ return NULL;
+}
+
+/* Can be called without the bucket lock. It will repeat the loop in
+ * the unlikely event that elements moved from one bucket into another
+ * while the linked list was being walked.
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+ u32 hash, void *key,
+ u32 key_size, u32 n_buckets)
+{
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_rcu(l, head, hash_node)
+again:
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;
+ if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+ goto again;
+
return NULL;
}
@@ -387,7 +423,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l;
u32 hash, key_size;
@@ -400,7 +436,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
head = select_bucket(htab, hash);
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
return l;
}
@@ -433,8 +469,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
struct bpf_htab *htab = (struct bpf_htab *)arg;
- struct htab_elem *l, *tgt_l;
- struct hlist_head *head;
+ struct htab_elem *l = NULL, *tgt_l;
+ struct hlist_nulls_head *head;
+ struct hlist_nulls_node *n;
unsigned long flags;
struct bucket *b;
@@ -444,9 +481,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
raw_spin_lock_irqsave(&b->lock, flags);
- hlist_for_each_entry_rcu(l, head, hash_node)
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
break;
}
@@ -459,7 +496,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
int i;
@@ -473,7 +510,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
head = select_bucket(htab, hash);
/* lookup the key */
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
if (!l) {
i = 0;
@@ -481,7 +518,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
}
/* key was found, get next key in the same bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
struct htab_elem, hash_node);
if (next_l) {
@@ -500,7 +537,7 @@ find_first_elem:
head = select_bucket(htab, i);
/* pick first element in the bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
struct htab_elem, hash_node);
if (next_l) {
/* if it's not empty, just return it */
@@ -582,9 +619,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
int err = 0;
if (prealloc) {
- l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
- if (!l_new)
+ struct pcpu_freelist_node *l;
+
+ l = pcpu_freelist_pop(&htab->freelist);
+ if (!l)
err = -E2BIG;
+ else
+ l_new = container_of(l, struct htab_elem, fnode);
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
@@ -661,7 +702,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -700,9 +741,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
- hlist_del_rcu(&l_old->hash_node);
+ hlist_nulls_del_rcu(&l_old->hash_node);
free_htab_elem(htab, l_old);
}
ret = 0;
@@ -716,7 +757,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new, *l_old = NULL;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -757,10 +798,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
bpf_lru_node_set_ref(&l_new->lru_node);
- hlist_del_rcu(&l_old->hash_node);
+ hlist_nulls_del_rcu(&l_old->hash_node);
}
ret = 0;
@@ -781,7 +822,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -820,7 +861,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
ret = PTR_ERR(l_new);
goto err;
}
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
}
ret = 0;
err:
@@ -834,7 +875,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -882,7 +923,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
} else {
pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
value, onallcpus);
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
l_new = NULL;
}
ret = 0;
@@ -910,7 +951,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;
@@ -930,7 +971,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
free_htab_elem(htab, l);
ret = 0;
}
@@ -942,7 +983,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;
@@ -962,7 +1003,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
ret = 0;
}
@@ -977,12 +1018,12 @@ static void delete_all_elements(struct bpf_htab *htab)
int i;
for (i = 0; i < htab->n_buckets; i++) {
- struct hlist_head *head = select_bucket(htab, i);
- struct hlist_node *n;
+ struct hlist_nulls_head *head = select_bucket(htab, i);
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_safe(l, n, head, hash_node) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+ hlist_nulls_del_rcu(&l->hash_node);
if (l->state != HTAB_EXTRA_ELEM_USED)
htab_elem_free(htab, l);
}
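Because preallocated htab elements are recycled, a lockless reader can be standing on an element at the instant it is freed and reinserted into a different bucket, and would then finish its walk in the wrong chain. lookup_nulls_elem_raw catches this by checking that the terminating nulls value names the expected bucket, and retries otherwise. The shape of that loop in plain C, building on the nulls encoding sketched earlier (simplified node type, not the BPF structs):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_NULLS_MARKER(v) ((void *)(((uintptr_t)(v) << 1) | 1))
    #define IS_NULLS(p)        ((uintptr_t)(p) & 1)
    #define NULLS_VALUE(p)     ((uintptr_t)(p) >> 1)

    struct elem { struct elem *next; uint32_t hash; int key, value; };

    static struct elem *lookup(struct elem *head, uint32_t hash, int key,
                               uint32_t n_buckets)
    {
        struct elem *e;
    again:
        for (e = head; !IS_NULLS(e); e = e->next)
            if (e->hash == hash && e->key == key)
                return e;
        /* We ran off the end of *some* chain.  If the terminator does
         * not belong to the bucket we started in, an element we were
         * standing on was freed and reinserted elsewhere: start over. */
        if (NULLS_VALUE(e) != (hash & (n_buckets - 1)))
            goto again;
        return NULL;
    }

    int main(void)
    {
        struct elem e = { MY_NULLS_MARKER(0), 0, 42, 1 };
        printf("%d\n", lookup(&e, 0, 42, 4)->value);   /* 1 */
        return 0;
    }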
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 8bfe0afaee1082..b37bd9ab7f5742 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -500,9 +500,15 @@ unlock:
raw_spin_unlock(&trie->lock);
}
+static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ return -ENOTSUPP;
+}
+
static const struct bpf_map_ops trie_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
+ .map_get_next_key = trie_get_next_key,
.map_lookup_elem = trie_lookup_elem,
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
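trie_get_next_key exists purely to return -ENOTSUPP: the LPM trie previously left map_get_next_key unset, and the BPF syscall path crashed calling through the NULL pointer (fix 2 in the merge summary). Filling ops tables with explicit stubs converts that crash into an error the caller can handle; in miniature (524 is the kernel-internal value of ENOTSUPP):

    #include <stdio.h>

    #define ENOTSUPP 524               /* kernel-internal errno value */

    struct map_ops {
        int (*get_next_key)(void *map, void *key, void *next_key);
    };

    static int no_get_next_key(void *map, void *key, void *next_key)
    {
        (void)map; (void)key; (void)next_key;
        return -ENOTSUPP;              /* explicit "not implemented" */
    }

    /* With a stub in place, callers get -ENOTSUPP; with a NULL pointer
     * here, the same call would be a crash. */
    static const struct map_ops trie_ops = { .get_next_key = no_get_next_key };

    int main(void)
    {
        printf("%d\n", trie_ops.get_next_key(NULL, NULL, NULL));
        return 0;
    }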
diff --git a/net/atm/svc.c b/net/atm/svc.c
index db9794ec61d88e..5589de7086af4e 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -318,7 +318,8 @@ out:
return error;
}
-static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
+static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
lock_sock(sk);
- error = svc_create(sock_net(sk), newsock, 0, 0);
+ error = svc_create(sock_net(sk), newsock, 0, kern);
if (error)
goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a8e42cedf1dbc7..b7c486752b3acf 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1320,7 +1320,8 @@ out_release:
return err;
}
-static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f307b145ea5405..507b80d59dec4f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -301,7 +301,7 @@ done:
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa1a814ceddca7..ac3c650cb234f9 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -471,7 +471,8 @@ done:
return err;
}
-static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e4e9a2da1e7e7a..728e0c8dc8e74c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -627,7 +627,7 @@ done:
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk, *ch;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 236f34244dbe1f..013f2290bfa56d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ br_drop_fake_rtable(skb);
return netif_receive_skb(skb);
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 95087e6e825836..fa87fbd62bb788 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
}
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one. On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- br_drop_fake_rtable(skb);
- return NF_ACCEPT;
-}
-
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -908,12 +893,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
.priority = NF_BR_PRI_BRNF,
},
{
- .hook = br_nf_local_in,
- .pf = NFPROTO_BRIDGE,
- .hooknum = NF_BR_LOCAL_IN,
- .priority = NF_BR_PRI_BRNF,
- },
- {
.hook = br_nf_forward_ip,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d47..7869ae3837ca74 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
{
rtnl_lock();
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3945821e9c1f8f..65ea0ff4017c16 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct kobject *kobj = &dev->_rx[i].kobj;
- if (!list_empty(&dev_net(dev)->exit_list))
+ if (!atomic_read(&dev_net(dev)->count))
kobj->uevent_suppress = 1;
if (dev->sysfs_rx_queue_group)
sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct netdev_queue *queue = dev->_tx + i;
- if (!list_empty(&dev_net(dev)->exit_list))
+ if (!atomic_read(&dev_net(dev)->count))
queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
{
struct device *dev = &(ndev->dev);
- if (!list_empty(&dev_net(ndev)->exit_list))
+ if (!atomic_read(&dev_net(ndev)->count))
dev_set_uevent_suppress(dev, 1);
kobject_get(&dev->kobj);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3557958e9bf14..cd4ba8c6b60916 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3828,13 +3828,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
if (!skb_may_tx_timestamp(sk, false))
return;
- /* take a reference to prevent skb_orphan() from freeing the socket */
- sock_hold(sk);
-
- *skb_hwtstamps(skb) = *hwtstamps;
- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
-
- sock_put(sk);
+ /* Take a reference to prevent skb_orphan() from freeing the socket,
+ * but only if the socket refcount is not zero.
+ */
+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+ *skb_hwtstamps(skb) = *hwtstamps;
+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+ sock_put(sk);
+ }
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
@@ -3893,7 +3894,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
struct sock *sk = skb->sk;
struct sock_exterr_skb *serr;
- int err;
+ int err = 1;
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
@@ -3903,14 +3904,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
- /* take a reference to prevent skb_orphan() from freeing the socket */
- sock_hold(sk);
-
- err = sock_queue_err_skb(sk, skb);
+ /* Take a reference to prevent skb_orphan() from freeing the socket,
+ * but only if the socket refcount is not zero.
+ */
+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+ err = sock_queue_err_skb(sk, skb);
+ sock_put(sk);
+ }
if (err)
kfree_skb(skb);
-
- sock_put(sk);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
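Both skbuff.c paths replace an unconditional sock_hold() with atomic_inc_not_zero(&sk->sk_refcnt): taking a reference is only legal while at least one other reference still exists, otherwise the timestamp path can resurrect a socket that is already being freed. The primitive is a small compare-and-swap loop — a sketch in C11 atomics (the kernel has its own implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Increment *v unless it is already 0; return true if we took a ref. */
    static bool inc_not_zero(_Atomic int *v)
    {
        int old = atomic_load(v);
        do {
            if (old == 0)
                return false;          /* object is already being freed */
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return true;
    }

    int main(void)
    {
        _Atomic int refcnt = 1;
        if (inc_not_zero(&refcnt)) {   /* safe: holds a second reference */
            /* ... use the object ... */
            atomic_fetch_sub(&refcnt, 1);   /* sock_put() analogue */
        }
        printf("refcnt=%d\n", atomic_load(&refcnt));   /* 1 */
        return 0;
    }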
diff --git a/net/core/sock.c b/net/core/sock.c
index f6fd79f33097f3..a96d5f7a5734a5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);
/*
* Each address family might have different locking rules, so we have
- * one slock key per address family:
+ * one slock key per address family and separate keys for internal and
+ * userspace sockets.
*/
static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
+static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
/*
* Make lock validator output more readable. (we pre-construct these
* strings build-time, so that runtime initialization of socket
* locks is fast):
*/
+
+#define _sock_locks(x) \
+ x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
+ x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
+ x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
+ x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
+ x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
+ x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
+ x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
+ x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
+ x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
+ x "27" , x "28" , x "AF_CAN" , \
+ x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
+ x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
+ x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
+ x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
+ x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
+
static const char *const af_family_key_strings[AF_MAX+1] = {
- "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
- "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
- "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
- "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
- "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
- "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
- "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
- "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
- "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
- "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
- "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
- "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
- "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
- "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
- "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
+ _sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
- "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
- "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
- "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
- "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
- "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
- "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
- "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
- "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
- "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
- "slock-27" , "slock-28" , "slock-AF_CAN" ,
- "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
- "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
- "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
- "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
- "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
+ _sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
- "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
- "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
- "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
- "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
- "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
- "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
- "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
- "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
- "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
- "clock-27" , "clock-28" , "clock-AF_CAN" ,
- "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
- "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
- "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
- "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
- "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
+ _sock_locks("clock-")
+};
+
+static const char *const af_family_kern_key_strings[AF_MAX+1] = {
+ _sock_locks("k-sk_lock-")
+};
+static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
+ _sock_locks("k-slock-")
+};
+static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
+ _sock_locks("k-clock-")
};
/*
@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
* so split the lock classes by using a per-AF key:
*/
static struct lock_class_key af_callback_keys[AF_MAX];
+static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across
@@ -1293,7 +1283,16 @@ lenout:
*/
static inline void sock_lock_init(struct sock *sk)
{
- sock_lock_init_class_and_name(sk,
+ if (sk->sk_kern_sock)
+ sock_lock_init_class_and_name(
+ sk,
+ af_family_kern_slock_key_strings[sk->sk_family],
+ af_family_kern_slock_keys + sk->sk_family,
+ af_family_kern_key_strings[sk->sk_family],
+ af_family_kern_keys + sk->sk_family);
+ else
+ sock_lock_init_class_and_name(
+ sk,
af_family_slock_key_strings[sk->sk_family],
af_family_slock_keys + sk->sk_family,
af_family_key_strings[sk->sk_family],
@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
* why we need sk_prot_creator -acme
*/
sk->sk_prot = sk->sk_prot_creator = prot;
+ sk->sk_kern_sock = kern;
sock_lock_init(sk);
sk->sk_net_refcnt = kern ? 0 : 1;
if (likely(sk->sk_net_refcnt))
@@ -2277,7 +2277,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
}
EXPORT_SYMBOL(sock_no_socketpair);
-int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
return -EOPNOTSUPP;
}
@@ -2481,7 +2482,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
}
rwlock_init(&sk->sk_callback_lock);
- lockdep_set_class_and_name(&sk->sk_callback_lock,
+ if (sk->sk_kern_sock)
+ lockdep_set_class_and_name(
+ &sk->sk_callback_lock,
+ af_kern_callback_keys + sk->sk_family,
+ af_family_kern_clock_key_strings[sk->sk_family]);
+ else
+ lockdep_set_class_and_name(
+ &sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
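The three hand-maintained name tables collapse into one _sock_locks() macro: the prefix argument is glued onto every family name by adjacent-string-literal concatenation, which is why the three new "k-" kernel-socket tables cost one line each. The trick in isolation:

    #include <stdio.h>

    /* One list of names, many prefixed tables: adjacent string literals
     * ("k-slock-" "AF_INET") are concatenated by the compiler. */
    #define _my_locks(x)  x "AF_UNSPEC", x "AF_UNIX", x "AF_INET"

    static const char *const slock_names[]      = { _my_locks("slock-") };
    static const char *const kern_slock_names[] = { _my_locks("k-slock-") };

    int main(void)
    {
        printf("%s / %s\n", slock_names[2], kern_slock_names[2]);
        /* -> slock-AF_INET / k-slock-AF_INET */
        return 0;
    }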
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c48..5e3a7302f7747e 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
hc->tx_seqbufc = 0;
+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 409d0cfd344748..b99168b0fabf2a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
- dccp_do_redirect(skb, sk);
+ if (!sock_owned_by_user(sk))
+ dccp_do_redirect(skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
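This is the routing-redirect race fix from the merge summary: the ICMP error handler must not rewrite the socket's cached route while a process owns the socket lock, so dccp (and tcp, in the later hunks) now skip the redirect in that case and let the next route lookup recover. The check-and-skip shape, abstracted — sock_owned_by_user() is the real predicate; everything else below is schematic:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock { bool owned_by_user; const char *cached_route; };

    /* ICMP redirect handling runs in softirq context.  If the socket is
     * currently locked by a process, rewriting its cached route here
     * races with that process: skip, and let a later lookup refresh it. */
    static void handle_redirect(struct sock *sk, const char *new_route)
    {
        if (sk->owned_by_user)
            return;
        sk->cached_route = new_route;
    }

    int main(void)
    {
        struct sock sk = { .owned_by_user = true, .cached_route = "old" };
        handle_redirect(&sk, "new");
        printf("%s\n", sk.cached_route);   /* still "old" */
        return 0;
    }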
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 233b57367758c6..d9b6a4e403e701 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ if (!sock_owned_by_user(sk)) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst)
- dst->ops->redirect(dst, sk, skb);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
goto out;
}
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index e267e6f4c9a556..abd07a44321985 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct dccp_request_sock *dreq = dccp_rsk(req);
bool own_req;
+	/* TCP/DCCP listeners became lockless.
+	 * DCCP stores complex state in its request_sock, so we need
+	 * protection for it now that this code runs without being protected
+	 * by the parent (listener) lock.
+	 */
+ spin_lock_bh(&dreq->dreq_lock);
+
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
inet_rtx_syn_ack(sk, req);
}
/* Network Duplicate, discard packet */
- return NULL;
+ goto out;
}
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
req, &own_req);
- if (!child)
- goto listen_overflow;
-
- return inet_csk_complete_hashdance(sk, child, req, own_req);
+ if (child) {
+ child = inet_csk_complete_hashdance(sk, child, req, own_req);
+ goto out;
+ }
-listen_overflow:
- dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req);
- return NULL;
+out:
+ spin_unlock_bh(&dreq->dreq_lock);
+ return child;
}
EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
{
struct dccp_request_sock *dreq = dccp_rsk(req);
+ spin_lock_init(&dreq->dreq_lock);
inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
inet_rsk(req)->acked = 0;
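Since TCP/DCCP listeners became lockless, two packets for the same connection request can race through dccp_check_req; the new per-request dreq_lock serializes them, and the rewritten control flow funnels every exit through a single unlock. A pthread sketch of that shape, with illustrative fields rather than the real request_sock (build with -pthread):

    #include <pthread.h>

    struct dccp_request {
        pthread_mutex_t lock;     /* dreq_lock analogue */
        int isr;                  /* some of the "complex state" */
    };

    static void *check_req(struct dccp_request *req, int seq)
    {
        void *child = NULL;

        pthread_mutex_lock(&req->lock);
        if (seq == req->isr)      /* retransmitted REQUEST: bail out */
            goto out;
        /* ... create and hash the child socket ... */
    out:
        pthread_mutex_unlock(&req->lock);   /* single exit path */
        return child;
    }

    int main(void)
    {
        struct dccp_request req = { PTHREAD_MUTEX_INITIALIZER, 42 };
        (void)check_req(&req, 42);
        return 0;
    }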
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index e6e79eda97636d..7de5b40a5d0d12 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
return skb == NULL ? ERR_PTR(err) : skb;
}
-static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
+static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk, *newsk;
struct sk_buff *skb = NULL;
@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
cb = DN_SKB_CB(skb);
sk->sk_ack_backlog--;
- newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
+ newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 602d40f43687c9..6b1fc6e4278ef4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
-int inet_accept(struct socket *sock, struct socket *newsock, int flags)
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
- struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
+ struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
if (!sk2)
goto do_err;
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
int proto = iph->protocol;
int err = -ENOSYS;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
skb_set_inner_network_header(skb, nhoff);
+ }
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4d5980ade3b58..5e313c1ac94fc8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
/*
* This will accept the next outstanding connection.
*/
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 737ce826d7ecfa..7a3fd25e8913a9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9a89b8deafae1e..575e19dcc01763 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
- struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
- u32 mtu = tcp_sk(sk)->mtu_info;
+ struct dst_entry *dst;
+ u32 mtu;
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+ mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
switch (type) {
case ICMP_REDIRECT:
- do_redirect(icmp_skb, sk);
+ if (!sock_owned_by_user(sk))
+ do_redirect(icmp_skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 40d893556e6701..b2ab411c6d3728 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
sk_mem_reclaim_partial(sk);
- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !icsk->icsk_pending)
goto out;
if (time_after(icsk->icsk_timeout, jiffies)) {
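The timer handlers swap an equality test for a set-membership test: TCPF_* flags are exactly 1 << TCP_*, so `(1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)` checks both excluded states in one branch — needed now that a socket can also be in LISTEN when these timers fire (this pairs with the divide-by-zero fix named in the merge summary). In isolation, using the real kernel state numbers:

    #include <stdio.h>

    enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

    #define TCPF_CLOSE   (1 << TCP_CLOSE)
    #define TCPF_LISTEN  (1 << TCP_LISTEN)

    static int timer_should_bail(int state)
    {
        /* One branch covers the whole set of excluded states. */
        return (1 << state) & (TCPF_CLOSE | TCPF_LISTEN);
    }

    int main(void)
    {
        printf("%d %d\n", !!timer_should_bail(TCP_LISTEN),
                          !!timer_should_bail(TCP_ESTABLISHED)); /* 1 0 */
        return 0;
    }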
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04db40620ea65c..a9a9553ee63df8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
err = register_pernet_subsys(&inet6_net_ops);
if (err)
goto register_pernet_fail;
- err = icmpv6_init();
- if (err)
- goto icmp_fail;
err = ip6_mr_init();
if (err)
goto ipmr_fail;
+ err = icmpv6_init();
+ if (err)
+ goto icmp_fail;
err = ndisc_init();
if (err)
goto ndisc_fail;
@@ -1061,10 +1061,10 @@ igmp_fail:
ndisc_cleanup();
ndisc_fail:
ip6_mr_cleanup();
-ipmr_fail:
- icmpv6_cleanup();
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+ icmpv6_cleanup();
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
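inet6_init now registers ip6_mr before icmpv6, and the error labels are swapped to match: goto-based unwind only works if the labels undo initializations in exact reverse order, which is what the old label order got wrong once the calls moved. The idiom, reduced:

    #include <stdio.h>

    static int  init_a(void) { return 0; }    /* e.g. ip6_mr_init()          */
    static int  init_b(void) { return -1; }   /* e.g. icmpv6_init(), failing */
    static void exit_a(void) { puts("exit_a"); }

    static int subsys_init(void)
    {
        int err;

        err = init_a();
        if (err)
            goto a_fail;
        err = init_b();
        if (err)
            goto b_fail;              /* must undo only what succeeded */
        return 0;

        /* Labels unwind in exact reverse order of initialization. */
    b_fail:
        exit_a();
    a_fail:
        return err;
    }

    int main(void) { return subsys_init() ? 1 : 0; }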
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e4266746e4a2af..d4bf2c68a545b4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -923,6 +923,8 @@ add:
ins = &rt->dst.rt6_next;
iter = *ins;
while (iter) {
+ if (iter->rt6i_metric > rt->rt6i_metric)
+ break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 0838e6d01d2e49..93e58a5e18374b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
int err = -ENOSYS;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
skb_set_inner_network_header(skb, nhoff);
+ }
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 528b3c1f3fdee4..58f6288e9ba53e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,13 +768,14 @@ slow_path:
* Fragment the datagram.
*/
- *prevhdr = NEXTHDR_FRAGMENT;
troom = rt->dst.dev->needed_tailroom;
/*
* Keep copying data until we run out.
*/
while (left > 0) {
+ u8 *fragnexthdr_offset;
+
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
*/
skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
+ fragnexthdr_offset = skb_network_header(frag);
+ fragnexthdr_offset += prevhdr - skb_network_header(skb);
+ *fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
/*
* Build fragment header.
*/
@@ -1385,7 +1390,7 @@ emsgsize:
if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
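The fragmentation slow path used to write NEXTHDR_FRAGMENT through prevhdr, a pointer into the original skb's headers — data possibly shared with a clone. The fix copies the headers into each fragment first and patches the copy at the same byte offset. The offset rebasing in miniature, with plain buffers instead of skbs (59 is NEXTHDR_NONE, 44 is NEXTHDR_FRAGMENT):

    #include <stdio.h>
    #include <string.h>

    #define NEXTHDR_FRAGMENT 44

    int main(void)
    {
        unsigned char orig[64] = { [6] = 59 };  /* nexthdr byte         */
        unsigned char *prevhdr = &orig[6];      /* points into shared data */
        unsigned char frag[64];

        memcpy(frag, orig, sizeof(orig));       /* each fragment: own copy */
        /* Rebase the pointer: same offset, but inside the copy, so the
         * shared original (and any clone of it) stays untouched. */
        frag[prevhdr - orig] = NEXTHDR_FRAGMENT;

        printf("orig=%u frag=%u\n", orig[6], frag[6]);   /* 59 44 */
        return 0;
    }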
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 644ba59fbd9d5e..3d8a3b63b4fdbe 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
if (!skb->ignore_df && skb->len > mtu) {
skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
- if (skb->protocol == htons(ETH_P_IPV6))
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- else
+ } else {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
+ }
return -EMSGSIZE;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 229bfcc451ef50..35c58b669ebdfd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
+ NLA_ALIGN(sizeof(struct rtnexthop))
+ nla_total_size(16) /* RTA_GATEWAY */
- + nla_total_size(4) /* RTA_OIF */
+ lwtunnel_get_encap_size(rt->dst.lwtstate);
nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
}
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
- unsigned int *flags)
+ unsigned int *flags, bool skip_oif)
{
if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
*flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
goto nla_put_failure;
}
- if (rt->dst.dev &&
+	/* not needed for multipath encoding because it has a rtnexthop struct */
+ if (!skip_oif && rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
@@ -3350,6 +3350,7 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
rtnh->rtnh_hops = 0;
rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
- if (rt6_nexthop_info(skb, rt, &flags) < 0)
+ if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
goto nla_put_failure;
rtnh->rtnh_flags = flags;
@@ -3515,7 +3516,7 @@ static int rt6_fill_node(struct net *net,
nla_nest_end(skb, mp);
} else {
- if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+ if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
goto nla_put_failure;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60a5295a7de6e8..49fa2e8c3fa921 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ if (!sock_owned_by_user(sk)) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst)
- dst->ops->redirect(dst, sk, skb);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
goto out;
}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 81adc29a448dc5..8d77ad5cadaff3 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -828,7 +828,8 @@ out:
* Wait for incoming connection
*
*/
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb = NULL;
int err;
- err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
+ err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
if (err)
return err;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 89bbde1081ce5e..84de7b6326dcdf 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -938,7 +938,7 @@ done:
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 06186d608a274e..cb4fff785cbf5a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
* @sock: Socket which connections arrive on.
* @newsock: Socket to move incoming connection to.
* @flags: User specified operational flags.
+ * @kern: If the socket is kernel internal
*
* Accept a new incoming connection.
* Returns 0 upon success, negative otherwise.
*/
-static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
+static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk, *newsk;
struct llc_sock *llc, *newllc;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3818686182b210..33211f9a265608 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1288,7 +1288,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
/* fall through */
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ if (event != NETDEV_UNREGISTER)
+ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
@@ -2028,6 +2029,7 @@ static void mpls_net_exit(struct net *net)
for (index = 0; index < platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
RCU_INIT_POINTER(platform_label[index], NULL);
+ mpls_notify_route(net, index, rt, NULL, NULL);
mpls_rt_free(rt);
}
rtnl_unlock();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bbf4526b88566..ebf16f7f90892d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -765,7 +765,8 @@ out_release:
return err;
}
-static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
+static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 879885b31cce5f..2ffb18e73df6c0 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
}
static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *new_sk;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 222bedcd95754c..e81537991ddf0d 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
sock_put(sk);
}
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+ bool kern)
{
struct pep_sock *pn = pep_sk(sk), *newpn;
struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
}
/* Create a new to-be-accepted sock */
- newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
+ newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
+ kern);
if (!newsk) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
err = -ENOBUFS;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index a6c8da3ee89349..64634e3ec2fc78 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -305,7 +305,7 @@ out:
}
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
- int flags)
+ int flags, bool kern)
{
struct sock *sk = sock->sk;
struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
if (unlikely(sk->sk_state != TCP_LISTEN))
return -EINVAL;
- newsk = sk->sk_prot->accept(sk, flags, &err);
+ newsk = sk->sk_prot->accept(sk, flags, &err, kern);
if (!newsk)
return err;
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 0e04dcceb1d416..1fa75ab7b73323 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn)
*/
rds_cong_remove_conn(conn);
+ put_net(conn->c_net);
kmem_cache_free(rds_conn_slab, conn);
spin_lock_irqsave(&rds_conn_lock, flags);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ce3775abc6e7a1..1c38d2c7caa8e9 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
+ goto rds_ibdev_out;
}
ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_recv_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
rdsdebug("ib_create_cq recv failed: %d\n", ret);
- goto out;
+ goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
- goto out;
+ goto qp_out;
}
ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
+ goto send_hdrs_dma_out;
}
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
- goto out;
+ goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
- goto out;
+ goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
- goto out;
+ goto sends_out;
}
rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
-out:
+ return ret;
+
+sends_out:
+ vfree(ic->i_sends);
+ack_dma_out:
+ ib_dma_free_coherent(dev, sizeof(struct rds_header),
+ ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+ rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+ if (!ib_destroy_cq(ic->i_recv_cq))
+ ic->i_recv_cq = NULL;
+send_cq_out:
+ if (!ib_destroy_cq(ic->i_send_cq))
+ ic->i_send_cq = NULL;
+rds_ibdev_out:
+ rds_ib_remove_conn(rds_ibdev, conn);
rds_ib_dev_put(rds_ibdev);
+
return ret;
}
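
Before this change every failure in rds_ib_setup_qp() jumped to a single `out:` label that only dropped the device reference, leaking whatever had been set up before the failing step. The replacement is the standard kernel unwind ladder: one label per acquired resource, placed in reverse order of acquisition, so a failure at step N releases exactly steps N-1 down to 1 and nothing else. The idiom reduced to three resources; alloc_*/free_* are stand-ins, not the RDS calls:

static int setup_example(struct example *ex)
{
	int ret;

	ex->a = alloc_a();
	if (!ex->a)
		return -ENOMEM;

	ex->b = alloc_b();
	if (!ex->b) {
		ret = -ENOMEM;
		goto out_a;
	}

	ex->c = alloc_c();
	if (!ex->c) {
		ret = -ENOMEM;
		goto out_b;
	}

	return 0;

out_b:			/* unwind strictly in reverse order */
	free_b(ex->b);
out_a:
	free_a(ex->a);
	return ret;
}
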
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 39518ef7af4dfb..82d38ccf5e8bcf 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -147,7 +147,7 @@ struct rds_connection {
/* Protocol version */
unsigned int c_version;
- possible_net_t c_net;
+ struct net *c_net;
struct list_head c_map_item;
unsigned long c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
- return read_pnet(&conn->c_net);
+ return conn->c_net;
}
static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
- write_pnet(&conn->c_net, net);
+ conn->c_net = get_net(net);
}
#define RDS_FLAG_CONG_BITMAP 0x01
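
possible_net_t only records a namespace pointer (and compiles away entirely without CONFIG_NET_NS); it takes no reference, so the namespace could be torn down while RDS connections still pointed at it. Storing a plain struct net * obtained via get_net() makes every connection pin its namespace, and the matching put_net() added in rds_conn_destroy() above drops that pin. The pairing in isolation, on a hypothetical struct:

struct example_conn {
	struct net *net;
};

static void example_conn_bind(struct example_conn *conn, struct net *net)
{
	conn->net = get_net(net);	/* take a namespace reference */
}

static void example_conn_destroy(struct example_conn *conn)
{
	put_net(conn->net);		/* drop it exactly once */
	kfree(conn);
}
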
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index a973d3b4dff0b2..22569007677357 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
* we do need to clean up the listen socket here.
*/
if (rtn->rds_tcp_listen_sock) {
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
+
rtn->rds_tcp_listen_sock = NULL;
- flush_work(&rtn->rds_tcp_accept_w);
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
}
}
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
rtn->rds_tcp_listen_sock = NULL;
- flush_work(&rtn->rds_tcp_accept_w);
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ struct net *c_net = tc->t_cpath->cp_conn->c_net;
if (net != c_net || !tc->t_sock)
continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
+
+ if (!lsock)
+ return NULL;
- return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+ return lsock->sk->sk_user_data;
}
static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
- struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ struct net *c_net = tc->t_cpath->cp_conn->c_net;
if (net != c_net || !tc->t_sock)
continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
goto out;
}
- ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
- if (ret) {
- pr_warn("could not register rds_tcp_dev_notifier\n");
+ ret = rds_tcp_recv_init();
+ if (ret)
goto out_slab;
- }
ret = register_pernet_subsys(&rds_tcp_net_ops);
if (ret)
- goto out_notifier;
+ goto out_recv;
- ret = rds_tcp_recv_init();
- if (ret)
+ ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+ if (ret) {
+ pr_warn("could not register rds_tcp_dev_notifier\n");
goto out_pernet;
+ }
rds_trans_register(&rds_tcp_transport);
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
-out_notifier:
- if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
- pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_recv:
+ rds_tcp_recv_exit();
out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
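
The rds_tcp_init() reordering follows the usual rule for module init: bring pieces up in dependency order and tear them down in reverse. A netdevice notifier can fire the moment it is registered, so it has to come last, after the receive path and the pernet state it may touch exist; the error labels then unwind in the new reverse order. Schematically, with hypothetical ops and notifier names standing in for the RDS ones:

static struct pernet_operations example_net_ops;	/* stand-ins */
static struct notifier_block example_dev_notifier;

static int __init example_init(void)
{
	int ret;

	ret = example_recv_init();		/* plain resources first */
	if (ret)
		return ret;

	ret = register_pernet_subsys(&example_net_ops);
	if (ret)
		goto out_recv;

	/* The notifier may run as soon as it is registered, so it
	 * is registered last, once everything it touches exists.
	 */
	ret = register_netdevice_notifier(&example_dev_notifier);
	if (ret)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&example_net_ops);
out_recv:
	example_recv_exit();
	return ret;
}
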
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc890657679..56ea6620fcf97c 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 67d0929c7d3d0c..507678853e6cb3 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
if (ret < 0)
goto out;
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
* before it has been accepted and the accepter has set up their
* data_ready.. we only want to queue listen work for our listening
* socket
+ *
+ * (*ready)() may be null if we are racing with netns delete, and
+ * the listen socket is being torn down.
*/
if (sk->sk_state == TCP_LISTEN)
rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
out:
read_unlock_bh(&sk->sk_callback_lock);
- ready(sk);
+ if (ready)
+ ready(sk);
}
struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
return NULL;
}
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
struct sock *sk;
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
+ flush_work(acceptor);
sock_release(sock);
}
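
rds_tcp_listen_stop() now takes the acceptor work, and its callers clear rtn->rds_tcp_listen_sock before stopping, so a data_ready arriving mid-teardown finds no listen socket and rds_tcp_listen_sock_def_readable() returns NULL. The callback therefore has to tolerate a NULL saved handler, and flush_work(acceptor) ensures any accept already queued finishes before sock_release(). The NULL-tolerant callback shape, with a hypothetical lookup helper in place of the RDS one:

typedef void (*ready_fn_t)(struct sock *sk);
ready_fn_t example_saved_ready(struct net *net);	/* NULL once torn down */

static void example_data_ready(struct sock *sk)
{
	ready_fn_t ready;

	read_lock_bh(&sk->sk_callback_lock);
	ready = example_saved_ready(sock_net(sk));
	read_unlock_bh(&sk->sk_callback_lock);

	if (ready)	/* may be NULL when racing with netns delete */
		ready(sk);
}
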
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b8a1df2c978532..4a972925702367 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -871,7 +871,8 @@ out_release:
return err;
}
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9f4cfa25af7c92..18b2ad8be8e2b5 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ enum rxrpc_call_state state;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int ix;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
_proto("Rx DATA %%%u { #%u f=%02x }",
sp->hdr.serial, seq, sp->hdr.flags);
- if (call->state >= RXRPC_CALL_COMPLETE)
+ state = READ_ONCE(call->state);
+ if (state >= RXRPC_CALL_COMPLETE)
return;
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
- if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
- call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+ if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+ state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
return;
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
+ bool wake = false;
u32 rwind = ntohl(ackinfo->rwind);
_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
- call->tx_winsize = rwind;
+ if (call->tx_winsize != rwind) {
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (rwind > call->tx_winsize)
+ wake = true;
+ call->tx_winsize = rwind;
+ }
+
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
@@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
spin_unlock_bh(&peer->lock);
_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
}
+
+ if (wake)
+ wake_up(&call->waitq);
}
/*
@@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
return rxrpc_proto_abort("AK0", call, 0);
/* Ignore ACKs unless we are or have just been transmitting. */
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
break;
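
call->state is written by other contexts while these paths read it locklessly, so testing it twice can observe two different states and make an inconsistent decision, and a plain access lets the compiler re-load or tear it. Snapshotting the state once with READ_ONCE() gives every branch the same value; the recvmsg.c and sendmsg.c hunks below apply the same conversion. In miniature, with hypothetical names:

enum example_state { EXAMPLE_SEND, EXAMPLE_AWAIT, EXAMPLE_COMPLETE };

struct example_call {
	enum example_state state;	/* updated by other contexts */
};

void example_process(struct example_call *call);

static void example_input(struct example_call *call)
{
	/* One marked load: every test below sees the same snapshot
	 * even if a writer flips call->state concurrently.
	 */
	enum example_state state = READ_ONCE(call->state);

	if (state >= EXAMPLE_COMPLETE)
		return;

	if (state == EXAMPLE_SEND || state == EXAMPLE_AWAIT)
		example_process(call);
}
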
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6491ca46a03fda..3e2f1a8e9c5b51 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -527,7 +527,7 @@ try_again:
msg->msg_namelen = len;
}
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_ACCEPTING:
ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
mutex_lock(&call->user_mutex);
- switch (call->state) {
+ switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bc2d3dcff9de76..97ab214ca4118d 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
+ enum rxrpc_call_state state;
enum rxrpc_command cmd;
struct rxrpc_call *call;
unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
return PTR_ERR(call);
/* ... and we have the call lock. */
} else {
- ret = -EBUSY;
- if (call->state == RXRPC_CALL_UNINITIALISED ||
- call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
- call->state == RXRPC_CALL_SERVER_PREALLOC ||
- call->state == RXRPC_CALL_SERVER_SECURING ||
- call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ switch (READ_ONCE(call->state)) {
+ case RXRPC_CALL_UNINITIALISED:
+ case RXRPC_CALL_CLIENT_AWAIT_CONN:
+ case RXRPC_CALL_SERVER_PREALLOC:
+ case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_ACCEPTING:
+ ret = -EBUSY;
goto error_release_sock;
+ default:
+ break;
+ }
ret = mutex_lock_interruptible(&call->user_mutex);
release_sock(&rx->sk);
@@ -542,10 +547,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
}
}
+ state = READ_ONCE(call->state);
_debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
+ call->debug_id, call->user_call_ID, state, call->conn);
- if (call->state >= RXRPC_CALL_COMPLETE) {
+ if (state >= RXRPC_CALL_COMPLETE) {
/* it's too late for this call */
ret = -ESHUTDOWN;
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
@@ -555,12 +561,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (rxrpc_is_client_call(call) &&
- call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+ state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
/* request phase complete for this client call */
ret = -EPROTO;
} else if (rxrpc_is_service_call(call) &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ state != RXRPC_CALL_SERVER_SEND_REPLY) {
/* Reply phase not begun or not complete for service call. */
ret = -EPROTO;
} else {
@@ -605,14 +611,21 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
_debug("CALL %d USR %lx ST %d on CONN %p",
call->debug_id, call->user_call_ID, call->state, call->conn);
- if (call->state >= RXRPC_CALL_COMPLETE) {
- ret = -ESHUTDOWN; /* it's too late for this call */
- } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
- ret = -EPROTO; /* request phase complete for this client call */
- } else {
+ switch (READ_ONCE(call->state)) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ case RXRPC_CALL_SERVER_SEND_REPLY:
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+ break;
+ case RXRPC_CALL_COMPLETE:
+ read_lock_bh(&call->state_lock);
+ ret = -call->error;
+ read_unlock_bh(&call->state_lock);
+ break;
+ default:
+ /* Request phase complete for this client call */
+ ret = -EPROTO;
+ break;
}
mutex_unlock(&call->user_mutex);
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ab80629099622c..f9bb43c25697e7 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
if (ret < 0)
return ret;
+ if (!tb[TCA_CONNMARK_PARMS])
+ return -EINVAL;
+
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(tn, parm->index, a, bind)) {
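
Parsed netlink attribute tables leave tb[] slots NULL for attributes the sender simply omitted, so calling nla_data(tb[TCA_CONNMARK_PARMS]) without the presence check dereferenced NULL on malformed requests. The rule of thumb: unless the parse policy makes an attribute mandatory, test the slot before touching its payload. A sketch with a hypothetical attribute set:

enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_PARMS, __EXAMPLE_ATTR_MAX };

static int example_parse(struct nlattr **tb)
{
	struct example_parms *parm;

	if (!tb[EXAMPLE_ATTR_PARMS])	/* the sender may omit it */
		return -EINVAL;

	parm = nla_data(tb[EXAMPLE_ATTR_PARMS]);
	return example_apply(parm);
}
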
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3b7074e2302487..c736627f8f4a0e 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
return skb->len;
nla_put_failure:
- rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 063baac5b9fe40..961ee59f696a0b 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
/* Create and initialize a new sk for the socket to be returned by accept(). */
static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
- struct sctp_association *asoc)
+ struct sctp_association *asoc,
+ bool kern)
{
struct sock *newsk;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
struct ipv6_txoptions *opt;
- newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
+ newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
if (!newsk)
goto out;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1b6d4574d2b02a..989a900383b57c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
/* Create and initialize a new sk for the socket returned by accept(). */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
- struct sctp_association *asoc)
+ struct sctp_association *asoc,
+ bool kern)
{
struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
- sk->sk_prot, 0);
+ sk->sk_prot, kern);
struct inet_sock *newinet;
if (!newsk)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6f0a9be50f5055..0f378ea2ae3882 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4116,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
* descriptor will be returned from accept() to represent the newly
* formed association.
*/
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
struct sctp_sock *sp;
struct sctp_endpoint *ep;
@@ -4151,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
*/
asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
- newsk = sp->pf->create_accept_sk(sk, asoc);
+ newsk = sp->pf->create_accept_sk(sk, asoc, kern);
if (!newsk) {
error = -ENOMEM;
goto out;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 85837ab90e8916..093803786eacf3 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -944,7 +944,7 @@ out:
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
- int flags)
+ int flags, bool kern)
{
struct sock *sk = sock->sk, *nsk;
DECLARE_WAITQUEUE(wait, current);
diff --git a/net/socket.c b/net/socket.c
index 2c1e8677ff2d4f..e034fe4164beec 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1506,7 +1506,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
if (err)
goto out_fd;
- err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
if (err < 0)
goto out_fd;
@@ -1731,6 +1731,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
/* We assume all kernel code knows the size of sockaddr_storage */
msg.msg_namelen = 0;
msg.msg_iocb = NULL;
+ msg.msg_flags = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3239,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
if (err < 0)
goto done;
- err = sock->ops->accept(sock, *newsock, flags);
+ err = sock->ops->accept(sock, *newsock, flags, true);
if (err < 0) {
sock_release(*newsock);
*newsock = NULL;
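
This is where the new accept() flag gets its meaning: the accept4() syscall path passes false (a user socket is being created) and kernel_accept() passes true, so every protocol sees an accurate origin for the child. The recvfrom() hunk is independent but worth noting: msg_flags is read and updated by the receive path yet was never initialized on this stack msghdr, so it must be zeroed like the other fields. A kernel-side receive helper that builds a fully initialized msghdr might look like this (names hypothetical):

static int example_kernel_recv(struct socket *sock, void *buf, size_t size,
			       int flags)
{
	struct msghdr msg = { };	/* all fields, msg_flags included, start at 0 */
	struct kvec iov = {
		.iov_base = buf,
		.iov_len  = size,
	};

	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}
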
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43e4045e72bc00..7130e73bd42c21 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
*
* Returns 0 on success, errno otherwise
*/
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
if (res)
goto exit;
security_sk_clone(sock->sk, new_sock->sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ee37b390260a62..928691c434087e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
set_bit(SOCK_PASSSEC, &new->flags);
}
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sock *tsk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9192ead6675114..9f770f33c10098 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1250,7 +1250,8 @@ out:
return err;
}
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *listener;
int err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fd28a49dbe8f0c..8b911c29860e79 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
return rc;
}
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
{
struct sock *sk = sock->sk;
struct sock *newsk;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0806dccdf50784..236cbbc0ab9cff 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
- const struct flowi *fl)
+ const struct flowi *fl, u16 family)
{
struct xfrm_policy *pol;
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
again:
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
- bool match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl, family);
int err = 0;
if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
- pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
{
int rv;
+ /* Initialize the per-net locks here */
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
rv = xfrm_statistics_init(net);
if (rv < 0)
goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
if (rv < 0)
goto out;
- /* Initialize the per-net locks here */
- spin_lock_init(&net->xfrm.xfrm_state_lock);
- spin_lock_init(&net->xfrm.xfrm_policy_lock);
- mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
return 0;
out:
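
Two separate xfrm fixes here. xfrm_sk_policy_lookup() matched selectors against sk->sk_family, but an IPv6 socket can carry IPv4 (mapped) flows, so the match must use the family of the flow actually being looked up, which both callers now pass in. And xfrm_net_init() moves the per-net lock initialization ahead of the subsystem inits, since the error-unwind paths of those inits can already take the locks. The ordering rule in isolation, on a hypothetical pernet struct:

static int __net_init example_net_init(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);
	int rv;

	/* Locks before anything that can fail: the unwind paths of
	 * the inits below may already take them.
	 */
	spin_lock_init(&en->lock);
	mutex_init(&en->cfg_mutex);

	rv = example_state_init(net);
	if (rv < 0)
		return rv;

	return example_policy_init(net);
}
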
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 00000000000000..0674272598205c
--- /dev/null
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+ struct pt_regs regs;
+ __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 4b498265dae6dc..67531f47781b40 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,12 +1,14 @@
LIBDIR := ../../../lib
BPFOBJ := $(LIBDIR)/bpf/bpf.o
-CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
+CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
TEST_PROGS := test_kmod.sh
+all: $(TEST_GEN_PROGS)
+
.PHONY: all clean force
# force a rebuild of BPFOBJ when its dependencies are updated
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e1f5b9eea1e874..d1555e4240c0fb 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,8 @@
* License as published by the Free Software Foundation.
*/
+#include <asm/types.h>
+#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
@@ -4583,10 +4585,12 @@ static bool is_admin(void)
cap_flag_value_t sysadmin = CAP_CLEAR;
const cap_value_t cap_val = CAP_SYS_ADMIN;
+#ifdef CAP_IS_SUPPORTED
if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
perror("cap_get_flag");
return false;
}
+#endif
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");