Commit d7c5303f authored by Linus Torvalds

Merge tag 'net-5.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes for 5.13-rc4, including fixes from bpf, netfilter,
  can and wireless trees. Notably including fixes for the recently
  announced "FragAttacks" WiFi vulnerabilities. Rather large batch,
  touching some core parts of the stack, too, but nothing hair-raising.

  Current release - regressions:

   - tipc: make node link identity publish thread safe

   - dsa: felix: re-enable TAS guard band mode

   - stmmac: correct clocks enabled in stmmac_vlan_rx_kill_vid()

   - stmmac: fix system hang when changing the MAC address after
     interface ifdown

  Current release - new code bugs:

   - mptcp: avoid OOB access in setsockopt()

   - bpf: Fix nested bpf_bprintf_prepare with more per-cpu buffers

   - ethtool: stats: fix a copy-paste error - init correct array size

  Previous releases - regressions:

   - sched: fix packet stuck problem for lockless qdisc

   - net: really orphan skbs tied to closing sk

   - mlx4: fix EEPROM dump support

   - bpf: fix alu32 const subreg bound tracking on bitwise operations

   - bpf: fix mask direction swap upon off reg sign change

   - bpf, offload: reorder offload callback 'prepare' in verifier

   - stmmac: Fix MAC WoL not working if PHY does not support WoL

   - packetmmap: fix only tx timestamp on request

   - tipc: skb_linearize the head skb when reassembling msgs

  Previous releases - always broken:

   - mac80211: address recent "FragAttacks" vulnerabilities

   - mac80211: do not accept/forward invalid EAPOL frames

   - mptcp: avoid potential error message floods

   - bpf, ringbuf: deny reservation of buffers larger than the ringbuf
     to prevent out-of-bounds writes

   - bpf: forbid trampoline attach for functions with variable arguments

   - bpf: add a deny list of functions to prevent infinite recursion of
     tracing programs

   - tls splice: check SPLICE_F_NONBLOCK instead of MSG_DONTWAIT

   - can: isotp: prevent race between isotp_bind() and
     isotp_setsockopt()

   - netfilter: nft_set_pipapo_avx2: Add irq_fpu_usable() check,
     fallback to non-AVX2 version

  Misc:

   - bpf: add kconfig knob for disabling unpriv bpf by default"

* tag 'net-5.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (172 commits)
  net: phy: Document phydev::dev_flags bits allocation
  mptcp: validate 'id' when stopping the ADD_ADDR retransmit timer
  mptcp: avoid error message on infinite mapping
  mptcp: drop unconditional pr_warn on bad opt
  mptcp: avoid OOB access in setsockopt()
  nfp: update maintainer and mailing list addresses
  net: mvpp2: add buffer header handling in RX
  bnx2x: Fix missing error code in bnx2x_iov_init_one()
  net: zero-initialize tc skb extension on allocation
  net: hns: Fix kernel-doc
  sctp: fix the proc_handler for sysctl encap_port
  sctp: add the missing setting for asoc encap_port
  bpf, selftests: Adjust few selftest result_unpriv outcomes
  bpf: No need to simulate speculative domain for immediates
  bpf: Fix mask direction swap upon off reg sign change
  bpf: Wrap aux data inside bpf_sanitize_info container
  bpf: Fix BPF_LSM kconfig symbol dependency
  selftests/bpf: Add test for l3 use of bpf_redirect_peer
  bpftool: Add sock_release help info for cgroup attach/prog load command
  net: dsa: microchip: enable phy errata workaround on 9567
  ...
parents 7ac3a1c1 62f3415d
......@@ -1458,11 +1458,22 @@ unprivileged_bpf_disabled
=========================
Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
``-EPERM``.
once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
will return ``-EPERM``. Once set to 1, this can't be cleared from the
running kernel anymore.
Once set, this can't be cleared.
Writing 2 to this entry will also disable unprivileged calls to ``bpf()``,
however, an admin can still change this setting later on, if needed, by
writing 0 or 1 to this entry.
If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
entry will default to 2 instead of 0.
= =============================================================
0 Unprivileged calls to ``bpf()`` are enabled
1 Unprivileged calls to ``bpf()`` are disabled without recovery
2 Unprivileged calls to ``bpf()`` are disabled
= =============================================================
watchdog
========
......
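To make the documented values concrete, here is a minimal userspace sketch (illustrative only, not part of this commit) that reads the knob from its standard procfs path and reports what the current value means:

    /* Illustrative sketch: inspect kernel.unprivileged_bpf_disabled.
     * Assumes the standard procfs location of the sysctl; the value
     * meanings follow the table documented above.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
            int val;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%d", &val) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);

            if (val == 0)
                    puts("unprivileged bpf() calls are enabled");
            else if (val == 1)
                    puts("unprivileged bpf() calls are disabled and cannot be re-enabled at runtime");
            else if (val == 2)
                    puts("unprivileged bpf() calls are disabled, but an admin can still write 0 or 1");
            else
                    printf("unexpected value %d\n", val);
            return 0;
    }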
......@@ -10,7 +10,7 @@ allOf:
- $ref: ethernet-controller.yaml#
maintainers:
- Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
- Sergei Shtylyov <sergei.shtylyov@gmail.com>
properties:
compatible:
......
......@@ -4138,6 +4138,14 @@ S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
 
CBS/ETF/TAPRIO QDISCS
M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
S: Maintained
L: netdev@vger.kernel.org
F: net/sched/sch_cbs.c
F: net/sched/sch_etf.c
F: net/sched/sch_taprio.c
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
......@@ -5569,7 +5577,6 @@ F: drivers/soc/fsl/dpio
 
DPAA2 ETHERNET DRIVER
M: Ioana Ciornei <ioana.ciornei@nxp.com>
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
......@@ -12681,9 +12688,9 @@ F: drivers/rtc/rtc-ntxec.c
F: include/linux/mfd/ntxec.h
 
NETRONOME ETHERNET DRIVERS
M: Simon Horman <simon.horman@netronome.com>
M: Simon Horman <simon.horman@corigine.com>
R: Jakub Kicinski <kuba@kernel.org>
L: oss-drivers@netronome.com
L: oss-drivers@corigine.com
S: Maintained
F: drivers/net/ethernet/netronome/
 
......@@ -12710,7 +12717,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
......@@ -12755,7 +12761,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
......@@ -12897,8 +12902,10 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
 
NFC SUBSYSTEM
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
L: linux-nfc@lists.01.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Orphan
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/linux/platform_data/nfcmrvl.h
......@@ -13206,7 +13213,6 @@ F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
 
NXP-NCI NFC DRIVER
M: Clément Perrochaud <clement.perrochaud@effinnov.com>
R: Charles Gorand <charles.gorand@effinnov.com>
L: linux-nfc@lists.01.org (moderated for non-subscribers)
S: Supported
......@@ -15945,6 +15951,7 @@ S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/*iucv*
......@@ -15955,6 +15962,7 @@ S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/
......
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/
obj-$(CONFIG_NET) += net/
obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/
......@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
mlx5_ib_can_load_pas_with_umr(dev, 0))
ent->limit = dev->mdev->profile->mr_cache[i].limit;
ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
spin_lock_irq(&ent->lock);
......
......@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
card->typ = NETJET_S_TJ300;
card->base = pci_resource_start(pdev, 0);
card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
......
......@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
break;
}
dev->base_addr = ioaddr;
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
......@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
goto err_out;
}
dev->base_addr = ioaddr;
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
......
......@@ -1526,6 +1526,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
slave->bond = bond;
slave->dev = slave_dev;
INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
......@@ -1538,7 +1539,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
return NULL;
}
}
INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
......
......@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
u32 reg_rgmii_ctrl;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
......@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
......
......@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.phy_errata_9477 = true,
},
};
......
......@@ -1262,14 +1262,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
/* The real fabric path would be decided on the membership in the
* entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
* means potential VLAN can be consisting of certain subset of all
* ports.
*/
mt7530_rmw(priv, MT7530_PCR_P(port),
PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.
......
......@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
/* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
* guard band to be implemented for nonschedule queues to schedule
* queues transition.
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
* guard band which reserves the time of a maximum sized frame at the
* end of the time window.
*
* Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
* need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
* operate on the port number.
*/
ocelot_rmw(ocelot,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
......
......@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
/* Command and entry overlap */
static void
sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
......@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
/* Command and entry are separate */
static void
sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 9, 0, size, op);
}
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
......@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
......@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
......
......@@ -26,6 +26,7 @@
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
......@@ -207,6 +208,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
return -EINVAL;
}
/* Even though the SerDes port is able to drive SGMII autoneg
......@@ -321,6 +323,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
/* Set up a default VLAN for untagged traffic injected from the CPU
* using management routes (e.g. STP, PTP) as opposed to tag_8021q.
* All DT-defined ports are members of this VLAN, and there are no
* restrictions on forwarding (since the CPU selects the destination).
* Frames from this VLAN will always be transmitted as untagged, and
 * neither the bridge nor the 8021q module can create this VLAN ID.
*/
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
......@@ -330,17 +339,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
.vlanid = 1,
.vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
/* The static VLAN table will only contain the initial pvid of 1.
* All other VLANs are to be configured through dynamic entries,
* and kept in the static configuration table as backing memory.
*/
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
......@@ -353,9 +358,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
/* VLAN 1: all DT-defined ports are members; no restrictions on
* forwarding; always transmit as untagged.
*/
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
......@@ -366,15 +368,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
/* Let traffic that doesn't need dsa_8021q (e.g. STP, PTP) be
* transmitted as untagged.
*/
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
v->vid = 1;
v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
......@@ -2817,11 +2816,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
list_for_each_entry(v, vlan_list, list)
if (v->port == port && v->vid == vid &&
v->untagged == untagged && v->pvid == pvid)
list_for_each_entry(v, vlan_list, list) {
if (v->port == port && v->vid == vid) {
/* Already added */
return 0;
if (v->untagged == untagged && v->pvid == pvid)
/* Nothing changed */
return 0;
/* It's the same VLAN, but some of the flags changed
* and the user did not bother to delete it first.
* Update it and trigger sja1105_build_vlan_table.
*/
v->untagged = untagged;
v->pvid = pvid;
return 1;
}
}
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
......@@ -2976,13 +2986,13 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
return rc;
goto out_ptp_clock_unregister;
}
/* Configure the CGU (PHY link modes and speeds) */
rc = sja1105_clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
return rc;
goto out_static_config_free;
}
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
......@@ -3003,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
return rc;
goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
......@@ -3012,6 +3022,17 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
if (rc)
goto out_devlink_teardown;
return 0;
out_devlink_teardown:
sja1105_devlink_teardown(ds);
out_ptp_clock_unregister:
sja1105_ptp_clock_unregister(ds);
out_static_config_free:
sja1105_static_config_free(&priv->static_config);
return rc;
}
......@@ -3646,8 +3667,10 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
if (!priv->cbs)
return -ENOMEM;
if (!priv->cbs) {
rc = -ENOMEM;
goto out_unregister_switch;
}
}
/* Connections between dsa_port and sja1105_port */
......@@ -3672,7 +3695,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
goto out;
goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
......@@ -3682,7 +3705,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
out:
out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
......@@ -3691,6 +3715,10 @@ static int sja1105_probe(struct spi_device *spi)
kthread_destroy_worker(sp->xmit_worker);
}
out_unregister_switch:
dsa_unregister_switch(ds);
return rc;
}
......
......@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
rc = -EPERM;
goto err_out_unmap;
}
......
......@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0)
if (iov->total == 0) {
err = -EINVAL;
goto failed;
}
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
......
......@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
......@@ -6932,17 +6933,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
u8 pg_size = 0;
if (!rmem->nr_pages)
return;
if (BNXT_PAGE_SHIFT == 13)
pg_size = 1 << 4;
else if (BNXT_PAGE_SIZE == 16)
pg_size = 2 << 4;
*pg_attr = pg_size;
BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
......@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
int hdr_count = 0;
u8 *nexthdr;
int start;
/* Check that there are at most 2 IPv6 extension headers, no
* fragment header, and each is <= 64 bytes.
*/
start = nw_off + sizeof(*ip6h);
nexthdr = &ip6h->nexthdr;
while (ipv6_ext_hdr(*nexthdr)) {
struct ipv6_opt_hdr *hp;
int hdrlen;
if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
*nexthdr == NEXTHDR_FRAGMENT)
return false;
hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
skb_headlen(skb), NULL);
if (!hp)
return false;
if (*nexthdr == NEXTHDR_AUTH)
hdrlen = ipv6_authlen(hp);
else
hdrlen = ipv6_optlen(hp);
if (hdrlen > 64)
return false;
nexthdr = &hp->nexthdr;
start += hdrlen;
hdr_count++;
}
if (nextp) {
/* Caller will check inner protocol */
if (skb->encapsulation) {
*nextp = nexthdr;
return true;
}
*nextp = NULL;
}
/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
}
/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
{
struct udphdr *uh = udp_hdr(skb);
__be16 udp_port = uh->dest;
if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
return false;
if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
struct ethhdr *eh = inner_eth_hdr(skb);
switch (eh->h_proto) {
case htons(ETH_P_IP):
return true;
case htons(ETH_P_IPV6):