Commit 291abfea authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.

 2) Fix thermal zone registration in iwlwifi, from Andrei
    Otcheretianski.

 3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.

 4) Use after free in mptcp, from Florian Westphal.

 5) Use after free in wireguard's root_remove_peer_lists, from Eric
    Dumazet.

 6) Properly access packet heads in bonding alb code, from Eric
    Dumazet.

 7) Fix data race in skb_queue_len(), from Qian Cai.

 8) Fix regression in r8169 on some chips, from Heiner Kallweit.

 9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.

10) Certain kinds of set link netlink operations can cause a NULL deref
    in the ipv6 addrconf code. Fix from Eric Dumazet.

11) Don't cancel uninitialized work queue in drop monitor, from Ido
    Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
  net: thunderx: use proper interface type for RGMII
  mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
  bpf: Improve bucket_log calculation logic
  selftests/bpf: Test freeing sockmap/sockhash with a socket in it
  bpf, sockhash: Synchronize_rcu before free'ing map
  bpf, sockmap: Don't sleep while holding RCU lock on tear-down
  bpftool: Don't crash on missing xlated program instructions
  bpf, sockmap: Check update requirements after locking
  drop_monitor: Do not cancel uninitialized work item
  mlxsw: spectrum_dpipe: Add missing error path
  mlxsw: core: Add validation of hardware device types for MGPIR register
  mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
  selftests: mlxsw: Add test cases for local table route replacement
  mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
  net: dsa: microchip: enable module autoprobe
  ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
  dpaa_eth: support all modes with rate adapting PHYs
  net: stmmac: update pci platform data to use phy_interface
  net: stmmac: xgmac: fix missing IFF_MULTICAST check in dwxgmac2_set_filter
  net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
  ...
parents d4f309ca 29ca3b31
@@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
-	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP: {
-		const struct iphdr *iph = ip_hdr(skb);
+		const struct iphdr *iph;
 
 		if (is_broadcast_ether_addr(eth_data->h_dest) ||
-		    iph->daddr == ip_bcast ||
-		    iph->protocol == IPPROTO_IGMP) {
+		    !pskb_network_may_pull(skb, sizeof(*iph))) {
+			do_tx_balance = false;
+			break;
+		}
+		iph = ip_hdr(skb);
+		if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
 			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
 		hash_size = sizeof(iph->daddr);
-	}
 		break;
-	case ETH_P_IPV6:
+	}
+	case ETH_P_IPV6: {
+		const struct ipv6hdr *ip6hdr;
+
 		/* IPv6 doesn't really use broadcast mac address, but leave
 		 * that here just in case.
 		 */
@@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		/* Additianally, DAD probes should not be tx-balanced as that
+		if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		/* Additionally, DAD probes should not be tx-balanced as that
 		 * will lead to false positives for duplicate addresses and
 		 * prevent address configuration from working.
 		 */
@@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
-		hash_size = sizeof(ipv6_hdr(skb)->daddr);
+		hash_start = (char *)&ip6hdr->daddr;
+		hash_size = sizeof(ip6hdr->daddr);
 		break;
-	case ETH_P_IPX:
-		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+	}
+	case ETH_P_IPX: {
+		const struct ipxhdr *ipxhdr;
+
+		if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+		if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
 			do_tx_balance = false;
 			break;
 		}
 
-		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+		if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
 			/* The only protocol worth balancing in
 			 * this family since it has an "ARP" like
 			 * mechanism
@@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
+		eth_data = eth_hdr(skb);
 		hash_start = (char *)eth_data->h_dest;
 		hash_size = ETH_ALEN;
 		break;
+	}
 	case ETH_P_ARP:
 		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
...
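The bond_alb change above fixes a class of bug where header fields were read before making sure the header bytes are actually present in the skb's linear data area; on a non-linear skb, ip_hdr()/ipv6_hdr() and friends can point at paged data that must be pulled first. A minimal sketch of the guard pattern outside the bonding code (the helper name is hypothetical):

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    /* Returns false when the IPv4 header cannot be safely accessed. */
    static bool example_ipv4_header_ok(struct sk_buff *skb)
    {
            const struct iphdr *iph;

            /* Make sure at least sizeof(struct iphdr) bytes starting at
             * the network header are in the linear data area.
             */
            if (!pskb_network_may_pull(skb, sizeof(*iph)))
                    return false;

            /* Only now is ip_hdr(skb) guaranteed to be readable. */
            iph = ip_hdr(skb);
            return iph->version == 4;
    }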
@@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
 	}
 
-	b53_enable_vlan(dev, false, ds->vlan_filtering);
+	b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
 
 	b53_for_each_port(dev, i)
 		b53_write16(dev, B53_VLAN_PAGE,
...
@@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 		/* Force link status for IMP port */
 		reg = core_readl(priv, offset);
-		reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+		reg |= (MII_SW_OR | LINK_STS);
+		if (priv->type == BCM7278_DEVICE_ID)
+			reg |= GMII_SPEED_UP_2G;
 		core_writel(priv, reg, offset);
 
 		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
...
@@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
 
 module_spi_driver(ksz9477_spi_driver);
 
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
 MODULE_LICENSE("GPL");
@@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
 	umac_reset(priv);
 
+	/* Disable the UniMAC RX/TX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */
...
@@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
+ * false amba_error in TX path from the DMA assuming there is not enough
+ * space in the SRAM (16KB) even when there is.
+ */
+#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
 #define MACB_NETIF_LSO		NETIF_F_TSO
@@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
 
 	/* Validate LSO compatibility */
 
-	/* there is only one buffer */
-	if (!skb_is_nonlinear(skb))
+	/* there is only one buffer or protocol is not UDP */
+	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
 		return features;
 
 	/* length of header */
 	hdrlen = skb_transport_offset(skb);
-	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-		hdrlen += tcp_hdrlen(skb);
 
-	/* For LSO:
+	/* For UFO only:
 	 * When software supplies two or more payload buffers all payload buffers
 	 * apart from the last must be a multiple of 8 bytes in size.
 	 */
...
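For reference on the macb macros: ((1 << n) - 1) is the largest value an n-bit frame-length field can hold, and & ~(MACB_TX_LEN_ALIGN - 1) rounds that down to a multiple of 8; the new GEM cap 0x3FC0 (16320) stays below the 16KB SRAM mentioned in the errata comment. A standalone check of the arithmetic (the 11- and 14-bit field widths are assumptions taken from macb.h, not shown in this diff):

    #include <stdio.h>

    #define TX_LEN_ALIGN     8U
    #define MACB_FRMLEN_BITS 11  /* assumed MACB_TX_FRMLEN_SIZE */
    #define GEM_FRMLEN_BITS  14  /* assumed GEM_TX_FRMLEN_SIZE */

    int main(void)
    {
            unsigned int macb_max = ((1U << MACB_FRMLEN_BITS) - 1) & ~(TX_LEN_ALIGN - 1);
            unsigned int gem_old  = ((1U << GEM_FRMLEN_BITS) - 1) & ~(TX_LEN_ALIGN - 1);

            printf("MACB_MAX_TX_LEN = %u\n", macb_max); /* 2040 */
            printf("old GEM max     = %u\n", gem_old);  /* 16376 */
            printf("new GEM cap     = %u\n", 0x3FC0);   /* 16320, < 16KB SRAM */
            return 0;
    }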
@@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
 	if (lmac_type == BGX_MODE_QSGMII)
 		return PHY_INTERFACE_MODE_QSGMII;
 	if (lmac_type == BGX_MODE_RGMII)
-		return PHY_INTERFACE_MODE_RGMII;
+		return PHY_INTERFACE_MODE_RGMII_RXID;
 
 	return PHY_INTERFACE_MODE_SGMII;
 }
...
@@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic_read(&adap->chcr_stats.fallback));
 	seq_printf(seq, "IPSec PDU: %10u\n",
 		   atomic_read(&adap->chcr_stats.ipsec_cnt));
+	seq_printf(seq, "TLS PDU Tx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_tx));
+	seq_printf(seq, "TLS PDU Rx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_rx));
+	seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_key));
+
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(chcr_stats);
...
@@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
 		if (status & DescOwn)
 			break;
 
-		len = ((status >> 16) & 0x7ff) - 4;
+		/* the length is actually a 15 bit value here according
+		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
+		 */
+		len = ((status >> 16) & 0x7fff) - 4;
 		mapping = de->rx_skb[rx_tail].mapping;
 
 		if (unlikely(drop)) {
...
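The effect of widening the de2104x mask: an 11-bit mask silently wraps any length of 2048 bytes or more that the 15-bit descriptor field can report. A quick standalone illustration (descriptor layout assumed from the comment above, length in bits 16..30 of status):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t status = (uint32_t)2048 << 16; /* descriptor length field */

            printf("old mask: %u\n", (status >> 16) & 0x7ff);  /* 0   (wrapped) */
            printf("new mask: %u\n", (status >> 16) & 0x7fff); /* 2048          */
            return 0;
    }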
@@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
 	mac_dev->adjust_link(mac_dev);
 }
 
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA	0x03a1b400
+
 static int dpaa_phy_init(struct net_device *net_dev)
 {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
 		return -ENODEV;
 	}
 
-	/* Remove any features not supported by the controller */
-	ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
-	linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	/* Unless the PHY is capable of rate adaptation */
+	if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+	    ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+		/* remove any features not supported by the controller */
+		ethtool_convert_legacy_u32_to_link_mode(mask,
+							mac_dev->if_support);
+		linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	}
 
 	phy_support_asym_pause(phy_dev);
...
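In the dpaa change, GENMASK(31, 10) keeps the OUI/model bits of the PHY ID and discards the low revision bits, so the single PHY_VEND_AQUANTIA constant matches every revision of those parts. A userspace rendering of the same comparison (the sample PHY ID is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* expands to a mask with bits h..l set, like the kernel's GENMASK() */
    #define GENMASK32(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))
    #define PHY_VEND_AQUANTIA 0x03a1b400U

    int main(void)
    {
            uint32_t phy_id = 0x03a1b4a2; /* hypothetical Aquantia PHY id */

            if ((phy_id & GENMASK32(31, 10)) == PHY_VEND_AQUANTIA)
                    printf("Aquantia PHY: keep all supported link modes\n");
            return 0;
    }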
@@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 	struct i40e_ring *ring;
 
 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
-		return -ENETDOWN;
+		return -EAGAIN;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
...
@@ -401,6 +401,8 @@ struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
 	u64	rx_packets;
 	u64	rx_bytes;
+	u64	rx_dropped;
+	u64	rx_errors;
 	u64	tx_packets;
 	u64	tx_bytes;
 };
@@ -738,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
 		struct mvneta_pcpu_stats *cpu_stats;
 		u64 rx_packets;
 		u64 rx_bytes;
+		u64 rx_dropped;
+		u64 rx_errors;
 		u64 tx_packets;
 		u64 tx_bytes;
@@ -746,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			rx_packets = cpu_stats->rx_packets;
 			rx_bytes   = cpu_stats->rx_bytes;
+			rx_dropped = cpu_stats->rx_dropped;
+			rx_errors  = cpu_stats->rx_errors;
 			tx_packets = cpu_stats->tx_packets;
 			tx_bytes   = cpu_stats->tx_bytes;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes   += rx_bytes;
+		stats->rx_dropped += rx_dropped;
+		stats->rx_errors  += rx_errors;
 		stats->tx_packets += tx_packets;
 		stats->tx_bytes   += tx_bytes;
 	}
 
-	stats->rx_errors	= dev->stats.rx_errors;
-	stats->rx_dropped	= dev->stats.rx_dropped;
 	stats->tx_dropped	= dev->stats.tx_dropped;
 }
@@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
 static void mvneta_rx_error(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	u32 status = rx_desc->status;
 
+	/* update per-cpu counter */
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_errors++;
+	u64_stats_update_end(&stats->syncp);
+
 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
 	case MVNETA_RXD_ERR_CRC:
 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
 	if (unlikely(!rxq->skb)) {
-		netdev_err(dev,
-			   "Can't allocate skb on queue %d\n",
-			   rxq->id);
-		dev->stats.rx_dropped++;
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
 		rxq->skb_alloc_err++;
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_dropped++;
+		u64_stats_update_end(&stats->syncp);
+
 		return -ENOMEM;
 	}
 	page_pool_release_page(rxq->page_pool, page);
@@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		/* Check errors only for FIRST descriptor */
 		if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 			mvneta_rx_error(pp, rx_desc);
-			dev->stats.rx_errors++;
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -2372,7 +2386,6 @@ err_drop_frame_ret_pool:
 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
 					      rx_desc->buf_phys_addr);
 err_drop_frame:
-			dev->stats.rx_errors++;
 			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
...
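The mvneta hunks all follow one pattern: move rx_errors/rx_dropped out of the shared, unsynchronized dev->stats and into the existing per-cpu mvneta_pcpu_stats, where u64_stats_update_begin/end mark writer sections and the fetch/retry loop gives readers a consistent 64-bit snapshot even on 32-bit machines. A reduced sketch of that pattern (struct and function names here are illustrative, not mvneta's):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct example_pcpu_stats {
            struct u64_stats_sync syncp;
            u64 rx_errors;
    };

    /* writer side, e.g. in the rx path: per-cpu, so no lock is needed */
    static void example_count_rx_error(struct example_pcpu_stats __percpu *stats)
    {
            struct example_pcpu_stats *s = this_cpu_ptr(stats);

            u64_stats_update_begin(&s->syncp);
            s->rx_errors++;
            u64_stats_update_end(&s->syncp);
    }

    /* reader side: retry until a torn 64-bit read is ruled out, then sum */
    static u64 example_read_rx_errors(struct example_pcpu_stats __percpu *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
                    unsigned int start;
                    u64 v;

                    do {
                            start = u64_stats_fetch_begin_irq(&s->syncp);
                            v = s->rx_errors;
                    } while (u64_stats_fetch_retry_irq(&s->syncp, start));
                    total += v;
            }
            return total;
    }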
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
 
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-	if (!MLX5_CAP_GEN(mdev, tls))
+	if (!MLX5_CAP_GEN(mdev, tls_tx))
 		return false;
 
 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
...
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
 		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 		goto out;
 	}
...
...@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter); wqe_counter = be16_to_cpu(cqe->wqe_counter);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
do { do {
struct mlx5e_sq_wqe_info *wi; struct mlx5e_sq_wqe_info *wi;
u16 ci; u16 ci;
...@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.ico_wqe[ci]; wi = &sq->db.ico_wqe[ci];
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
if (likely(wi->opcode == MLX5_OPCODE_UMR)) { if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
sqcc += MLX5E_UMR_WQEBBS; sqcc += MLX5E_UMR_WQEBBS;
wi->umr.rq->mpwqe.umr_completed++; wi->umr.rq->mpwqe.umr_completed++;
......
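Both mlx5 hunks (this one and the TX CQ hunk below) rely on the same idiom for kicking off recovery: test_and_set_bit() returns the previous bit value, so only the first error completion queues the recovery work, no matter how many bad CQEs follow. A reduced sketch of that idiom (names are illustrative, not the driver's):

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define EXAMPLE_STATE_RECOVERING 0

    struct example_sq {
            unsigned long state;
            struct work_struct recover_work;
    };

    static void example_on_error_cqe(struct example_sq *sq)
    {
            /* Old bit value is returned: only the first caller queues work;
             * the bit is cleared again by the recovery handler when done.
             */
            if (!test_and_set_bit(EXAMPLE_STATE_RECOVERING, &sq->state))
                    schedule_work(&sq->recover_work);
    }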
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	i = 0;
 	do {
+		struct mlx5e_tx_wqe_info *wi;
 		u16 wqe_counter;
 		bool last_wqe;
+		u16 ci;
 
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-					      &sq->state)) {
-				struct mlx5e_tx_wqe_info *wi;
-				u16 ci;
-
-				ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-				wi = &sq->db.wqe_info[ci];
-				mlx5e_dump_error_cqe(sq,
-						     (struct mlx5_err_cqe *)cqe);
-				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-				queue_work(cq->channel->priv->wq,
-					   &sq->recover_work);
-			}
-			stats->cqe_err++;
-		}
-
 		do {
-			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
-			u16 ci;