Mirror of https://github.com/torvalds/linux.git, synced 2025-08-15 14:11:42 +02:00.
Merge tag 'net-6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from Netfilter and IPsec.

  Current release - regressions:

   - netfilter: nft_set_pipapo:
      - don't return bogus extension pointer
      - fix null deref for empty set

  Current release - new code bugs:

   - core: prevent deadlocks when enabling NAPIs with mixed kthread config

   - eth: netdevsim: Fix wild pointer access in nsim_queue_free().

  Previous releases - regressions:

   - page_pool: allow enabling recycling late, fix false positive warning

   - sched: ets: use old 'nbands' while purging unused classes

   - xfrm:
      - restore GSO for SW crypto
      - bring back device check in validate_xmit_xfrm

   - tls: handle data disappearing from under the TLS ULP

   - ptp: prevent possible ABBA deadlock in ptp_clock_freerun()

   - eth:
      - bnxt: fill data page pool with frags if PAGE_SIZE > BNXT_RX_PAGE_SIZE
      - hv_netvsc: fix panic during namespace deletion with VF

  Previous releases - always broken:

   - netfilter: fix refcount leak on table dump

   - vsock: do not allow binding to VMADDR_PORT_ANY

   - sctp: linearize cloned gso packets in sctp_rcv

   - eth:
      - hibmcge: fix the division by zero issue
      - microchip: fix KSZ8863 reset problem"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-6.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (54 commits)
  net: usb: asix_devices: add phy_mask for ax88772 mdio bus
  net: kcm: Fix race condition in kcm_unattach()
  selftests: net/forwarding: test purge of active DWRR classes
  net/sched: ets: use old 'nbands' while purging unused classes
  bnxt: fill data page pool with frags if PAGE_SIZE > BNXT_RX_PAGE_SIZE
  netdevsim: Fix wild pointer access in nsim_queue_free().
  net: mctp: Fix bad kfree_skb in bind lookup test
  netfilter: nf_tables: reject duplicate device on updates
  ipvs: Fix estimator kthreads preferred affinity
  netfilter: nft_set_pipapo: fix null deref for empty set
  selftests: tls: test TCP stealing data from under the TLS socket
  tls: handle data disappearing from under the TLS ULP
  ptp: prevent possible ABBA deadlock in ptp_clock_freerun()
  ixgbe: prevent from unwanted interface name changes
  devlink: let driver opt out of automatic phys_port_name generation
  net: prevent deadlocks when enabling NAPIs with mixed kthread config
  net: update NAPI threaded config even for disabled NAPIs
  selftests: drv-net: don't assume device has only 2 queues
  docs: Fix name for net.ipv4.udp_child_hash_entries
  riscv: dts: thead: Add APB clocks for TH1520 GMACs
  ...
This commit is contained in commit 63467137ec.

65 changed files with 430 additions and 158 deletions
@@ -62,11 +62,13 @@ properties:
     items:
       - description: GMAC main clock
       - description: Peripheral registers interface clock
+      - description: APB glue registers interface clock

   clock-names:
     items:
       - const: stmmaceth
       - const: pclk
+      - const: apb

   interrupts:
     items:
@@ -88,8 +90,8 @@ examples:
         compatible = "thead,th1520-gmac", "snps,dwmac-3.70a";
         reg = <0xe7070000 0x2000>, <0xec003000 0x1000>;
         reg-names = "dwmac", "apb";
-        clocks = <&clk 1>, <&clk 2>;
-        clock-names = "stmmaceth", "pclk";
+        clocks = <&clk 1>, <&clk 2>, <&clk 3>;
+        clock-names = "stmmaceth", "pclk", "apb";
         interrupts = <66>;
         interrupt-names = "macirq";
         phy-mode = "rgmii-id";

@@ -1420,7 +1420,7 @@ udp_hash_entries - INTEGER
	A negative value means the networking namespace does not own its
	hash buckets and shares the initial networking namespace's one.

-udp_child_ehash_entries - INTEGER
+udp_child_hash_entries - INTEGER
	Control the number of hash buckets for UDP sockets in the child
	networking namespace, which must be set before clone() or unshare().

@@ -12584,10 +12584,9 @@ S: Supported
 F:	drivers/cpufreq/intel_pstate.c

 INTEL PTP DFL ToD DRIVER
-M:	Tianfei Zhang <tianfei.zhang@intel.com>
 L:	linux-fpga@vger.kernel.org
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/ptp/ptp_dfl_tod.c

 INTEL QUADRATURE ENCODER PERIPHERAL DRIVER
@@ -12725,9 +12724,8 @@ S: Maintained
 F:	drivers/platform/x86/intel/wmi/thunderbolt.c

 INTEL WWAN IOSM DRIVER
-M:	M Chetan Kumar <m.chetan.kumar@intel.com>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/wwan/iosm/

 INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY
@@ -15674,7 +15672,6 @@ MEDIATEK T7XX 5G WWAN MODEM DRIVER
 M:	Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 R:	Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
 R:	Liu Haijun <haijun.liu@mediatek.com>
-R:	M Chetan Kumar <m.chetan.kumar@linux.intel.com>
 R:	Ricardo Martinez <ricardo.martinez@linux.intel.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -17451,6 +17448,7 @@ F: drivers/net/ethernet/neterion/
 NETFILTER
 M:	Pablo Neira Ayuso <pablo@netfilter.org>
 M:	Jozsef Kadlecsik <kadlec@netfilter.org>
+M:	Florian Westphal <fw@strlen.de>
 L:	netfilter-devel@vger.kernel.org
 L:	coreteam@netfilter.org
 S:	Maintained

@@ -297,8 +297,9 @@
			reg-names = "dwmac", "apb";
			interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "macirq";
-			clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>;
-			clock-names = "stmmaceth", "pclk";
+			clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>,
+				 <&clk CLK_PERISYS_APB4_HCLK>;
+			clock-names = "stmmaceth", "pclk", "apb";
			snps,pbl = <32>;
			snps,fixed-burst;
			snps,multicast-filter-bins = <64>;
@@ -319,8 +320,9 @@
			reg-names = "dwmac", "apb";
			interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "macirq";
-			clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>;
-			clock-names = "stmmaceth", "pclk";
+			clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>,
+				 <&clk CLK_PERISYS_APB4_HCLK>;
+			clock-names = "stmmaceth", "pclk", "apb";
			snps,pbl = <32>;
			snps,fixed-burst;
			snps,multicast-filter-bins = <64>;

@@ -36,15 +36,14 @@

 static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
 {
-	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+	ksz_rmw8(dev, addr, bits, set ? bits : 0);
 }

 static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
 {
-	regmap_update_bits(ksz_regmap_8(dev),
-			   dev->dev_ops->get_port_addr(port, offset),
-			   bits, set ? bits : 0);
+	ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+		 set ? bits : 0);
 }

 /**
@@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds)
	ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);

	/* Enable aggressive back off algorithm in half duplex mode. */
-	regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
-			   SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+	ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+	if (ret)
+		return ret;

	/*
	 * Make sure unicast VLAN boundary is set as default and
	 * enable no excessive collision drop.
	 */
-	regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
-			   UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
-			   UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+	ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+		       UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+		       UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+	if (ret)
+		return ret;

	ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);

@@ -1447,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
	regmap_reg_range(0x3f, 0x3f),

	/* advanced control registers */
+	regmap_reg_range(0x43, 0x43),
	regmap_reg_range(0x60, 0x6f),
	regmap_reg_range(0x70, 0x75),
	regmap_reg_range(0x76, 0x78),

@@ -926,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,

 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
					 gfp_t gfp)
 {
	netmem_ref netmem;

-	netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+	} else {
+		netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+		*offset = 0;
+	}
	if (!netmem)
		return 0;

-	*mapping = page_pool_get_dma_addr_netmem(netmem);
+	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
 }

@@ -1029,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
	dma_addr_t mapping;
	netmem_ref netmem;

-	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+	netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
	if (!netmem)
		return -ENOMEM;

@@ -3819,7 +3825,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
	if (BNXT_RX_PAGE_MODE(bp))
		pp.pool_size += bp->rx_ring_size / rx_size_fac;
	pp.nid = numa_node;
-	pp.napi = &rxr->bnapi->napi;
	pp.netdev = bp->dev;
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = bp->rx_dir;
@@ -3851,6 +3856,12 @@ err_destroy_pp:
	return PTR_ERR(pool);
 }

+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
	u16 mem_size;
@@ -3889,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
		rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
		if (rc)
			return rc;
+		bnxt_enable_rx_page_pool(rxr);

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
		if (rc < 0)
@@ -16031,6 +16043,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
		goto err_reset;
	}

+	bnxt_enable_rx_page_pool(rxr);
	napi_enable_locked(&bnapi->napi);
	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);

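For orientation, here is a minimal sketch of the allocation pattern the bnxt hunks above adopt. This is illustrative only (the wrapper name is not the driver's): on kernels whose PAGE_SIZE exceeds the 4K BNXT_RX_PAGE_SIZE, each page is carved into page-pool frags, and the returned DMA address must include the frag offset.

/* Hedged sketch, assuming BNXT_RX_PAGE_SIZE < PAGE_SIZE (e.g. a 64K-page
 * arm64/ppc64 kernel): one page then backs multiple rx buffers, so the
 * mapping is the page's base DMA address plus the frag offset.
 */
static netmem_ref rx_buf_alloc(struct page_pool *pool, dma_addr_t *mapping,
			       unsigned int *offset, gfp_t gfp)
{
	netmem_ref netmem;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		netmem = page_pool_alloc_frag_netmem(pool, offset,
						     BNXT_RX_PAGE_SIZE, gfp);
	} else {
		netmem = page_pool_alloc_netmems(pool, gfp);
		*offset = 0;	/* whole page, no frag offset */
	}
	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
	return netmem;
}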
@@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
 {
	int ret;

-	ASSERT_RTNL();
+	if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+		return -EBUSY;

	if (netif_running(priv->netdev)) {
+		clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
		dev_warn(&priv->pdev->dev,
			 "failed to reset because port is up\n");
		return -EBUSY;
@@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
	netif_device_detach(priv->netdev);

	priv->reset_type = type;
-	set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
	clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
	ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
	if (ret) {
@@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
	    type != priv->reset_type)
		return 0;

-	ASSERT_RTNL();
-
-	clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
	ret = hbg_rebuild(priv);
	if (ret) {
		priv->stats.reset_fail_cnt++;
		set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+		clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
		dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
		return ret;
	}

	netif_device_attach(priv->netdev);
+	clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);

	dev_info(&priv->pdev->dev, "reset done\n");
	return ret;
 }

 /* must be protected by rtnl lock */
 int hbg_reset(struct hbg_priv *priv)
 {
	int ret;

	ASSERT_RTNL();
	ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
	if (ret)
		return ret;
@@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct hbg_priv *priv = netdev_priv(netdev);

-	rtnl_lock();
	hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
 }

@@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
	struct hbg_priv *priv = netdev_priv(netdev);

	hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
-	rtnl_unlock();
 }

 static const struct pci_error_handlers hbg_pci_err_handler = {

@@ -12,6 +12,8 @@

 #define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
 #define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US	(500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US	(5 * 1000)
 /* little endian or big endian.
  * ctrl means packet description, data means skb packet data
  */
@@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)

 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
 {
+	u32 link_status;
+	int ret;
+
	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);

	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
@@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)

	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

-	if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
-				HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+	/* wait MAC link up */
+	ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+				 link_status,
+				 FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+					   link_status),
+				 HBG_MAC_LINK_WAIT_INTERVAL_US,
+				 HBG_MAC_LINK_WAIT_TIMEOUT_US);
+	if (ret)
		hbg_np_link_fail_task_schedule(priv);
 }

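The link wait above is built on the generic iopoll helper: readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads addr into val until cond becomes true or timeout_us elapses, returning 0 on success or -ETIMEDOUT. A hedged, self-contained sketch with hypothetical register names (STATUS_REG and LINK_OK are not hibmcge symbols):

#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define STATUS_REG	0x10	/* hypothetical status register offset */
#define LINK_OK		BIT(0)	/* hypothetical link-up bit */

/* Poll every ~5 ms and give up after 500 ms, mirroring the timeouts above. */
static int wait_link_up(void __iomem *base)
{
	u32 status;

	return readl_poll_timeout(base + STATUS_REG, status,
				  FIELD_GET(LINK_OK, status),
				  5 * 1000, 500 * 1000);
}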
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)

 static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
 {
-	return (ring->ntu + ring->len - ring->ntc) % ring->len;
+	u32 len = READ_ONCE(ring->len);
+
+	if (!len)
+		return 0;
+
+	return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
 }

 netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);

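A worked example of the guard above: with ntu = 5, ntc = 2 and len = 16, the helper returns (5 + 16 - 2) % 16 = 3 descriptors in flight. Before the fix, a ring whose length had already been torn down to 0 made the unguarded "% ring->len" a divide-by-zero, hence the READ_ONCE() snapshot of len plus the early return.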
@@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = adapter->hw.bus.func;
+	attrs.no_phys_port_name = 1;
	ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);

@@ -330,15 +330,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

-	ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+	ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
	if (ret < 0)
-		return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+		return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
	plat_dat->num_clks = ret;

-	ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
-	if (ret)
-		return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
-
	plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
						     data->stmmac_clk_name);

@@ -346,7 +342,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
	ret = data->probe(pdev, plat_dat, &stmmac_res);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
-		clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
		return ret;
	}

@@ -370,15 +365,11 @@ remove:
 static void dwc_eth_dwmac_remove(struct platform_device *pdev)
 {
	const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
-	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);

	stmmac_dvr_remove(&pdev->dev);

	if (data->remove)
		data->remove(pdev);
-
-	if (plat_dat)
-		clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
 }

 static const struct of_device_id dwc_eth_dwmac_match[] = {

@@ -1765,11 +1765,15 @@ err_gmac_powerdown:

 static void rk_gmac_remove(struct platform_device *pdev)
 {
-	struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
+	struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
+	struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;

	stmmac_dvr_remove(&pdev->dev);

	rk_gmac_powerdown(bsp_priv);
+
+	if (priv->plat->phy_node && bsp_priv->integrated_phy)
+		clk_put(bsp_priv->clk_phy);
 }

 #ifdef CONFIG_PM_SLEEP

@@ -211,6 +211,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
	struct stmmac_resources stmmac_res;
	struct plat_stmmacenet_data *plat;
	struct thead_dwmac *dwmac;
+	struct clk *apb_clk;
	void __iomem *apb;
	int ret;

@@ -224,6 +225,19 @@ static int thead_dwmac_probe(struct platform_device *pdev)
		return dev_err_probe(&pdev->dev, PTR_ERR(plat),
				     "dt configuration failed\n");

+	/*
+	 * The APB clock is essential for accessing glue registers. However,
+	 * old devicetrees don't describe it correctly. We continue to probe
+	 * and emit a warning if it isn't present.
+	 */
+	apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
+	if (PTR_ERR(apb_clk) == -ENOENT)
+		dev_warn(&pdev->dev,
+			 "cannot get apb clock, link may break after speed changes\n");
+	else if (IS_ERR(apb_clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
+				     "failed to get apb clock\n");
+
	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
	if (!dwmac)
		return -ENOMEM;

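A note on the design choice above: devm_clk_get_enabled() yields -ENOENT when the DT omits the clock, which is exactly what lets the driver keep probing old devicetrees while warning. Where no warning is wanted, the clk API also offers devm_clk_get_optional_enabled(), which returns NULL rather than -ENOENT for an absent clock. A hedged sketch of that variant (it would silently accept old DTs, so it is not what this fix chose):

/* Sketch only: NULL (not -ENOENT) is returned when the DT omits the
 * clock, so no explicit -ENOENT check and no warning are needed.
 */
apb_clk = devm_clk_get_optional_enabled(&pdev->dev, "apb");
if (IS_ERR(apb_clk))
	return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
			     "failed to get apb clock\n");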
@@ -621,7 +621,8 @@ exit:

 static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
 {
-	u32 val, cap, ret = 0;
+	u32 val, cap;
+	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

@@ -50,6 +50,8 @@
 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */
 #define ICSSG_CTRL_RGMII_ID_MODE	BIT(24)

+static void emac_adjust_link(struct net_device *ndev);
+
 static int emac_get_tx_ts(struct prueth_emac *emac,
			  struct emac_tx_ts_response *rsp)
 {
@@ -229,6 +231,10 @@ static int prueth_emac_common_start(struct prueth *prueth)
		ret = icssg_config(prueth, emac, slice);
		if (ret)
			goto disable_class;
+
+		mutex_lock(&emac->ndev->phydev->lock);
+		emac_adjust_link(emac->ndev);
+		mutex_unlock(&emac->ndev->phydev->lock);
	}

	ret = prueth_emac_start(prueth);

@@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)

 static inline int dev_is_ethdev(struct net_device *dev)
 {
-	return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+	return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
 }

 /* ------------------------------------------------------------------------ */

@@ -1061,6 +1061,7 @@ struct net_device_context {
	struct net_device __rcu *vf_netdev;
	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
	struct delayed_work vf_takeover;
+	struct delayed_work vfns_work;

	/* 1: allocated, serial number is valid. 0: not allocated */
	u32 vf_alloc;
@@ -1075,6 +1076,8 @@ struct net_device_context {
	struct netvsc_device_info *saved_netvsc_dev_info;
 };

+void netvsc_vfns_work(struct work_struct *w);
+
 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
  * packets. We can use ethtool to change UDP hash level when necessary.
  */

@@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev,
	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
+	INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev)
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
+	cancel_delayed_work_sync(&ndev_ctx->vfns_work);
+
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);
@@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev)
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
+	cancel_delayed_work_sync(&ndev_ctx->vfns_work);

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev == NULL) {
@@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
	}
 }

+void netvsc_vfns_work(struct work_struct *w)
+{
+	struct net_device_context *ndev_ctx =
+		container_of(w, struct net_device_context, vfns_work.work);
+	struct net_device *ndev;
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&ndev_ctx->vfns_work, 1);
+		return;
+	}
+
+	ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+	if (!ndev)
+		goto out;
+
+	netvsc_event_set_vf_ns(ndev);
+
+out:
+	rtnl_unlock();
+}
+
 /*
  * On Hyper-V, every VF interface is matched with a corresponding
  * synthetic interface. The synthetic interface is presented first
@@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
 {
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+	struct net_device_context *ndev_ctx;
	int ret = 0;

	if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
-		netvsc_event_set_vf_ns(event_dev);
+		ndev_ctx = netdev_priv(event_dev);
+		schedule_delayed_work(&ndev_ctx->vfns_work, 0);
		return NOTIFY_DONE;
	}

@@ -710,9 +710,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
 static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
 {
	hrtimer_cancel(&rq->napi_timer);
-	local_bh_disable();
-	dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
-	local_bh_enable();
+
+	if (rq->skb_queue.qlen) {
+		local_bh_disable();
+		dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
+		local_bh_enable();
+	}
+
	skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
	kfree(rq);
 }

@@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
	if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
		return -EINVAL;

+	gpiod_put(mdiodev->reset_gpio);
	reset_control_put(mdiodev->reset_ctrl);

	mdiodev->bus->mdio_map[mdiodev->addr] = NULL;

@@ -443,9 +443,6 @@ void mdiobus_unregister(struct mii_bus *bus)
		if (!mdiodev)
			continue;

-		if (mdiodev->reset_gpio)
-			gpiod_put(mdiodev->reset_gpio);
-
		mdiodev->device_remove(mdiodev);
		mdiodev->device_free(mdiodev);
	}

@@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
	return macsec_ability;
 }

+static bool tja11xx_phy_id_compare(struct phy_device *phydev,
+				   const struct phy_driver *phydrv)
+{
+	u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
+				  phydev->phy_id;
+
+	return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
+}
+
 static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
					      const struct phy_driver *phydrv)
 {
-	if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
-			    phydrv->phy_id_mask))
-		return 0;
-
-	return !nxp_c45_macsec_ability(phydev);
+	return tja11xx_phy_id_compare(phydev, phydrv) &&
+	       !nxp_c45_macsec_ability(phydev);
 }

 static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
					   const struct phy_driver *phydrv)
 {
-	if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
-			    phydrv->phy_id_mask))
-		return 0;
-
-	return nxp_c45_macsec_ability(phydev);
+	return tja11xx_phy_id_compare(phydev, phydrv) &&
+	       nxp_c45_macsec_ability(phydev);
 }

 static const struct nxp_c45_regmap tja1120_regmap = {

@@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
	priv->mdio->read = &asix_mdio_bus_read;
	priv->mdio->write = &asix_mdio_bus_write;
	priv->mdio->name = "Asix MDIO Bus";
+	priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
	/* mii bus name is usb-<usb bus number>-<usb device number> */
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

@@ -1361,6 +1361,7 @@ static const struct usb_device_id products[] = {
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)},	/* Telit FN980 */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990A */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)},	/* Telit FN990A w/audio */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)},	/* Telit FE990A */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)},	/* Telit FN920C04 */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)},	/* Telit FN920C04 */

@@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)

 static __inline__ int dev_is_ethdev(struct net_device *dev)
 {
-	return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+	return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
 }

 /* ------------------------------------------------------------------------ */

@@ -24,6 +24,11 @@
 #define PTP_DEFAULT_MAX_VCLOCKS 20
 #define PTP_MAX_CHANNELS 2048

+enum {
+	PTP_LOCK_PHYSICAL = 0,
+	PTP_LOCK_VIRTUAL,
+};
+
 struct timestamp_event_queue {
	struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
	int head;

@@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
	return PTP_VCLOCK_REFRESH_INTERVAL;
 }

+static void ptp_vclock_set_subclass(struct ptp_clock *ptp)
+{
+	lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);
+}
+
 static const struct ptp_clock_info ptp_vclock_info = {
	.owner = THIS_MODULE,
	.name = "ptp virtual clock",
@@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
		return NULL;
	}

+	ptp_vclock_set_subclass(vclock->clock);
+
	timecounter_init(&vclock->tc, &vclock->cc, 0);
	ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);

@@ -2071,6 +2071,8 @@ enum netdev_reg_state {
 *	@max_pacing_offload_horizon: max EDT offload horizon in nsec.
 *	@napi_config: An array of napi_config structures containing per-NAPI
 *		      settings.
+ *	@num_napi_configs: number of allocated NAPI config structs,
+ *			   always >= max(num_rx_queues, num_tx_queues).
 *	@gro_flush_timeout: timeout for GRO layer in NAPI
 *	@napi_defer_hard_irqs: If not zero, provides a counter that would
 *			       allow to avoid NIC hard IRQ, on busy queues.
@@ -2482,8 +2484,9 @@ struct net_device {

	u64			max_pacing_offload_horizon;
	struct napi_config	*napi_config;
-	unsigned long		gro_flush_timeout;
+	u32			num_napi_configs;
	u32			napi_defer_hard_irqs;
+	unsigned long		gro_flush_timeout;

	/**
	 * @up: copy of @state's IFF_UP, but safe to read with just @lock.

@@ -78,6 +78,9 @@ struct devlink_port_pci_sf_attrs {
 * @flavour: flavour of the port
 * @split: indicates if this is split port
 * @splittable: indicates if the port can be split.
+ * @no_phys_port_name: skip automatic phys_port_name generation; for
+ *		       compatibility only, newly added driver/port instance
+ *		       should never set this.
 * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
 * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
 * @phys: physical port attributes
@@ -87,7 +90,8 @@ struct devlink_port_pci_sf_attrs {
 */
 struct devlink_port_attrs {
	u8 split:1,
-	   splittable:1;
+	   splittable:1,
+	   no_phys_port_name:1;
	u32 lanes;
	enum devlink_port_flavour flavour;
	struct netdev_phys_item_id switch_id;

@@ -1163,6 +1163,14 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
	return housekeeping_cpumask(HK_TYPE_KTHREAD);
 }

+static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
+{
+	if (ipvs->est_cpulist_valid)
+		return ipvs->sysctl_est_cpulist;
+	else
+		return NULL;
+}
+
 static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
 {
	return ipvs->sysctl_est_nice;
@@ -1270,6 +1278,11 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
	return housekeeping_cpumask(HK_TYPE_KTHREAD);
 }

+static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
+{
+	return NULL;
+}
+
 static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
 {
	return IPVS_EST_NICE;

@@ -71,7 +71,6 @@ struct kcm_sock {
	struct list_head wait_psock_list;
	struct sk_buff *seq_skb;
	struct mutex tx_mutex;
-	u32 tx_stopped : 1;

	/* Don't use bit fields here, these are set under different locks */
	bool tx_wait;

@@ -265,6 +265,8 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
 struct xdp_mem_info;

 #ifdef CONFIG_PAGE_POOL
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+				       struct napi_struct *napi);
 void page_pool_disable_direct_recycling(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),

@@ -893,6 +893,7 @@ out:

	return ret;
 }
+EXPORT_SYMBOL_GPL(kthread_affine_preferred);

 /*
 * Re-affine kthreads according to their preferences

@@ -434,7 +434,7 @@ void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
	if (dentry && !xa_is_err(dentry))
		return;

-	ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir);
+	ret = snprintf(name, sizeof(name), "%s@%p", dir->class, dir);
	name[sizeof(name) - 1] = '\0';

	if (ret < sizeof(name)) {

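For context on the one-character change above: %px prints the raw kernel pointer, which leaks address-space layout through the debugfs file name, while %p prints a per-boot hashed value that still uniquely identifies the tracker directory without being reversible.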
@@ -43,6 +43,7 @@ config NF_CONNTRACK_BRIDGE
 config BRIDGE_NF_EBTABLES_LEGACY
	tristate "Legacy EBTABLES support"
	depends on BRIDGE && NETFILTER_XTABLES_LEGACY
+	depends on NETFILTER_XTABLES
	default n
	help
	  Legacy ebtables packet/frame classifier.

@@ -6999,7 +6999,7 @@ int netif_set_threaded(struct net_device *dev,
			enum netdev_napi_threaded threaded)
 {
	struct napi_struct *napi;
-	int err = 0;
+	int i, err = 0;

	netdev_assert_locked_or_invisible(dev);

@@ -7021,6 +7021,10 @@ int netif_set_threaded(struct net_device *dev,
	list_for_each_entry(napi, &dev->napi_list, dev_list)
		WARN_ON_ONCE(napi_set_threaded(napi, threaded));

+	/* Override the config for all NAPIs even if currently not listed */
+	for (i = 0; i < dev->num_napi_configs; i++)
+		dev->napi_config[i].threaded = threaded;
+
	return err;
 }

@@ -7353,8 +7357,9 @@ void netif_napi_add_weight_locked(struct net_device *dev,
	 * Clear dev->threaded if kthread creation failed so that
	 * threaded mode will not be enabled in napi_enable().
	 */
-	if (dev->threaded && napi_kthread_create(napi))
-		dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
+	if (napi_get_threaded_config(dev, napi))
+		if (napi_kthread_create(napi))
+			dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
	netif_napi_set_irq_locked(napi, -1);
 }
 EXPORT_SYMBOL(netif_napi_add_weight_locked);
@@ -11873,6 +11878,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		goto free_all;
	dev->cfg_pending = dev->cfg;

+	dev->num_napi_configs = maxqs;
	napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
	dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
	if (!dev->napi_config)

@@ -323,6 +323,14 @@ static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
	return NETDEV_NAPI_THREADED_DISABLED;
 }

+static inline enum netdev_napi_threaded
+napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
+{
+	if (n->config)
+		return n->config->threaded;
+	return dev->threaded;
+}
+
 int napi_set_threaded(struct napi_struct *n,
		      enum netdev_napi_threaded threaded);

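The helper above is what gives a per-NAPI setting precedence over the device-wide default, and the netif_set_threaded() hunk writing napi_config[i].threaded for every allocated config is what makes the device-wide setting reach NAPIs that are currently disabled or not yet listed. Restating the resulting registration-time decision from the dev.c hunk as a sketch (same logic, shown standalone for readability):

/* Effective setting: the per-NAPI config when one is attached,
 * otherwise the device default; only then is a poll kthread spawned.
 */
if (napi_get_threaded_config(dev, napi))
	if (napi_kthread_create(napi))
		dev->threaded = NETDEV_NAPI_THREADED_DISABLED;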
@@ -1201,6 +1201,35 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
	pool->xdp_mem_id = mem->id;
 }

+/**
+ * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
+ * @pool: page pool to modify
+ * @napi: NAPI instance to associate the page pool with
+ *
+ * Associate a page pool with a NAPI instance for lockless page recycling.
+ * This is useful when a new page pool has to be added to a NAPI instance
+ * without disabling that NAPI instance, to mark the point at which control
+ * path "hands over" the page pool to the NAPI instance. In most cases driver
+ * can simply set the @napi field in struct page_pool_params, and does not
+ * have to call this helper.
+ *
+ * The function is idempotent, but does not implement any refcounting.
+ * Single page_pool_disable_direct_recycling() will disable recycling,
+ * no matter how many times enable was called.
+ */
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+				       struct napi_struct *napi)
+{
+	if (READ_ONCE(pool->p.napi) == napi)
+		return;
+	WARN_ON(!napi || pool->p.napi);
+
+	mutex_lock(&page_pools_lock);
+	WRITE_ONCE(pool->p.napi, napi);
+	mutex_unlock(&page_pools_lock);
+}
+EXPORT_SYMBOL(page_pool_enable_direct_recycling);
+
 void page_pool_disable_direct_recycling(struct page_pool *pool)
 {
	/* Disable direct recycling based on pool->cpuid.

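As the new kernel-doc says, most drivers simply set page_pool_params::napi at create time; this helper is for pools created while their NAPI stays live. A hedged driver-side sketch of that sequence (error handling elided, variable names illustrative):

struct page_pool_params pp = {
	.pool_size	= 1024,
	.nid		= NUMA_NO_NODE,
	.dev		= &pdev->dev,
	.dma_dir	= DMA_FROM_DEVICE,
	/* .napi left NULL on purpose: the NAPI is still enabled */
};
struct page_pool *pool = page_pool_create(&pp);

/* ... publish the pool to the rx ring ... */

/* hand the pool over for lockless, NAPI-context recycling */
page_pool_enable_direct_recycling(pool, napi);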
@@ -1519,7 +1519,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
	struct devlink_port_attrs *attrs = &devlink_port->attrs;
	int n = 0;

-	if (!devlink_port->attrs_set)
+	if (!devlink_port->attrs_set || devlink_port->attrs.no_phys_port_name)
		return -EOPNOTSUPP;

	switch (attrs->flavour) {

@@ -14,6 +14,7 @@ config NF_DEFRAG_IPV4
 config IP_NF_IPTABLES_LEGACY
	tristate "Legacy IP tables support"
	depends on NETFILTER_XTABLES_LEGACY
+	depends on NETFILTER_XTABLES
	default m if NETFILTER_XTABLES_LEGACY
	help
	  iptables is a legacy packet classifier.
@@ -326,6 +327,7 @@ endif # IP_NF_IPTABLES
 config IP_NF_ARPTABLES
	tristate "Legacy ARPTABLES support"
	depends on NETFILTER_XTABLES_LEGACY
+	depends on NETFILTER_XTABLES
	default n
	help
	  arptables is a legacy packet classifier.
@@ -343,6 +345,7 @@ config IP_NF_ARPFILTER
	select IP_NF_ARPTABLES
	select NETFILTER_FAMILY_ARP
	depends on NETFILTER_XTABLES_LEGACY
+	depends on NETFILTER_XTABLES
	help
	  ARP packet filtering defines a table `filter', which has a series of
	  rules for simple ARP packet filtering at local input and

@@ -217,7 +217,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

-	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
+	need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb);
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&

@ -10,6 +10,7 @@ menu "IPv6: Netfilter Configuration"
|
|||
config IP6_NF_IPTABLES_LEGACY
|
||||
tristate "Legacy IP6 tables support"
|
||||
depends on INET && IPV6 && NETFILTER_XTABLES_LEGACY
|
||||
depends on NETFILTER_XTABLES
|
||||
default m if NETFILTER_XTABLES_LEGACY
|
||||
help
|
||||
ip6tables is a legacy packet classifier.
|
||||
|
|
|
@@ -334,7 +334,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
	unsigned int i;

-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+	xfrm_state_flush(net, 0, false);
	xfrm_flush_gc();

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)

@@ -430,7 +430,7 @@ static void psock_write_space(struct sock *sk)

	/* Check if the socket is reserved so someone is waiting for sending. */
	kcm = psock->tx_kcm;
-	if (kcm && !unlikely(kcm->tx_stopped))
+	if (kcm)
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
@@ -1693,12 +1693,6 @@ static int kcm_release(struct socket *sock)
	 */
	__skb_queue_purge(&sk->sk_write_queue);

-	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
-	 * get a writespace callback. This prevents further work being queued
-	 * from the callback (unbinding the psock occurs after canceling work.
-	 */
-	kcm->tx_stopped = 1;
-
	release_sock(sk);

	spin_lock_bh(&mux->lock);
@@ -1714,7 +1708,7 @@ static int kcm_release(struct socket *sock)
	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
-	cancel_work_sync(&kcm->tx_work);
+	disable_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;

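On the cancel_work_sync() to disable_work_sync() swap above: cancel_work_sync() only cancels and waits, so a write-space callback racing with release could re-queue tx_work afterwards, which is the race the removed tx_stopped flag tried to close. disable_work_sync() additionally leaves the work item disabled, so any later queue_work() on it becomes a no-op; that is why the flag and its check in psock_write_space() can go away.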
@@ -1586,7 +1586,6 @@ static void mctp_test_bind_lookup(struct kunit *test)

 cleanup:
-	kfree_skb(skb_sock);
	kfree_skb(skb_pkt);

	/* Drop all binds */
	for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++)

@@ -265,7 +265,8 @@ int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
	}

	set_user_nice(kd->task, sysctl_est_nice(ipvs));
-	set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
+	if (sysctl_est_preferred_cpulist(ipvs))
+		kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs));

	pr_info("starting estimator thread %d...\n", kd->id);
	wake_up_process(kd->task);

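kthread_affine_preferred(), newly exported by the kthread.c hunk above, records a preferred cpumask that the kthread core re-applies across CPU hotplug, unlike a one-shot set_cpus_allowed_ptr(). A hedged sketch of the create/affine/wake sequence (est_worker, data, mask and id are placeholders, not IPVS symbols):

struct task_struct *task;

task = kthread_create(est_worker, data, "ipvs-e:%d", id);
if (!IS_ERR(task)) {
	if (mask)	/* may be NULL when no preference is configured */
		kthread_affine_preferred(task, mask);
	wake_up_process(task);
}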
@@ -884,8 +884,6 @@ errout:

 static int ctnetlink_done(struct netlink_callback *cb)
 {
-	if (cb->args[1])
-		nf_ct_put((struct nf_conn *)cb->args[1]);
	kfree(cb->data);
	return 0;
 }
@@ -1208,19 +1206,26 @@ ignore_entry:
	return 0;
 }

+static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
+{
+	unsigned long id = nf_ct_get_id(ct);
+
+	return id ? id : 1;
+}
+
 static int
 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
	struct net *net = sock_net(skb->sk);
-	struct nf_conn *ct, *last;
+	unsigned long last_id = cb->args[1];
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *nf_ct_evict[8];
+	struct nf_conn *ct;
	int res, i;
	spinlock_t *lockp;

-	last = (struct nf_conn *)cb->args[1];
	i = 0;

	local_bh_disable();
@@ -1257,7 +1262,7 @@ restart:
				continue;

			if (cb->args[1]) {
-				if (ct != last)
+				if (ctnetlink_get_id(ct) != last_id)
					continue;
				cb->args[1] = 0;
			}
@@ -1270,8 +1275,7 @@ restart:
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct, true, flags);
			if (res < 0) {
-				nf_conntrack_get(&ct->ct_general);
-				cb->args[1] = (unsigned long)ct;
+				cb->args[1] = ctnetlink_get_id(ct);
				spin_unlock(lockp);
				goto out;
			}
@@ -1284,12 +1288,10 @@ restart:
	}
 out:
	local_bh_enable();
-	if (last) {
+	if (last_id) {
		/* nf ct hash resize happened, now clear the leftover. */
-		if ((struct nf_conn *)cb->args[1] == last)
+		if (cb->args[1] == last_id)
			cb->args[1] = 0;
-
-		nf_ct_put(last);
	}

	while (i) {
@@ -3168,23 +3170,27 @@ errout:
	return 0;
 }
 #endif
-static int ctnetlink_exp_done(struct netlink_callback *cb)
+
+static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp)
 {
-	if (cb->args[1])
-		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
-	return 0;
+	unsigned long id = (unsigned long)exp;
+
+	id += nf_ct_get_id(exp->master);
+	id += exp->class;
+
+	return id ? id : 1;
 }

 static int
 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
	struct net *net = sock_net(skb->sk);
-	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
+	unsigned long last_id = cb->args[1];
+	struct nf_conntrack_expect *exp;

	rcu_read_lock();
-	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
@@ -3196,7 +3202,7 @@ restart:
				continue;

			if (cb->args[1]) {
-				if (exp != last)
+				if (ctnetlink_exp_id(exp) != last_id)
					continue;
				cb->args[1] = 0;
			}
@@ -3205,9 +3211,7 @@ restart:
					    cb->nlh->nlmsg_seq,
					    IPCTNL_MSG_EXP_NEW,
					    exp) < 0) {
-				if (!refcount_inc_not_zero(&exp->use))
-					continue;
-				cb->args[1] = (unsigned long)exp;
+				cb->args[1] = ctnetlink_exp_id(exp);
				goto out;
			}
		}
@@ -3218,32 +3222,30 @@ restart:
	}
 out:
	rcu_read_unlock();
-	if (last)
-		nf_ct_expect_put(last);

	return skb->len;
 }

 static int
 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct nf_conn *ct = cb->data;
	struct nf_conn_help *help = nfct_help(ct);
	u_int8_t l3proto = nfmsg->nfgen_family;
+	unsigned long last_id = cb->args[1];
+	struct nf_conntrack_expect *exp;

	if (cb->args[0])
		return 0;

	rcu_read_lock();
-	last = (struct nf_conntrack_expect *)cb->args[1];

 restart:
	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
		if (l3proto && exp->tuple.src.l3num != l3proto)
			continue;
		if (cb->args[1]) {
-			if (exp != last)
+			if (ctnetlink_exp_id(exp) != last_id)
				continue;
			cb->args[1] = 0;
		}
@@ -3251,9 +3253,7 @@ restart:
					    cb->nlh->nlmsg_seq,
					    IPCTNL_MSG_EXP_NEW,
					    exp) < 0) {
-			if (!refcount_inc_not_zero(&exp->use))
-				continue;
-			cb->args[1] = (unsigned long)exp;
+			cb->args[1] = ctnetlink_exp_id(exp);
			goto out;
		}
	}
@@ -3264,9 +3264,6 @@ restart:
	cb->args[0] = 1;
 out:
	rcu_read_unlock();
-	if (last)
-		nf_ct_expect_put(last);

	return skb->len;
 }
@@ -3285,7 +3282,6 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
	struct nf_conntrack_zone zone;
	struct netlink_dump_control c = {
		.dump = ctnetlink_exp_ct_dump_table,
-		.done = ctnetlink_exp_done,
	};

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
@@ -3335,7 +3331,6 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
	else {
		struct netlink_dump_control c = {
			.dump = ctnetlink_exp_dump_table,
-			.done = ctnetlink_exp_done,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

@@ -567,16 +567,16 @@ nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write,
		return ret;

	if (*(u8 *)table->data == 0)
-		return ret;
+		return 0;

	/* Load nf_log_syslog only if no logger is currently registered */
	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		if (nf_log_is_registered(i))
-			return ret;
+			return 0;
	}
	request_module("%s", "nf_log_syslog");

-	return ret;
+	return 0;
 }

 static struct ctl_table_header *nf_ct_netfilter_header;

@@ -2803,6 +2803,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
	struct nft_chain *chain = ctx->chain;
	struct nft_chain_hook hook = {};
	struct nft_stats __percpu *stats = NULL;
+	struct nftables_pernet *nft_net;
	struct nft_hook *h, *next;
	struct nf_hook_ops *ops;
	struct nft_trans *trans;
@@ -2845,6 +2846,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
			if (nft_hook_list_find(&basechain->hook_list, h)) {
				list_del(&h->list);
				nft_netdev_hook_free(h);
+				continue;
+			}
+
+			nft_net = nft_pernet(ctx->net);
+			list_for_each_entry(trans, &nft_net->commit_list, list) {
+				if (trans->msg_type != NFT_MSG_NEWCHAIN ||
+				    trans->table != ctx->table ||
+				    !nft_trans_chain_update(trans))
+					continue;
+
+				if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) {
+					nft_chain_release_hook(&hook);
+					return -EEXIST;
+				}
			}
		}
	} else {
@@ -9060,6 +9075,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
 {
	const struct nlattr * const *nla = ctx->nla;
	struct nft_flowtable_hook flowtable_hook;
+	struct nftables_pernet *nft_net;
	struct nft_hook *hook, *next;
	struct nf_hook_ops *ops;
	struct nft_trans *trans;
@@ -9076,6 +9092,20 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
		if (nft_hook_list_find(&flowtable->hook_list, hook)) {
			list_del(&hook->list);
			nft_netdev_hook_free(hook);
+			continue;
+		}
+
+		nft_net = nft_pernet(ctx->net);
+		list_for_each_entry(trans, &nft_net->commit_list, list) {
+			if (trans->msg_type != NFT_MSG_NEWFLOWTABLE ||
+			    trans->table != ctx->table ||
+			    !nft_trans_flowtable_update(trans))
+				continue;
+
+			if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) {
+				err = -EEXIST;
+				goto err_flowtable_update_hook;
+			}
		}
	}

@@ -426,10 +426,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,

	local_bh_disable();

-	if (unlikely(!raw_cpu_ptr(m->scratch)))
-		goto out;
-
	scratch = *raw_cpu_ptr(m->scratch);
+	if (unlikely(!scratch))
+		goto out;

	map_index = scratch->map_index;

@@ -1150,12 +1150,12 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
			const u32 *key)
 {
	struct nft_pipapo *priv = nft_set_priv(set);
+	const struct nft_set_ext *ext = NULL;
	struct nft_pipapo_scratch *scratch;
	u8 genmask = nft_genmask_cur(net);
	const struct nft_pipapo_match *m;
	const struct nft_pipapo_field *f;
	const u8 *rp = (const u8 *)key;
-	const struct nft_set_ext *ext;
	unsigned long *res, *fill;
	bool map_index;
	int i;
@@ -1246,13 +1246,13 @@ next_match:
		goto out;

	if (last) {
-		ext = &f->mt[ret].e->ext;
-		if (unlikely(nft_set_elem_expired(ext) ||
-			     !nft_set_elem_active(ext, genmask))) {
-			ext = NULL;
-			goto next_match;
-		}
+		const struct nft_set_ext *e = &f->mt[ret].e->ext;
+
+		if (unlikely(nft_set_elem_expired(e) ||
+			     !nft_set_elem_active(e, genmask)))
+			goto next_match;
+
+		ext = e;
		goto out;
	}

@@ -217,7 +217,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,

		level += err;
		/* Implies a giant cgroup tree */
-		if (WARN_ON_ONCE(level > 255))
+		if (level > 255)
			return -EOPNOTSUPP;

		priv->level = level;

@@ -651,6 +651,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,

	sch_tree_lock(sch);

+	for (i = nbands; i < oldbands; i++) {
+		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+			list_del_init(&q->classes[i].alist);
+		qdisc_purge_queue(q->classes[i].qdisc);
+	}
+
	WRITE_ONCE(q->nbands, nbands);
	for (i = nstrict; i < q->nstrict; i++) {
		if (q->classes[i].qdisc->q.qlen) {
@@ -658,11 +664,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			q->classes[i].deficit = quanta[i];
		}
	}
-	for (i = q->nbands; i < oldbands; i++) {
-		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
-			list_del_init(&q->classes[i].alist);
-		qdisc_purge_queue(q->classes[i].qdisc);
-	}
	WRITE_ONCE(q->nstrict, nstrict);
	memcpy(q->prio2band, priomap, sizeof(priomap));

@@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
	 * it's better to just linearize it otherwise crc computing
	 * takes longer.
	 */
-	if ((!is_gso && skb_linearize(skb)) ||
+	if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
		goto discard_it;

@@ -196,7 +196,7 @@ void tls_strp_msg_done(struct tls_strparser *strp);
 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
 void tls_rx_msg_ready(struct tls_strparser *strp);

-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
 int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
 struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
 int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);

@@ -475,7 +475,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
	strp->stm.offset = offset;
 }

-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
 {
	struct strp_msg *rxm;
	struct tls_msg *tlm;
@@ -484,8 +484,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	if (!strp->copy_mode && force_refresh) {
-		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
-			return;
+		if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
+			WRITE_ONCE(strp->msg_ready, 0);
+			memset(&strp->stm, 0, sizeof(strp->stm));
+			return false;
+		}

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}
@@ -495,6 +498,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
	rxm->offset = strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control = strp->mark;
+
+	return true;
 }

 /* Called with lock held on lower socket */

@@ -1384,7 +1384,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		return sock_intr_errno(timeo);
	}

-	tls_strp_msg_load(&ctx->strp, released);
+	if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
+		return tls_rx_rec_wait(sk, psock, nonblock, false);

	return 1;
 }

@@ -689,7 +689,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
-			if (port <= LAST_RESERVED_PORT)
+			if (port == VMADDR_PORT_ANY ||
+			    port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

@@ -155,7 +155,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features
		return skb;
	}

-	if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) {
+	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
@@ -415,10 +416,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
	struct net_device *dev = x->xso.dev;
	bool check_tunnel_size;

-	if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED)
+	if (!x->type_offload ||
+	    (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
		return false;

-	if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) {
+	if ((!dev || dev == xfrm_dst_path(dst)->dev) &&
+	    !xdst->child->xfrm) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;
@@ -430,6 +433,9 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
	return false;

 ok:
+	if (!dev)
+		return true;
+
	check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
			    x->props.mode == XFRM_MODE_TUNNEL;
	switch (x->props.family) {

@@ -3297,7 +3297,7 @@ void xfrm_state_fini(struct net *net)
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+	xfrm_state_flush(net, 0, false);
	flush_work(&xfrm_state_gc_work);

	WARN_ON(!list_empty(&net->xfrm.state_all));

@ -35,6 +35,8 @@ def _setup_deferred_cleanup(cfg) -> None:
|
|||
threaded = cmd(f"cat /sys/class/net/{cfg.ifname}/threaded").stdout
|
||||
defer(_set_threaded_state, cfg, threaded)
|
||||
|
||||
return combined
|
||||
|
||||
|
||||
def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None:
|
||||
"""
|
||||
|
@ -49,7 +51,7 @@ def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None:
|
|||
napi0_id = napis[0]['id']
|
||||
napi1_id = napis[1]['id']
|
||||
|
||||
_setup_deferred_cleanup(cfg)
|
||||
qcnt = _setup_deferred_cleanup(cfg)
|
||||
|
||||
# set threaded
|
||||
_set_threaded_state(cfg, 1)
|
||||
|
@ -62,7 +64,7 @@ def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None:
|
|||
nl.napi_set({'id': napi1_id, 'threaded': 'disabled'})
|
||||
|
||||
cmd(f"ethtool -L {cfg.ifname} combined 1")
|
||||
cmd(f"ethtool -L {cfg.ifname} combined 2")
|
||||
cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
|
||||
_assert_napi_threaded_enabled(nl, napi0_id)
|
||||
_assert_napi_threaded_disabled(nl, napi1_id)
|
||||
|
||||
|
@ -80,7 +82,7 @@ def change_num_queues(cfg, nl) -> None:
|
|||
napi0_id = napis[0]['id']
|
||||
napi1_id = napis[1]['id']
|
||||
|
||||
_setup_deferred_cleanup(cfg)
|
||||
qcnt = _setup_deferred_cleanup(cfg)
|
||||
|
||||
# set threaded
|
||||
_set_threaded_state(cfg, 1)
|
||||
|
@ -90,7 +92,7 @@ def change_num_queues(cfg, nl) -> None:
|
|||
_assert_napi_threaded_enabled(nl, napi1_id)
|
||||
|
||||
cmd(f"ethtool -L {cfg.ifname} combined 1")
|
||||
cmd(f"ethtool -L {cfg.ifname} combined 2")
|
||||
cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
|
||||
|
||||
# check napi threaded is set for both napis
|
||||
_assert_napi_threaded_enabled(nl, napi0_id)
|
||||
|
|
|
@@ -11,6 +11,7 @@ ALL_TESTS="
	ets_test_strict
	ets_test_mixed
	ets_test_dwrr
+	ets_test_plug
	classifier_mode
	ets_test_strict
	ets_test_mixed
@@ -224,3 +224,11 @@ ets_test_dwrr()
	ets_set_dwrr_two_bands
	xfail_on_slow ets_dwrr_test_01
 }
+
+ets_test_plug()
+{
+	ets_change_qdisc $put 2 "3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3" "1514 1514"
+	tc qdisc add dev $put handle 20: parent 10:4 plug
+	start_traffic_pktsize 100 $h1.10 192.0.2.1 192.0.2.2 00:c1:a0:c1:a0:00 "-c 1"
+	ets_qdisc_setup $put 2
+}

@@ -2708,6 +2708,69 @@ TEST(prequeue) {
	close(cfd);
 }

+TEST(data_steal) {
+	struct tls_crypto_info_keys tls;
+	char buf[20000], buf2[20000];
+	struct sockaddr_in addr;
+	int sfd, cfd, ret, fd;
+	int pid, status;
+	socklen_t len;
+
+	len = sizeof(addr);
+	memrnd(buf, sizeof(buf));
+
+	tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls, 0);
+
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = htonl(INADDR_ANY);
+	addr.sin_port = 0;
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+	ASSERT_EQ(bind(sfd, &addr, sizeof(addr)), 0);
+	ASSERT_EQ(listen(sfd, 10), 0);
+	ASSERT_EQ(getsockname(sfd, &addr, &len), 0);
+	ASSERT_EQ(connect(fd, &addr, sizeof(addr)), 0);
+	ASSERT_GE(cfd = accept(sfd, &addr, &len), 0);
+	close(sfd);
+
+	ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+	if (ret) {
+		ASSERT_EQ(errno, ENOENT);
+		SKIP(return, "no TLS support");
+	}
+	ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0);
+
+	/* Spawn a child and get it into the read wait path of the underlying
+	 * TCP socket.
+	 */
+	pid = fork();
+	ASSERT_GE(pid, 0);
+	if (!pid) {
+		EXPECT_EQ(recv(cfd, buf, sizeof(buf), MSG_WAITALL),
+			  sizeof(buf));
+		exit(!__test_passed(_metadata));
+	}
+
+	usleep(2000);
+	ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls, tls.len), 0);
+	ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls, tls.len), 0);
+
+	EXPECT_EQ(send(fd, buf, sizeof(buf), 0), sizeof(buf));
+	usleep(2000);
+	EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_DONTWAIT), -1);
+	/* Don't check errno, the error will be different depending
+	 * on what random bytes TLS interpreted as the record length.
+	 */
+
+	close(fd);
+	close(cfd);
+
+	EXPECT_EQ(wait(&status), pid);
+	EXPECT_EQ(status, 0);
+}
+
 static void __attribute__((constructor)) fips_check(void) {
	int res;
	FILE *f;