xdp: double protect netdev->xdp_flags with netdev->lock

Protect xdp_features with netdev->lock. This way pure readers
no longer have to take rtnl_lock to access the field.

This includes calling NETDEV_XDP_FEAT_CHANGE under the lock.
Looks like that's fine for bonding, the only "real" listener;
it's the same as an ethtool feature change.

In terms of normal drivers, only GVE needs special consideration
(other drivers either don't use the instance lock or don't support XDP).
It calls the xdp_set_features_flag() helper from gve_init_priv(), which
in turn is called from gve_reset_recovery() (locked) or prior
to netdev registration. So switch it to the _locked variant.

Reviewed-by: Joe Damato <jdamato@fastly.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Acked-by: Harshitha Ramamurthy <hramamurthy@google.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20250408195956.412733-6-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2025-04-08 12:59:52 -07:00
Commit: 03df156dd3 (parent d02e3b3882)

6 changed files with 16 additions and 4 deletions
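
As a reader-side illustration of the change (this sketch is not part of the patch; the helper and its name are made up, while netdev_lock()/netdev_unlock(), dev->xdp_features and NETDEV_XDP_ACT_REDIRECT are real), a pure reader of xdp_features can now serialize on the per-device instance lock instead of rtnl_lock:

/* Illustrative sketch only -- not from this patch. */
static bool dev_supports_xdp_redirect(struct net_device *dev)
{
	bool supported;

	/* xdp_features is now double protected; a pure reader can take
	 * the instance lock instead of rtnl_lock.
	 */
	netdev_lock(dev);
	supported = dev->xdp_features & NETDEV_XDP_ACT_REDIRECT;
	netdev_unlock(dev);

	return supported;
}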

@@ -354,6 +354,7 @@ For devices with locked ops, currently only the following notifiers are
 running under the lock:
 * ``NETDEV_REGISTER``
 * ``NETDEV_UP``
+* ``NETDEV_XDP_FEAT_CHANGE``
 The following notifiers are running without the lock:
 * ``NETDEV_UNREGISTER``

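To show what the notifier change means for a listener (bonding is the only "real" one today), here is a hedged sketch of a handler; the handler itself is illustrative and not from the patch, only the notifier plumbing and netdev_ops_assert_locked() are real:

/* Illustrative listener sketch -- not part of the patch. */
static int my_netdev_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_XDP_FEAT_CHANGE:
		/* For devices with locked ops this event is now delivered
		 * under the instance lock, like an ethtool feature change,
		 * so dev->xdp_features can be read directly here.
		 */
		netdev_ops_assert_locked(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}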

@@ -2185,7 +2185,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 		xdp_features = 0;
 	}
 
-	xdp_set_features_flag(priv->dev, xdp_features);
+	xdp_set_features_flag_locked(priv->dev, xdp_features);
 }
 
 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)

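The GVE situation from the commit message, as a minimal driver-side sketch; the helper below is hypothetical, only xdp_set_features_flag_locked() and the feature flags are real. Both call sites the commit mentions satisfy the new assertion: gve_reset_recovery() already holds the instance lock, and gve_init_priv() at probe time runs before the netdev is registered (still "invisible").

/* Hypothetical driver helper -- not the actual GVE code. */
static void mydrv_set_xdp_features(struct net_device *dev)
{
	xdp_features_t feats = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* Safe both from a path that already holds the instance lock
	 * (e.g. reset recovery) and before register_netdev(), where
	 * netdev_assert_locked_or_invisible() accepts an unregistered
	 * device.
	 */
	xdp_set_features_flag_locked(dev, feats);
}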

@@ -2526,7 +2526,7 @@ struct net_device {
 	 *	@net_shaper_hierarchy, @reg_state, @threaded
 	 *
 	 * Double protects:
-	 *	@up, @moving_ns, @nd_net
+	 *	@up, @moving_ns, @nd_net, @xdp_flags
 	 *
 	 * Double ops protects:
 	 *	@real_num_rx_queues, @real_num_tx_queues

@@ -616,6 +616,7 @@ struct xdp_metadata_ops {
 u32 bpf_xdp_metadata_kfunc_id(int id);
 bool bpf_dev_bound_kfunc_id(u32 btf_id);
 void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
+void xdp_set_features_flag_locked(struct net_device *dev, xdp_features_t val);
 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
 void xdp_features_clear_redirect_target(struct net_device *dev);
 #else

@@ -20,6 +20,7 @@ int netdev_debug_event(struct notifier_block *nb, unsigned long event,
 	switch (cmd) {
 	case NETDEV_REGISTER:
 	case NETDEV_UP:
+	case NETDEV_XDP_FEAT_CHANGE:
 		netdev_ops_assert_locked(dev);
 		fallthrough;
 	case NETDEV_DOWN:
@@ -58,7 +59,6 @@ int netdev_debug_event(struct notifier_block *nb, unsigned long event,
 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
-	case NETDEV_XDP_FEAT_CHANGE:
 		ASSERT_RTNL();
 		break;

@@ -17,6 +17,7 @@
 #include <net/page_pool/helpers.h>
 #include <net/hotdata.h>
+#include <net/netdev_lock.h>
 #include <net/xdp.h>
 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */
 #include <trace/events/xdp.h>
@@ -991,17 +992,26 @@ static int __init xdp_metadata_init(void)
 }
 late_initcall(xdp_metadata_init);
 
-void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+void xdp_set_features_flag_locked(struct net_device *dev, xdp_features_t val)
 {
 	val &= NETDEV_XDP_ACT_MASK;
 	if (dev->xdp_features == val)
 		return;
 
+	netdev_assert_locked_or_invisible(dev);
 	dev->xdp_features = val;
 
 	if (dev->reg_state == NETREG_REGISTERED)
 		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
 }
+EXPORT_SYMBOL_GPL(xdp_set_features_flag_locked);
+
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+	netdev_lock(dev);
+	xdp_set_features_flag_locked(dev, val);
+	netdev_unlock(dev);
+}
 EXPORT_SYMBOL_GPL(xdp_set_features_flag);
 
 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
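
A short note on the resulting split, as I read this patch: xdp_set_features_flag() keeps its existing signature for callers that do not hold the instance lock and simply wraps the new _locked variant with netdev_lock()/netdev_unlock(), while drivers that already run under the lock (or call before registration, like GVE above) use xdp_set_features_flag_locked() directly and so avoid taking the lock twice.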