netdev: depend on netdev->lock for qstats in ops locked drivers

We mostly needed rtnl_lock in qstats to make sure the queue count
is stable while we work. For "ops locked" drivers the instance
lock protects the queue count, so we don't have to take rtnl_lock.

For currently ops-locked drivers: netdevsim and bnxt need
protection from the netdev going down while we dump, which
the instance lock provides. gve doesn't care.

Reviewed-by: Joe Damato <jdamato@fastly.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250408195956.412733-9-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-04-08 12:59:55 -07:00
parent 87eba404f2
commit ce7b149474
3 changed files with 26 additions and 13 deletions

View File

@@ -356,6 +356,12 @@ Similarly to ``ndos`` the instance lock is only held for select drivers.
For "ops locked" drivers all ethtool ops without exceptions should
be called under the instance lock.
struct netdev_stat_ops
----------------------
"qstat" ops are invoked under the instance lock for "ops locked" drivers,
and under rtnl_lock for all other drivers.
struct net_shaper_ops
---------------------

View File

@@ -85,9 +85,11 @@ struct netdev_queue_stats_tx {
* for some of the events is not maintained, and reliable "total" cannot
* be provided).
*
* Ops are called under the instance lock if netdev_need_ops_lock()
* returns true, otherwise under rtnl_lock.
* Device drivers can assume that when collecting total device stats,
* the @get_base_stats and subsequent per-queue calls are performed
* "atomically" (without releasing the rtnl_lock).
* "atomically" (without releasing the relevant lock).
*
* Device drivers are encouraged to reset the per-queue statistics when
* number of queues change. This is because the primary use case for

View File

@@ -795,26 +795,31 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);
rtnl_lock();
if (ifindex) {
netdev = __dev_get_by_index(net, ifindex);
if (netdev && netdev->stat_ops) {
netdev = netdev_get_by_index_lock_ops_compat(net, ifindex);
if (!netdev) {
NL_SET_BAD_ATTR(info->extack,
info->attrs[NETDEV_A_QSTATS_IFINDEX]);
return -ENODEV;
}
if (netdev->stat_ops) {
err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
info, ctx);
} else {
NL_SET_BAD_ATTR(info->extack,
info->attrs[NETDEV_A_QSTATS_IFINDEX]);
err = netdev ? -EOPNOTSUPP : -ENODEV;
}
} else {
for_each_netdev_dump(net, netdev, ctx->ifindex) {
err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
info, ctx);
if (err < 0)
break;
err = -EOPNOTSUPP;
}
netdev_unlock_ops_compat(netdev);
return err;
}
for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) {
err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
info, ctx);
if (err < 0)
break;
}
rtnl_unlock();
return err;
}