net: remove sock_i_uid()

The difference between sock_i_uid() and sk_uid() is that,
after sock_orphan(), sock_i_uid() returns GLOBAL_ROOT_UID
while sk_uid() returns the last cached sk->sk_uid value.

None of the sock_i_uid() callers care about this.

Use sk_uid(), which is inlined and much faster.

Note that diag/dump users still call sock_i_ino() and cannot
see the full benefit yet (see the __sock_i_ino() sketch below).
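
For reference, a minimal sketch of the two helpers side by side. The
sock_i_uid() body is the locked variant removed below; the sk_uid() body
is an assumption based on the earlier patch in this series (only its
signature and comment appear in the hunk):

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	/* After sock_orphan() clears sk->sk_socket, this falls back to GLOBAL_ROOT_UID. */
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}

static inline kuid_t sk_uid(const struct sock *sk)
{
	/* Paired with WRITE_ONCE() in sockfs_setattr().
	 * Body assumed from the earlier patch in this series: a lockless
	 * read of the uid cached in the sock itself.
	 */
	return READ_ONCE(sk->sk_uid);
}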

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Lorenzo Colitti <lorenzo@google.com>
Reviewed-by: Maciej Żenczykowski <maze@google.com>
Link: https://patch.msgid.link/20250620133001.4090592-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Eric Dumazet, 2025-06-20 13:30:01 +00:00 (committed by Jakub Kicinski)
commit c51da3f7a1, parent e84a4927a4
26 changed files with 50 additions and 66 deletions

@ -2092,8 +2092,6 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
kuid_t sock_i_uid(struct sock *sk);
static inline kuid_t sk_uid(const struct sock *sk)
{
/* Paired with WRITE_ONCE() in sockfs_setattr() */

@ -181,7 +181,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
from_kuid_munged(seq_user_ns(seq), sk_uid(s)));
out:
return 0;
}

@ -815,7 +815,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
refcount_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
bt->parent ? sock_i_ino(bt->parent) : 0LU);

@ -2780,17 +2780,6 @@ void sock_pfree(struct sk_buff *skb)
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
kuid_t sock_i_uid(struct sock *sk)
{
kuid_t uid;
read_lock_bh(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
read_unlock_bh(&sk->sk_callback_lock);
return uid;
}
EXPORT_SYMBOL(sock_i_uid);
unsigned long __sock_i_ino(struct sock *sk)
{
unsigned long ino;

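The inode lookup that the diag/dump paths still use keeps the locked
pattern, which is why they cannot see the full benefit yet. Roughly, as a
sketch for context (these helpers are not part of this diff):

unsigned long __sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	/* Still serialized against sockfs via sk_callback_lock. */
	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	local_bh_disable();
	ino = __sock_i_ino(sk);
	local_bh_enable();
	return ino;
}
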
@ -168,7 +168,7 @@ static bool inet_use_bhash2_on_bind(const struct sock *sk)
}
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
int bound_dev_if2;
@ -185,12 +185,12 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
if (!relax || (!reuseport_ok && sk->sk_reuseport &&
sk2->sk_reuseport && reuseport_cb_ok &&
(sk2->sk_state == TCP_TIME_WAIT ||
uid_eq(sk_uid, sock_i_uid(sk2)))))
uid_eq(uid, sk_uid(sk2)))))
return true;
} else if (!reuseport_ok || !sk->sk_reuseport ||
!sk2->sk_reuseport || !reuseport_cb_ok ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(sk_uid, sock_i_uid(sk2)))) {
!uid_eq(uid, sk_uid(sk2)))) {
return true;
}
}
@ -198,7 +198,7 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
}
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
kuid_t sk_uid, bool relax,
kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
if (ipv6_only_sock(sk2)) {
@ -211,20 +211,20 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
#endif
}
return inet_bind_conflict(sk, sk2, sk_uid, relax,
return inet_bind_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok);
}
static bool inet_bhash2_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2,
kuid_t sk_uid,
kuid_t uid,
bool relax, bool reuseport_cb_ok,
bool reuseport_ok)
{
struct sock *sk2;
sk_for_each_bound(sk2, &tb2->owners) {
if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
if (__inet_bhash2_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok))
return true;
}
@ -242,8 +242,8 @@ static int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2, /* may be null */
bool relax, bool reuseport_ok)
{
kuid_t uid = sock_i_uid((struct sock *)sk);
struct sock_reuseport *reuseport_cb;
kuid_t uid = sk_uid(sk);
bool reuseport_cb_ok;
struct sock *sk2;
@ -287,11 +287,11 @@ static int inet_csk_bind_conflict(const struct sock *sk,
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
bool relax, bool reuseport_ok)
{
kuid_t uid = sock_i_uid((struct sock *)sk);
const struct net *net = sock_net(sk);
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
kuid_t uid = sk_uid(sk);
bool conflict = false;
bool reuseport_cb_ok;
@ -425,15 +425,13 @@ success:
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
struct sock *sk)
{
kuid_t uid = sock_i_uid(sk);
if (tb->fastreuseport <= 0)
return 0;
if (!sk->sk_reuseport)
return 0;
if (rcu_access_pointer(sk->sk_reuseport_cb))
return 0;
if (!uid_eq(tb->fastuid, uid))
if (!uid_eq(tb->fastuid, sk_uid(sk)))
return 0;
/* We only need to check the rcv_saddr if this tb was once marked
* without fastreuseport and then was reset, as we can only know that
@ -458,14 +456,13 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct sock *sk)
{
kuid_t uid = sock_i_uid(sk);
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
if (hlist_empty(&tb->bhash2)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
tb->fastuid = uid;
tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
@ -492,7 +489,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
*/
if (!sk_reuseport_match(tb, sk)) {
tb->fastreuseport = FASTREUSEPORT_STRICT;
tb->fastuid = uid;
tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;

@ -181,7 +181,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
goto errout;
#endif
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->idiag_inode = sock_i_ino(sk);
memset(&inet_sockopt, 0, sizeof(inet_sockopt));

@ -721,8 +721,8 @@ static int inet_reuseport_add_sock(struct sock *sk,
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
const struct hlist_nulls_node *node;
kuid_t uid = sk_uid(sk);
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
@ -730,7 +730,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false))
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));

@ -1116,7 +1116,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));

@ -1043,7 +1043,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}

@ -2896,7 +2896,7 @@ static void get_openreq4(const struct request_sock *req,
jiffies_delta_to_clock_t(delta),
req->num_timeout,
from_kuid_munged(seq_user_ns(f),
sock_i_uid(req->rsk_listener)),
sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0,
@ -2954,7 +2954,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(f), sk_uid(sk)),
icsk->icsk_probes_out,
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
@ -3246,9 +3246,9 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
const struct request_sock *req = v;
uid = from_kuid_munged(seq_user_ns(seq),
sock_i_uid(req->rsk_listener));
sk_uid(req->rsk_listener));
} else {
uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
}
meta.seq = seq;

@ -145,8 +145,8 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
unsigned long *bitmap,
struct sock *sk, unsigned int log)
{
kuid_t uid = sk_uid(sk);
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
@ -158,7 +158,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
uid_eq(uid, sk_uid(sk2))) {
if (!bitmap)
return 0;
} else {
@ -180,8 +180,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk)
{
kuid_t uid = sk_uid(sk);
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
@ -195,7 +195,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
uid_eq(uid, sk_uid(sk2))) {
res = 0;
} else {
res = 1;
@ -210,7 +210,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
struct net *net = sock_net(sk);
kuid_t uid = sock_i_uid(sk);
kuid_t uid = sk_uid(sk);
struct sock *sk2;
sk_for_each(sk2, &hslot->head) {
@ -220,7 +220,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
(udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false)) {
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
@ -3387,7 +3387,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
udp_rqueue_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
@ -3630,7 +3630,7 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);

@ -1064,7 +1064,7 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
sk_wmem_alloc_get(sp),
rqueue,
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0,
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,

@ -2168,7 +2168,7 @@ static void get_openreq6(struct seq_file *seq,
jiffies_to_clock_t(ttd),
req->num_timeout,
from_kuid_munged(seq_user_ns(seq),
sock_i_uid(req->rsk_listener)),
sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0, req);
@ -2234,7 +2234,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
icsk->icsk_probes_out,
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,

@ -3788,7 +3788,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
refcount_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
from_kuid_munged(seq_user_ns(f), sk_uid(s)),
sock_i_ino(s)
);
return 0;

@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
llc->link);
out:
return 0;

@ -4783,7 +4783,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
READ_ONCE(po->ifindex),
packet_sock_flag(po, PACKET_SOCK_RUNNING),
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
from_kuid_munged(seq_user_ns(seq), sk_uid(s)),
sock_i_ino(s));
}

@ -153,7 +153,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
if ((req->pdiag_show & PACKET_SHOW_INFO) &&
nla_put_u32(skb, PACKET_DIAG_UID,
from_kuid_munged(user_ns, sock_i_uid(sk))))
from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&

@ -584,7 +584,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
sk->sk_protocol, pn->sobject, pn->dobject,
pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
atomic_read(&sk->sk_drops));
@ -755,7 +755,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%02X %5u %lu",
(int) (psk - pnres.sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
}
seq_pad(seq, '\n');

@ -756,7 +756,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk2 = ep2->base.sk;
if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
!uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
!uid_eq(sk_uid(sk2), sk_uid(sk)) ||
!sk2->sk_reuseport)
continue;

@ -177,7 +177,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
ep->base.bind_addr.port,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
sctp_seq_dump_local_addrs(seq, &ep->base);
@ -267,7 +267,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->assoc_id,
assoc->sndbuf_used,
atomic_read(&assoc->rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
epb->bind_addr.port,
assoc->peer.port);

@ -8345,8 +8345,8 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
bool reuse = (sk->sk_reuse || sp->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
struct net *net = sock_net(sk);
kuid_t uid = sock_i_uid(sk);
struct sctp_bind_bucket *pp;
kuid_t uid = sk_uid(sk);
unsigned short snum;
int ret;
@ -8444,7 +8444,7 @@ pp_found:
(reuse && (sk2->sk_reuse || sp2->reuse) &&
sk2->sk_state != SCTP_SS_LISTENING) ||
(sk->sk_reuseport && sk2->sk_reuseport &&
uid_eq(uid, sock_i_uid(sk2))))
uid_eq(uid, sk_uid(sk2))))
continue;
if ((!sk->sk_bound_dev_if || !bound_dev_if2 ||

@ -64,7 +64,7 @@ static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
return 1;
r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->diag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->diag_inode = sock_i_ino(sk);
return 0;
}

@ -3642,7 +3642,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_UID,
from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
sock_i_uid(sk))) ||
sk_uid(sk))) ||
nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
tipc_diag_gen_cookie(sk),
TIPC_NLA_SOCK_PAD))

@ -3682,7 +3682,7 @@ static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = unix_prog_seq_show(prog, &meta, v, uid);

@ -106,7 +106,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
uid_t uid = from_kuid_munged(user_ns, sk_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

@ -119,7 +119,7 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
if ((req->xdiag_show & XDP_SHOW_INFO) &&
nla_put_u32(nlskb, XDP_DIAG_UID,
from_kuid_munged(user_ns, sock_i_uid(sk))))
from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&