Mirror of https://github.com/torvalds/linux.git, synced 2025-08-16 06:31:34 +02:00
tcp: stronger sk_rcvbuf checks
Currently, the TCP stack accepts an incoming packet as long as the sizes of the receive queues are below the sk->sk_rcvbuf limit. This can cause a memory overshoot if the packet is big, such as a 1/2 MB BIG TCP one.

Refine the check to take the incoming skb's truesize into account.

Note that we still accept the packet if the receive queue is empty, so as not to completely freeze TCP flows in pathological conditions.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250711114006.480026-8-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 75dff0584c
commit 1d2fbaad7c

1 changed file with 16 additions and 6 deletions: net/ipv4/tcp_input.c
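To make the overshoot concrete, here is a minimal, self-contained userspace sketch of the old check versus the new truesize-aware one. The buffer sizes are illustrative assumptions, not values taken from the patch:

/* overshoot_demo.c - model of the old vs. new sk_rcvbuf admission check.
 * All numbers are illustrative assumptions, not kernel values.
 */
#include <stdbool.h>
#include <stdio.h>

/* Old check: only the current queue occupancy is compared to the limit. */
static bool old_check(unsigned int rmem_alloc, unsigned int rcvbuf)
{
        return rmem_alloc <= rcvbuf;
}

/* New check, mirroring tcp_can_ingest(): charge the incoming skb's
 * truesize before deciding whether it fits.
 */
static bool new_check(unsigned int rmem_alloc, unsigned int truesize,
                      unsigned int rcvbuf)
{
        return rmem_alloc + truesize <= rcvbuf;
}

int main(void)
{
        unsigned int rcvbuf = 128 * 1024;     /* assumed sk->sk_rcvbuf */
        unsigned int rmem_alloc = 120 * 1024; /* queue nearly full */
        unsigned int truesize = 512 * 1024;   /* one 1/2 MB BIG TCP skb */

        printf("old check admits: %d\n", old_check(rmem_alloc, rcvbuf));
        printf("new check admits: %d\n",
               new_check(rmem_alloc, truesize, rcvbuf));
        printf("queue after old-style admission: %u of %u bytes\n",
               rmem_alloc + truesize, rcvbuf);
        return 0;
}

With these numbers the old check admits the skb and the queue lands at roughly five times sk_rcvbuf, while the new check rejects it. truesize is the right quantity to charge because it covers the full buffer allocation rather than just the payload; and, per the commit message, an skb arriving to an empty receive queue is still accepted.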
@@ -4888,10 +4888,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
 
+/* Check if this incoming skb can be added to socket receive queues
+ * while satisfying sk->sk_rcvbuf limit.
+ */
+static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
+{
+        unsigned int new_mem = atomic_read(&sk->sk_rmem_alloc) + skb->truesize;
+
+        return new_mem <= sk->sk_rcvbuf;
+}
+
 static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
                                  unsigned int size)
 {
-        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+        if (!tcp_can_ingest(sk, skb) ||
             !sk_rmem_schedule(sk, skb, size)) {
 
                 if (tcp_prune_queue(sk, skb) < 0)
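The hunk above reads as: admit the skb only if both the per-socket limit (tcp_can_ingest()) and the protocol memory accounting (sk_rmem_schedule()) agree, otherwise fall back to pruning. Below is a simplified, compilable model of that branch; rmem_schedule() and prune_queue() are stand-ins with made-up effects, not kernel APIs:

/* admission_model.c - shape of the branch changed above. */
#include <stdbool.h>
#include <stdio.h>

struct msock {
        unsigned int rmem_alloc; /* models sk->sk_rmem_alloc */
        unsigned int rcvbuf;     /* models sk->sk_rcvbuf */
};

/* Mirrors tcp_can_ingest(): the skb's full truesize must still fit
 * under the rcvbuf limit.
 */
static bool can_ingest(const struct msock *sk, unsigned int truesize)
{
        return sk->rmem_alloc + truesize <= sk->rcvbuf;
}

/* Stand-in for sk_rmem_schedule(): global/per-protocol accounting. */
static bool rmem_schedule(const struct msock *sk, unsigned int size)
{
        (void)sk; (void)size;
        return true; /* assume the memory pools have headroom */
}

/* Stand-in for tcp_prune_queue(): < 0 means pruning failed. */
static int prune_queue(struct msock *sk)
{
        sk->rmem_alloc /= 2; /* pretend collapsing freed memory */
        return 0;
}

/* 0 = proceed with queueing, -1 = caller must drop the skb. */
static int try_rmem_schedule(struct msock *sk, unsigned int truesize)
{
        if (!can_ingest(sk, truesize) || !rmem_schedule(sk, truesize)) {
                if (prune_queue(sk) < 0)
                        return -1;
        }
        return 0;
}

int main(void)
{
        struct msock sk = { .rmem_alloc = 120 * 1024, .rcvbuf = 128 * 1024 };

        printf("admit 512 KB skb: %s\n",
               try_rmem_schedule(&sk, 512 * 1024) == 0 ? "yes" : "no");
        return 0;
}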
@@ -5507,7 +5517,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
                 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
                 tp->ooo_last_skb = rb_to_skb(prev);
                 if (!prev || goal <= 0) {
-                        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                        if (tcp_can_ingest(sk, skb) &&
                             !tcp_under_memory_pressure(sk))
                                 break;
                         goal = sk->sk_rcvbuf >> 3;
@@ -5541,12 +5551,12 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
 
         NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
-        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+        if (!tcp_can_ingest(sk, in_skb))
                 tcp_clamp_window(sk);
         else if (tcp_under_memory_pressure(sk))
                 tcp_adjust_rcv_ssthresh(sk);
 
-        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+        if (tcp_can_ingest(sk, in_skb))
                 return 0;
 
         tcp_collapse_ofo_queue(sk);
@@ -5556,7 +5566,7 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
                              NULL,
                              tp->copied_seq, tp->rcv_nxt);
 
-        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+        if (tcp_can_ingest(sk, in_skb))
                 return 0;
 
         /* Collapsing did not help, destructive actions follow.
@@ -5564,7 +5574,7 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
 
         tcp_prune_ofo_queue(sk, in_skb);
 
-        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+        if (tcp_can_ingest(sk, in_skb))
                 return 0;
 
         /* If we are really being abused, tell the caller to silently
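Taken together, the converted checks in tcp_prune_queue() form an escalation ladder: clamp the window (or, under memory pressure, lower rcv_ssthresh), collapse the out-of-order queue, collapse the receive queue, then prune the out-of-order queue, re-testing tcp_can_ingest() after each stage. The sketch below models only that ordering; every helper is a stub with a made-up effect standing in for the kernel function of a similar name:

/* prune_ladder.c - rough model of tcp_prune_queue() after this patch.
 * The stubs stand in for kernel helpers; only the ordering and the
 * repeated can-ingest re-tests are taken from the diff above.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int rmem = 200 * 1024;               /* pretend occupancy */
static const unsigned int rcvbuf = 128 * 1024;       /* assumed limit */
static const unsigned int in_truesize = 32 * 1024;   /* incoming skb */

static bool can_ingest(void)       { return rmem + in_truesize <= rcvbuf; }
static bool memory_pressure(void)  { return false; }
static void clamp_window(void)        { /* shrink advertised window */ }
static void adjust_rcv_ssthresh(void) { /* lower rcv_ssthresh */ }
static void collapse_ofo_queue(void)  { rmem -= rmem / 4; } /* pretend */
static void collapse_rcv_queue(void)  { rmem -= rmem / 4; } /* pretend */
static void prune_ofo_queue(void)     { rmem /= 2; }        /* pretend */

static int prune_queue(void)
{
        if (!can_ingest())
                clamp_window();
        else if (memory_pressure())
                adjust_rcv_ssthresh();
        if (can_ingest())
                return 0;

        collapse_ofo_queue();
        collapse_rcv_queue();
        if (can_ingest())
                return 0;

        prune_ofo_queue();
        if (can_ingest())
                return 0;

        return -1; /* still over the limit: caller drops the skb */
}

int main(void)
{
        printf("prune_queue() -> %d, rmem now %u\n", prune_queue(), rmem);
        return 0;
}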