diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 01dd76be1a58..73c13642d47f 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -212,26 +212,6 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
 
 	lock_sock(sk);
-
-	/* We may have received data on the sk_receive_queue pre-accept and
-	 * then we can not use read_skb in this context because we haven't
-	 * assigned a sk_socket yet so have no link to the ops. The work-around
-	 * is to check the sk_receive_queue and in these cases read skbs off
-	 * queue again. The read_skb hook is not running at this point because
-	 * of lock_sock so we avoid having multiple runners in read_skb.
-	 */
-	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
-		tcp_data_ready(sk);
-		/* This handles the ENOMEM errors if we both receive data
-		 * pre accept and are already under memory pressure. At least
-		 * let user know to retry.
-		 */
-		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
-			copied = -EAGAIN;
-			goto out;
-		}
-	}
-
 msg_bytes_ready:
 	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 	/* The typical case for EFAULT is the socket was gracefully