Revert "bpf, sockmap: Improved check for empty queue"

This reverts commit ba4fec5bd6.

It breaks the Android KABI and will be brought back at a later time when
it is safe to do so.

Bug: 161946584
Change-Id: I95c2e59a4854c4e178d11b890e8401bc3cb64b68
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman, 2023-06-14 16:16:04 +00:00
commit 4903ee3f95, parent 51ffabff7c
2 changed files with 25 additions and 8 deletions
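
The KABI breakage comes from the first hunk below: the reverted change had dropped the skb pointer from struct sk_psock_work_state, which is embedded in struct sk_psock, so it shrinks that structure and shifts the offsets of every member that follows the embedded state — something Android's frozen KABI does not permit. A minimal userspace sketch of that layout effect, using simplified stand-in structs rather than the real kernel definitions:

/*
 * Illustrative sketch only: simplified stand-ins for the structures in
 * this diff, showing why dropping the skb member is a KABI-visible
 * change. The real struct sk_psock_work_state / struct sk_psock live in
 * include/linux/skmsg.h and carry many more fields.
 */
#include <stddef.h>
#include <stdio.h>

struct sk_buff;				/* opaque, used as a pointer only */

struct work_state_old {			/* layout this revert restores */
	struct sk_buff *skb;
	unsigned int len;
	unsigned int off;
};

struct work_state_new {			/* layout after the reverted patch */
	unsigned int len;
	unsigned int off;
};

struct psock_old {			/* toy stand-in for struct sk_psock */
	struct work_state_old work_state;
	unsigned long some_later_field;	/* any member that follows work_state */
};

struct psock_new {
	struct work_state_new work_state;
	unsigned long some_later_field;
};

int main(void)
{
	/* Removing the pointer shrinks the embedded struct and shifts every
	 * later member of the container, which a frozen KABI does not allow.
	 */
	printf("old: sizeof(psock)=%zu, later field at offset %zu\n",
	       sizeof(struct psock_old), offsetof(struct psock_old, some_later_field));
	printf("new: sizeof(psock)=%zu, later field at offset %zu\n",
	       sizeof(struct psock_new), offsetof(struct psock_new, some_later_field));
	return 0;
}

The hunks below simply put the old member and the matching sk_psock_backlog() handling back until the upstream change can be reapplied without altering the frozen layout.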

include/linux/skmsg.h

@@ -71,6 +71,7 @@ struct sk_psock_link {
 };
 
 struct sk_psock_work_state {
+	struct sk_buff			*skb;
 	u32				len;
 	u32				off;
 };

net/core/skmsg.c

@@ -621,12 +621,16 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 
 static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
+			       struct sk_buff *skb,
			       int len, int off)
 {
 	spin_lock_bh(&psock->ingress_lock);
 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+		state->skb = skb;
 		state->len = len;
 		state->off = off;
+	} else {
+		sock_drop(psock->sk, skb);
 	}
 	spin_unlock_bh(&psock->ingress_lock);
 }
@@ -637,17 +641,23 @@ static void sk_psock_backlog(struct work_struct *work)
 	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
 	struct sk_psock_work_state *state = &psock->work_state;
 	struct sk_buff *skb = NULL;
-	u32 len = 0, off = 0;
 	bool ingress;
+	u32 len, off;
 	int ret;
 
 	mutex_lock(&psock->work_mutex);
-	if (unlikely(state->len)) {
+	if (unlikely(state->skb)) {
+		spin_lock_bh(&psock->ingress_lock);
+		skb = state->skb;
 		len = state->len;
 		off = state->off;
+		state->skb = NULL;
+		spin_unlock_bh(&psock->ingress_lock);
 	}
+	if (skb)
+		goto start;
 
-	while ((skb = skb_peek(&psock->ingress_skb))) {
+	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 		len = skb->len;
 		off = 0;
 		if (skb_bpf_strparser(skb)) {
@@ -656,6 +666,7 @@ static void sk_psock_backlog(struct work_struct *work)
 			off = stm->offset;
 			len = stm->full_len;
 		}
+start:
 		ingress = skb_bpf_ingress(skb);
 		skb_bpf_redirect_clear(skb);
 		do {
@@ -665,7 +676,8 @@ static void sk_psock_backlog(struct work_struct *work)
						  len, ingress);
 			if (ret <= 0) {
 				if (ret == -EAGAIN) {
-					sk_psock_skb_state(psock, state, len, off);
+					sk_psock_skb_state(psock, state, skb,
+							   len, off);
 
 					/* Delay slightly to prioritize any
 					 * other work that might be here.
@@ -677,17 +689,16 @@ static void sk_psock_backlog(struct work_struct *work)
 				/* Hard errors break pipe and stop xmit. */
 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+				sock_drop(psock->sk, skb);
 				goto end;
 			}
 			off += ret;
 			len -= ret;
 		} while (len);
 
-		skb = skb_dequeue(&psock->ingress_skb);
-		if (!ingress) {
+		if (!ingress)
 			kfree_skb(skb);
-		}
 	}
 end:
 	mutex_unlock(&psock->work_mutex);
 }
@@ -779,6 +790,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
 		skb_bpf_redirect_clear(skb);
 		sock_drop(psock->sk, skb);
 	}
+	kfree_skb(psock->work_state.skb);
+	/* We null the skb here to ensure that calls to sk_psock_backlog
+	 * do not pick up the free'd skb.
+	 */
+	psock->work_state.skb = NULL;
 	__sk_psock_purge_ingress_msg(psock);
 }
@@ -797,6 +813,7 @@ void sk_psock_stop(struct sk_psock *psock)
 	spin_lock_bh(&psock->ingress_lock);
 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 	sk_psock_cork_free(psock);
+	__sk_psock_zap_ingress(psock);
 	spin_unlock_bh(&psock->ingress_lock);
 }
@@ -811,7 +828,6 @@ static void sk_psock_destroy(struct work_struct *work)
 	sk_psock_done_strp(psock);
 
 	cancel_delayed_work_sync(&psock->work);
-	__sk_psock_zap_ingress(psock);
 	mutex_destroy(&psock->work_mutex);
 
 	psock_progs_drop(&psock->progs);