Revert "bpf, sockmap: Convert schedule_work into delayed_work"

This reverts commit 9f4d7efb33.

It breaks the Android KABI and will be brought back at a later time when
it is safe to do so.

Bug: 161946584
Change-Id: Ic3e8a533b0958aea3b2f58af8aa8292377e78ace
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
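For readers wondering why a work_struct/delayed_work swap is a KABI break: struct delayed_work embeds a timer on top of struct work_struct, so the conversion grows the work member of struct sk_psock and shifts the offset of every field behind it. Prebuilt vendor modules compiled against the old layout would then read and write the wrong bytes. A minimal, runnable userspace sketch of the effect, using hypothetical stand-in structs rather than the real kernel definitions:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins: the real kernel layouts differ, but the
 * relationship is the same -- delayed_work = work_struct + a timer. */
struct fake_work_struct {
	void *entry_prev, *entry_next;	/* list linkage */
	void (*func)(struct fake_work_struct *);
};

struct fake_delayed_work {
	struct fake_work_struct work;
	unsigned long timer_expires;	/* stand-in for struct timer_list */
	void *timer_fn;
};

/* A psock-like container before and after the delayed_work conversion. */
struct psock_v1 { struct fake_work_struct work;  long rwork; };
struct psock_v2 { struct fake_delayed_work work; long rwork; };

int main(void)
{
	/* The member grows, so every field behind it moves; a module
	 * built against one layout misreads the other. */
	printf("work_struct  flavor: size=%zu offsetof(rwork)=%zu\n",
	       sizeof(struct psock_v1), offsetof(struct psock_v1, rwork));
	printf("delayed_work flavor: size=%zu offsetof(rwork)=%zu\n",
	       sizeof(struct psock_v2), offsetof(struct psock_v2, rwork));
	return 0;
}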

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h

@@ -105,7 +105,7 @@ struct sk_psock {
 	struct proto			*sk_proto;
 	struct mutex			work_mutex;
 	struct sk_psock_work_state	work_state;
-	struct delayed_work		work;
+	struct work_struct		work;
 	struct rcu_work			rwork;
 };
 
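That single member change is what drives every hunk below: the two workqueue flavors have parallel but separate APIs, and each call site has to move between them. A kernel-style sketch of the correspondences, using a hypothetical struct demo container rather than sk_psock:

#include <linux/workqueue.h>

/* Hypothetical container; sk_psock only ever has one of the two. */
struct demo {
	struct work_struct work;	/* plain flavor (restored by this revert) */
	struct delayed_work dwork;	/* delayed flavor (removed by it) */
};

static void demo_fn(struct work_struct *work)
{
	/* both flavors share this callback signature */
}

static void demo_api_pairs(struct demo *d)
{
	/* init */
	INIT_WORK(&d->work, demo_fn);
	INIT_DELAYED_WORK(&d->dwork, demo_fn);

	/* schedule: the delayed flavor takes a delay in jiffies, and a
	 * delay of 0 queues immediately, matching schedule_work() */
	schedule_work(&d->work);
	schedule_delayed_work(&d->dwork, 0);

	/* cancel: each flavor has its own synchronous variant */
	cancel_work_sync(&d->work);
	cancel_delayed_work_sync(&d->dwork);
}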

diff --git a/net/core/skmsg.c b/net/core/skmsg.c
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c

@@ -481,7 +481,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 	}
 out:
 	if (psock->work_state.skb && copied > 0)
-		schedule_delayed_work(&psock->work, 0);
+		schedule_work(&psock->work);
 	return copied;
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@@ -639,8 +639,7 @@ static void sk_psock_skb_state(struct sk_psock *psock,
 
 static void sk_psock_backlog(struct work_struct *work)
 {
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
+	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 	struct sk_psock_work_state *state = &psock->work_state;
 	struct sk_buff *skb = NULL;
 	bool ingress;
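This hunk is the subtle part of the conversion. A delayed_work callback is still invoked with a struct work_struct * argument, but that pointer addresses the work member nested inside the delayed_work, so the container has to be recovered through to_delayed_work() before container_of(); the plain flavor restored here goes straight to container_of(). A sketch with hypothetical container types:

#include <linux/workqueue.h>

/* Hypothetical containers, standing in for struct sk_psock. */
struct demo_plain   { int state; struct work_struct  work; };
struct demo_delayed { int state; struct delayed_work work; };

/* Plain flavor: the callback argument is the embedded member itself. */
static void plain_cb(struct work_struct *work)
{
	struct demo_plain *d = container_of(work, struct demo_plain, work);

	d->state++;
}

/* Delayed flavor: the callback argument is &dwork->work, so recover the
 * delayed_work with to_delayed_work() first, as the removed lines did. */
static void delayed_cb(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_delayed *d = container_of(dwork, struct demo_delayed, work);

	d->state++;
}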
@@ -680,12 +679,6 @@ static void sk_psock_backlog(struct work_struct *work)
 				if (ret == -EAGAIN) {
 					sk_psock_skb_state(psock, state, skb,
 							   len, off);
-
-					/* Delay slightly to prioritize any
-					 * other work that might be here.
-					 */
-					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-						schedule_delayed_work(&psock->work, 1);
 					goto end;
 				}
 				/* Hard errors break pipe and stop xmit. */
@@ -740,7 +733,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	INIT_LIST_HEAD(&psock->link);
 	spin_lock_init(&psock->link_lock);
 
-	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
+	INIT_WORK(&psock->work, sk_psock_backlog);
 	mutex_init(&psock->work_mutex);
 	INIT_LIST_HEAD(&psock->ingress_msg);
 	spin_lock_init(&psock->ingress_lock);
@@ -829,7 +822,7 @@ static void sk_psock_destroy(struct work_struct *work)
 
 	sk_psock_done_strp(psock);
 
-	cancel_delayed_work_sync(&psock->work);
+	cancel_work_sync(&psock->work);
 	mutex_destroy(&psock->work_mutex);
 
 	psock_progs_drop(&psock->progs);
@@ -944,7 +937,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	}
 
 	skb_queue_tail(&psock_other->ingress_skb, skb);
-	schedule_delayed_work(&psock_other->work, 0);
+	schedule_work(&psock_other->work);
 	spin_unlock_bh(&psock_other->ingress_lock);
 	return 0;
 }
@@ -1024,7 +1017,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 		spin_lock_bh(&psock->ingress_lock);
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 			skb_queue_tail(&psock->ingress_skb, skb);
-			schedule_delayed_work(&psock->work, 0);
+			schedule_work(&psock->work);
 			err = 0;
 		}
 		spin_unlock_bh(&psock->ingress_lock);
@@ -1055,7 +1048,7 @@ static void sk_psock_write_space(struct sock *sk)
 	psock = sk_psock(sk);
 	if (likely(psock)) {
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-			schedule_delayed_work(&psock->work, 0);
+			schedule_work(&psock->work);
 		write_space = psock->saved_write_space;
 	}
 	rcu_read_unlock();

diff --git a/net/core/sock_map.c b/net/core/sock_map.c
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c

@@ -1624,10 +1624,9 @@ void sock_map_close(struct sock *sk, long timeout)
 		rcu_read_unlock();
 		sk_psock_stop(psock);
 		release_sock(sk);
-		cancel_delayed_work_sync(&psock->work);
+		cancel_work_sync(&psock->work);
 		sk_psock_put(sk, psock);
 	}
-
 	/* Make sure we do not recurse. This is a bug.
 	 * Leak the socket instead of crashing on a stack overflow.
 	 */
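One property both sides of this revert preserve, visible in the sock_map_close() hunk above: the backlog work is cancelled synchronously before the last psock reference is dropped, so the worker can never run against freed memory. A condensed, hypothetical sketch of that lifecycle with the plain work_struct flavor this revert restores:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo {				/* hypothetical, like sk_psock */
	struct work_struct work;
};

static void demo_backlog(struct work_struct *work)
{
	/* drain queued work here */
}

static struct demo *demo_create(void)
{
	struct demo *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		INIT_WORK(&d->work, demo_backlog);	/* cf. sk_psock_init() */
	return d;
}

static void demo_destroy(struct demo *d)
{
	/* cancel before the final free, as sock_map_close() cancels before
	 * sk_psock_put(): otherwise the worker could touch freed memory */
	cancel_work_sync(&d->work);
	kfree(d);
}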