Revert "bpf: Defer the free of inner map when necessary"

This reverts commit 62fca83303 which is
commit 876673364161da50eed6b472d746ef88242b2368 upstream.

It breaks the Android kernel abi and can be brought back in the future
in an abi-safe way if it is really needed.

Bug: 161946584
Change-Id: I11fc14db5e679ca1c8ff97dcd96cb2a8fd35122a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman
Date:   2024-02-02 22:52:03 +00:00
Commit: 329c931b9d
Parent: c16bb76a0c

3 changed files with 9 additions and 41 deletions
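
Background note (not part of the original commit message): the reverted
upstream change deferred the free of an inner map because a BPF program,
including a sleepable one that is only protected by tasks-trace RCU, may
still be using the inner map pointer it got from the outer map after that
slot has been replaced or deleted. The sketch below is a hypothetical
map-in-map program following libbpf conventions; the map, section, and
function names are made up and it is not part of this tree.

/* Hypothetical map-in-map BPF program (illustration only). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Template describing the inner maps stored in 'outer'. */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, 8);
	__type(key, __u32);
	__array(values, struct inner_map);
} outer SEC(".maps");

SEC("fentry.s/do_unlinkat")	/* sleepable, so only tasks-trace RCU applies */
int probe(void *ctx)
{
	__u32 okey = 0, ikey = 0;
	void *inner;

	/* The returned pointer is only protected by (tasks-trace) RCU; if
	 * the slot is swapped out concurrently, the old inner map must not
	 * be freed until this program can no longer be running.
	 */
	inner = bpf_map_lookup_elem(&outer, &okey);
	if (!inner)
		return 0;

	bpf_map_lookup_elem(inner, &ikey);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";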

--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h

@@ -241,11 +241,7 @@ struct bpf_map {
 	 */
 	atomic64_t refcnt ____cacheline_aligned;
 	atomic64_t usercnt;
-	/* rcu is used before freeing and work is only used during freeing */
-	union {
-		struct work_struct work;
-		struct rcu_head rcu;
-	};
+	struct work_struct work;
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
 	/* 'Ownership' of program-containing map is claimed by the first program
@@ -261,7 +257,6 @@ struct bpf_map {
 	} owner;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
-	bool free_after_mult_rcu_gp;
 	s64 __percpu *elem_count;
 };

--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c

@@ -117,15 +117,10 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
 
 void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
 {
-	struct bpf_map *inner_map = ptr;
-
-	/* The inner map may still be used by both non-sleepable and sleepable
-	 * bpf program, so free it after one RCU grace period and one tasks
-	 * trace RCU grace period.
+	/* ptr->ops->map_free() has to go through one
+	 * rcu grace period by itself.
 	 */
-	if (need_defer)
-		WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
-	bpf_map_put(inner_map);
+	bpf_map_put(ptr);
 }
 
 u32 bpf_map_fd_sys_lookup_elem(void *ptr)

--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c

@@ -630,28 +630,6 @@ static void bpf_map_put_uref(struct bpf_map *map)
 	}
 }
 
-static void bpf_map_free_in_work(struct bpf_map *map)
-{
-	INIT_WORK(&map->work, bpf_map_free_deferred);
-	/* Avoid spawning kworkers, since they all might contend
-	 * for the same mutex like slab_mutex.
-	 */
-	queue_work(system_unbound_wq, &map->work);
-}
-
-static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
-{
-	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
-}
-
-static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
-{
-	if (rcu_trace_implies_rcu_gp())
-		bpf_map_free_rcu_gp(rcu);
-	else
-		call_rcu(rcu, bpf_map_free_rcu_gp);
-}
-
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (unrelying map implementation ops->map_free() might sleep)
  */
@@ -661,11 +639,11 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 		/* bpf_map_free_id() must be called first */
 		bpf_map_free_id(map, do_idr_lock);
 		btf_put(map->btf);
-
-		if (READ_ONCE(map->free_after_mult_rcu_gp))
-			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
-		else
-			bpf_map_free_in_work(map);
+		INIT_WORK(&map->work, bpf_map_free_deferred);
+		/* Avoid spawning kworkers, since they all might contend
+		 * for the same mutex like slab_mutex.
+		 */
+		queue_work(system_unbound_wq, &map->work);
 	}
 }
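
For completeness, a hedged sketch of the userspace operation that reaches
bpf_map_fd_put_ptr() above: updating (or deleting) an element of the outer
map drops the kernel's reference to the old inner map, and the reverted
change is what made that drop wait for the extra RCU grace periods instead
of taking the plain bpf_map_put() path restored by this revert.
replace_inner() is a made-up helper; bpf_map_update_elem() is the real
libbpf call.

#include <bpf/bpf.h>

/* Illustrative helper, not a libbpf API. */
static int replace_inner(int outer_fd, unsigned int key, int new_inner_fd)
{
	/* From userspace the inner map is referenced by fd; the kernel
	 * resolves it to a struct bpf_map pointer stored in the slot and
	 * puts the previous one via bpf_map_fd_put_ptr().
	 */
	return bpf_map_update_elem(outer_fd, &key, &new_inner_fd, BPF_ANY);
}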