page_pool: fix inconsistency for page_pool_ring_[un]lock()
[ Upstream commit 368d3cb406cdd074d1df2ad9ec06d1bfcb664882 ]
page_pool_ring_[un]lock() uses in_softirq() to decide which
spin lock variant to use. When they are called in a
context where in_softirq() is false, spin_lock_bh() is
called in page_pool_ring_lock() while spin_unlock() is
called in page_pool_ring_unlock(), because spin_lock_bh()
has disabled the softirq in page_pool_ring_lock(), which
causes an inconsistent spin lock pairing.
This patch fixes it by returning the in_softirq state from
page_pool_producer_lock(), and using it to decide which
spin lock variant to use in page_pool_producer_unlock().
As pool->ring has both producer and consumer locks, rename
the functions to page_pool_producer_[un]lock() to reflect
their actual usage. Also move them to page_pool.c, as they
are only used there, and remove the 'inline', as the
compiler may have a better idea of whether to inline them.
Fixes: 7886244736a4 ("net: page_pool: Add bulk support for ptr_ring")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://lore.kernel.org/r/20230522031714.5089-1-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
7dccd5fa7e
commit
7c95f56995
@ -383,22 +383,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
|
|||||||
page_pool_update_nid(pool, new_nid);
|
page_pool_update_nid(pool, new_nid);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void page_pool_ring_lock(struct page_pool *pool)
|
|
||||||
__acquires(&pool->ring.producer_lock)
|
|
||||||
{
|
|
||||||
if (in_softirq())
|
|
||||||
spin_lock(&pool->ring.producer_lock);
|
|
||||||
else
|
|
||||||
spin_lock_bh(&pool->ring.producer_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void page_pool_ring_unlock(struct page_pool *pool)
|
|
||||||
__releases(&pool->ring.producer_lock)
|
|
||||||
{
|
|
||||||
if (in_softirq())
|
|
||||||
spin_unlock(&pool->ring.producer_lock);
|
|
||||||
else
|
|
||||||
spin_unlock_bh(&pool->ring.producer_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _NET_PAGE_POOL_H */
|
#endif /* _NET_PAGE_POOL_H */
|
||||||
|
@ -133,6 +133,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
|
|||||||
#define recycle_stat_add(pool, __stat, val)
|
#define recycle_stat_add(pool, __stat, val)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static bool page_pool_producer_lock(struct page_pool *pool)
|
||||||
|
__acquires(&pool->ring.producer_lock)
|
||||||
|
{
|
||||||
|
bool in_softirq = in_softirq();
|
||||||
|
|
||||||
|
if (in_softirq)
|
||||||
|
spin_lock(&pool->ring.producer_lock);
|
||||||
|
else
|
||||||
|
spin_lock_bh(&pool->ring.producer_lock);
|
||||||
|
|
||||||
|
return in_softirq;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void page_pool_producer_unlock(struct page_pool *pool,
|
||||||
|
bool in_softirq)
|
||||||
|
__releases(&pool->ring.producer_lock)
|
||||||
|
{
|
||||||
|
if (in_softirq)
|
||||||
|
spin_unlock(&pool->ring.producer_lock);
|
||||||
|
else
|
||||||
|
spin_unlock_bh(&pool->ring.producer_lock);
|
||||||
|
}
|
||||||
|
|
||||||
static int page_pool_init(struct page_pool *pool,
|
static int page_pool_init(struct page_pool *pool,
|
||||||
const struct page_pool_params *params)
|
const struct page_pool_params *params)
|
||||||
{
|
{
|
||||||
@ -615,6 +638,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
|
|||||||
int count)
|
int count)
|
||||||
{
|
{
|
||||||
int i, bulk_len = 0;
|
int i, bulk_len = 0;
|
||||||
|
bool in_softirq;
|
||||||
|
|
||||||
for (i = 0; i < count; i++) {
|
for (i = 0; i < count; i++) {
|
||||||
struct page *page = virt_to_head_page(data[i]);
|
struct page *page = virt_to_head_page(data[i]);
|
||||||
@ -633,7 +657,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
/* Bulk producer into ptr_ring page_pool cache */
|
/* Bulk producer into ptr_ring page_pool cache */
|
||||||
page_pool_ring_lock(pool);
|
in_softirq = page_pool_producer_lock(pool);
|
||||||
for (i = 0; i < bulk_len; i++) {
|
for (i = 0; i < bulk_len; i++) {
|
||||||
if (__ptr_ring_produce(&pool->ring, data[i])) {
|
if (__ptr_ring_produce(&pool->ring, data[i])) {
|
||||||
/* ring full */
|
/* ring full */
|
||||||
@ -642,7 +666,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
recycle_stat_add(pool, ring, i);
|
recycle_stat_add(pool, ring, i);
|
||||||
page_pool_ring_unlock(pool);
|
page_pool_producer_unlock(pool, in_softirq);
|
||||||
|
|
||||||
/* Hopefully all pages was return into ptr_ring */
|
/* Hopefully all pages was return into ptr_ring */
|
||||||
if (likely(i == bulk_len))
|
if (likely(i == bulk_len))
|
||||||
|
Loading…
Reference in New Issue
Block a user