Revert "mm/sparsemem: fix race in accessing memory_section->usage"
This reverts commit 68ed9e3332 which is
commit 5ec8e8ea8b7783fab150cf86404fc38cb4db8800 upstream.
It breaks the Android kernel abi and can be brought back in the future
in an abi-safe way if it is really needed.
Bug: 161946584
Change-Id: Id78d131f9d910aa331832fb9b7cda4088e37d5f2
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c801066eca (parent 2dbddbe358)
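The upstream change being reverted closed a use-after-free race: memory hot-remove (section_deactivate()) could free ms->usage while a concurrent pfn_valid()/pfn_section_valid() was still dereferencing it, so the fix wrapped the readers in rcu_read_lock()/rcu_read_unlock(), read the pointer with READ_ONCE(), and deferred the free with kfree_rcu(). The revert below restores the plain, unprotected accesses. As a rough illustration of that reader/teardown pattern, here is a minimal userspace sketch built on liburcu; the names usage, lookup_bit and teardown, and the build line, are assumptions for illustration only, not kernel code.

/*
 * Minimal userspace sketch (not kernel code) of the RCU pattern that the
 * reverted patch applied to ms->usage, written against liburcu.
 * Assumed build line: cc rcu_sketch.c -lurcu -lpthread
 */
#include <urcu.h>     /* liburcu: rcu_read_lock(), synchronize_rcu(), ... */
#include <stdio.h>
#include <stdlib.h>

struct usage {
	unsigned long subsection_map;   /* stands in for the subsection bitmap */
};

static struct usage *usage_ptr;         /* stands in for ms->usage */

/* Reader side: what pfn_section_valid()/pfn_valid() did with the patch. */
static int lookup_bit(int idx)
{
	struct usage *u;
	int ret = 0;

	rcu_read_lock();
	u = rcu_dereference(usage_ptr);  /* the kernel patch used READ_ONCE() */
	if (u)                           /* cf. the valid_section() check */
		ret = !!(u->subsection_map & (1UL << idx));
	rcu_read_unlock();
	return ret;
}

/* Teardown side: what section_deactivate() did with the patch. */
static void teardown(void)
{
	struct usage *u = usage_ptr;

	rcu_assign_pointer(usage_ptr, NULL);
	synchronize_rcu();    /* the kernel patch used kfree_rcu() instead */
	free(u);
}

int main(void)
{
	rcu_register_thread();

	usage_ptr = calloc(1, sizeof(*usage_ptr));
	usage_ptr->subsection_map = 0x2;

	printf("bit 1 valid: %d\n", lookup_bit(1));  /* 1 */
	teardown();
	printf("bit 1 valid: %d\n", lookup_bit(1));  /* 0: no use-after-free */

	rcu_unregister_thread();
	return 0;
}

The point of the sketch is the ordering on the teardown side: the pointer is cleared first, then the grace period (synchronize_rcu() here, kfree_rcu() in the patch) guarantees no reader still holds the old pointer before it is freed. The revert removes exactly that guarantee from the kernel paths shown in the diff below.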
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1747,7 +1747,6 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
 #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
 
 struct mem_section_usage {
-	struct rcu_head rcu;
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
 #endif
@@ -1941,7 +1940,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
 	int idx = subsection_map_index(pfn);
 
-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+	return test_bit(idx, ms->usage->subsection_map);
 }
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -1965,7 +1964,6 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 static inline int pfn_valid(unsigned long pfn)
 {
 	struct mem_section *ms;
-	int ret;
 
 	/*
 	 * Ensure the upper PAGE_SHIFT bits are clear in the
@@ -1979,19 +1977,13 @@ static inline int pfn_valid(unsigned long pfn)
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 	ms = __pfn_to_section(pfn);
-	rcu_read_lock();
-	if (!valid_section(ms)) {
-		rcu_read_unlock();
+	if (!valid_section(ms))
 		return 0;
-	}
 	/*
 	 * Traditionally early sections always returned pfn_valid() for
 	 * the entire section-sized span.
 	 */
-	ret = early_section(ms) || pfn_section_valid(ms, pfn);
-	rcu_read_unlock();
-
-	return ret;
+	return early_section(ms) || pfn_section_valid(ms, pfn);
 }
 #endif
 
diff --git a/mm/sparse.c b/mm/sparse.c
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -792,13 +792,6 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	if (empty) {
 		unsigned long section_nr = pfn_to_section_nr(pfn);
 
-		/*
-		 * Mark the section invalid so that valid_section()
-		 * return false. This prevents code from dereferencing
-		 * ms->usage array.
-		 */
-		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
-
 		/*
 		 * When removing an early section, the usage map is kept (as the
 		 * usage maps of other sections fall into the same page). It
@@ -807,10 +800,16 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 		 * was allocated during boot.
 		 */
 		if (!PageReserved(virt_to_page(ms->usage))) {
-			kfree_rcu(ms->usage, rcu);
-			WRITE_ONCE(ms->usage, NULL);
+			kfree(ms->usage);
+			ms->usage = NULL;
 		}
 		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+		/*
+		 * Mark the section invalid so that valid_section()
+		 * return false. This prevents code from dereferencing
+		 * ms->usage array.
+		 */
+		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
 	}
 
 	/*