x86, pat: fix warn_on_once() while mapping 0-1MB range with /dev/mem
Jeff Mahoney reported:

> With Suse's hwinfo tool, on -tip:
> WARNING: at arch/x86/mm/pat.c:637 reserve_pfn_range+0x5b/0x26d()

reserve_pfn_range() is not tracking the memory range below 1MB
as non-RAM, and as such is inconsistent with the similar checks
in reserve_memtype() and free_memtype().

Rename pagerange_is_ram() to pat_pagerange_is_ram() and add the
"track legacy 1MB region as non-RAM" condition there. Also fix
reserve_pfn_range() to return -EINVAL when the pfn range is RAM,
to be consistent with the design of this API.

Reported-and-tested-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit be03d9e802
parent 4f06b0436b
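For context, the warning fired when user space mapped sub-1MB physical ranges through /dev/mem, which reaches reserve_pfn_range() via remap_pfn_range(). Below is a minimal user-space sketch of that access pattern (illustrative only, not hwinfo's actual code; it assumes root and a kernel that permits /dev/mem access to the chosen range):

/*
 * Minimal sketch (not hwinfo's actual code) of the access pattern
 * that triggered the WARN_ON_ONCE() in reserve_pfn_range():
 * mmap()ing a sub-1MB physical range via /dev/mem.  Pages below 1MB
 * can be listed as RAM in e820, so before this patch the PAT code
 * warned on them; with the patch, the whole legacy 0-1MB region is
 * tracked as non-RAM and the mapping is handled consistently.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* Physical page 0: real-mode IVT/BIOS data area, inside 0-1MB */
	void *p = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, fd, 0x0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first byte of physical page 0: 0x%02x\n",
	       *(volatile unsigned char *)p);

	munmap(p, 0x1000);
	close(fd);
	return 0;
}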
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		if (page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -211,6 +211,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 static struct memtype *cached_entry;
 static u64 cached_start;
 
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		/*
+		 * For legacy reasons, physical address range in the legacy ISA
+		 * region is tracked as non-RAM. This will allow users of
+		 * /dev/mem to map portions of legacy ISA region, even when
+		 * some of those portions are listed(or not even listed) with
+		 * different e820 types(RAM/reserved/..)
+		 */
+		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+		    page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -336,20 +363,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type)
 		*new_type = actual_type;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-						      new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type,
+					      new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -446,19 +465,11 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -626,17 +637,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
-	if (is_ram != 0) {
-		/*
-		 * For mapping RAM pages, drivers need to call
-		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
-		 * setting up the PTE.
-		 */
-		WARN_ON_ONCE(1);
-		return 0;
-	}
+	/*
+	 * reserve_pfn_range() doesn't support RAM pages.
+	 */
+	if (is_ram != 0)
+		return -EINVAL;
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -693,7 +700,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 {
 	int is_ram;
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
 		free_memtype(paddr, paddr + size);
 }