dma-mapping: always use VM_DMA_COHERENT for generic DMA remap
Currently the generic dma remap allocator gets a vm_flags argument passed by the caller, which is a little confusing. We just introduced a generic vmalloc-level flag to identify dma coherent allocations, so use that everywhere and remove the now pointless argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
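In short: before this patch each caller of the common remap helpers picked its own vmalloc flag (VM_DMA_COHERENT on arm, VM_MAP on xtensa, VM_USERMAP in dma-iommu and the atomic pool), and afterwards the helpers apply VM_DMA_COHERENT themselves. A minimal before/after sketch of the affected prototypes, taken from the include/linux/dma-mapping.h hunk below:

/* Before: callers passed an explicit vm_flags value. */
void *dma_common_contiguous_remap(struct page *page, size_t size,
		unsigned long vm_flags, pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size,
		unsigned long vm_flags, pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/* After: the helpers hardcode VM_DMA_COHERENT internally. */
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);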
commit 512317401f
parent fe9041c245
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -343,13 +343,12 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	return dma_common_contiguous_remap(page, size, VM_DMA_COHERENT,
-			prot, caller);
+	return dma_common_contiguous_remap(page, size, prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+	dma_common_free_remap(cpu_addr, size);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
@@ -1365,8 +1364,7 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	return dma_common_pages_remap(pages, size, VM_DMA_COHERENT, prot,
-			caller);
+	return dma_common_pages_remap(pages, size, prot, caller);
 }
 
 /*
@@ -1609,7 +1607,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
-		dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+		dma_common_free_remap(cpu_addr, size);
 
 	__iommu_remove_mapping(dev, handle, size);
 	__iommu_free_buffer(dev, pages, size, attrs);
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -167,7 +167,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (PageHighMem(page)) {
 		void *p;
 
-		p = dma_common_contiguous_remap(page, size, VM_MAP,
+		p = dma_common_contiguous_remap(page, size,
 					pgprot_noncached(PAGE_KERNEL),
 					__builtin_return_address(0));
 		if (!p) {
@@ -192,7 +192,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		page = virt_to_page(platform_vaddr_to_cached(vaddr));
 	} else {
 #ifdef CONFIG_MMU
-		dma_common_free_remap(vaddr, size, VM_MAP);
+		dma_common_free_remap(vaddr, size);
 #endif
 		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
 	}
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -617,7 +617,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 			< size)
 		goto out_free_sg;
 
-	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+	vaddr = dma_common_pages_remap(pages, size, prot,
 			__builtin_return_address(0));
 	if (!vaddr)
 		goto out_unmap;
@@ -941,7 +941,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 		pages = __iommu_dma_get_pages(cpu_addr);
 		if (!pages)
 			page = vmalloc_to_page(cpu_addr);
-		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+		dma_common_free_remap(cpu_addr, alloc_size);
 	} else {
 		/* Lowmem means a coherent atomic or CMA allocation */
 		page = virt_to_page(cpu_addr);
@@ -979,7 +979,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
-				VM_USERMAP, prot, __builtin_return_address(0));
+				prot, __builtin_return_address(0));
 		if (!cpu_addr)
 			goto out_free_pages;
 
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -627,13 +627,11 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		unsigned long attrs);
 
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
 			pgprot_t prot, const void *caller);
 
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+			pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
 
 bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -12,12 +12,11 @@
 #include <linux/vmalloc.h>
 
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-		size_t size, unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+		size_t size, pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = get_vm_area_caller(size, vm_flags, caller);
+	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
 	if (!area)
 		return NULL;
 
@@ -34,12 +33,11 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_pages_remap(struct page **pages, size_t size,
-		unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+		pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 	if (!area)
 		return NULL;
 
@@ -53,7 +51,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
 			pgprot_t prot, const void *caller)
 {
 	int i;
@@ -67,7 +64,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		pages[i] = nth_page(page, i);
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 
 	kfree(pages);
 
@@ -79,11 +76,11 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 /*
  * Unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!area || (area->flags & vm_flags) != vm_flags) {
+	if (!area || area->flags != VM_DMA_COHERENT) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
@@ -136,7 +133,7 @@ static int __init dma_atomic_pool_init(void)
 	if (!atomic_pool)
 		goto free_page;
 
-	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
+	addr = dma_common_contiguous_remap(page, atomic_pool_size,
 					   pgprot_dmacoherent(PAGE_KERNEL),
 					   __builtin_return_address(0));
 	if (!addr)
@@ -153,7 +150,7 @@ static int __init dma_atomic_pool_init(void)
 	return 0;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -228,7 +225,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	arch_dma_prep_coherent(page, size);
 
 	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+	ret = dma_common_contiguous_remap(page, size,
 			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
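For context, a minimal sketch of what a caller pair looks like after this change, modeled on the arm __dma_alloc_remap/__dma_free_remap hunk above; the example_* wrapper names are illustrative, not from the patch:

/* Illustrative only: mirrors the arm helpers in the diff above. */
static void *example_alloc_remap(struct page *page, size_t size,
		pgprot_t prot)
{
	/* VM_DMA_COHERENT is now applied inside the common helper. */
	return dma_common_contiguous_remap(page, size, prot,
			__builtin_return_address(0));
}

static void example_free_remap(void *cpu_addr, size_t size)
{
	/* The helper itself checks area->flags == VM_DMA_COHERENT. */
	dma_common_free_remap(cpu_addr, size);
}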