ANDROID: iommu/io-pgtable: Allow IOMMU drivers to allocate/free page-table memory
The page-table memory is currently allocated and freed by the io-pgtable layer, with no intervention from the IOMMU driver during this process. However, in some environments the IOMMU driver may need to be aware of the pages that are used for its page tables, so it is useful to allow the IOMMU driver to manage the allocation and freeing of page-table memory. Thus, add support in the io-pgtable layer for IOMMU drivers to allocate and free page-table memory.

Bug: 147086566
Change-Id: I7b38a72da1654273a4312e04c776b10612c57bdc
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
commit 5ab49e4b85 (parent b5fc23d902)
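Before the diff itself, a minimal driver-side sketch of how the new hooks are intended to be used: the driver implements the two callbacks, publishes them through a struct iommu_pgtable_ops, and points cfg->iommu_pgtable_ops at that structure before calling alloc_io_pgtable_ops(); the cookie passed there is the same cookie the callbacks later receive. Everything named my_* below is hypothetical and only illustrates the contract added by this patch; note that the callbacks must honour gfp_mask, since the map path invokes them with GFP_ATOMIC.

/* Hypothetical per-domain state; the driver tracks its own table pages. */
struct my_smmu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	struct list_head	pgtable_pages;	/* pages handed to io-pgtable */
	spinlock_t		lock;
};

static void *my_alloc_pgtable(void *cookie, int order, gfp_t gfp_mask)
{
	struct my_smmu_domain *dom = cookie;
	unsigned long flags;
	struct page *p;

	/* Honour gfp_mask: the map path calls this with GFP_ATOMIC. */
	p = alloc_pages(gfp_mask, order);
	if (!p)
		return NULL;

	spin_lock_irqsave(&dom->lock, flags);
	list_add(&p->lru, &dom->pgtable_pages);
	spin_unlock_irqrestore(&dom->lock, flags);

	/* Page-aligned, cacheable linear-map address, as the contract requires. */
	return page_address(p);
}

static void my_free_pgtable(void *cookie, void *virt, int order)
{
	struct my_smmu_domain *dom = cookie;
	struct page *p = virt_to_page(virt);
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
	list_del(&p->lru);
	spin_unlock_irqrestore(&dom->lock, flags);

	__free_pages(p, order);
}

static const struct iommu_pgtable_ops my_pgtable_ops = {
	.alloc_pgtable	= my_alloc_pgtable,
	.free_pgtable	= my_free_pgtable,
};

static int my_domain_init(struct my_smmu_domain *dom, struct io_pgtable_cfg *cfg)
{
	cfg->iommu_pgtable_ops = &my_pgtable_ops;	/* field added by this patch */
	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, cfg, dom);
	return dom->pgtbl_ops ? 0 : -ENOMEM;
}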
drivers/iommu/io-pgtable-arm.c

@@ -228,21 +228,18 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages)
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
-				    struct io_pgtable_cfg *cfg)
+				    struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct device *dev = cfg->iommu_dev;
 	int order = get_order(size);
-	struct page *p;
 	dma_addr_t dma;
 	void *pages;
 
 	VM_BUG_ON((gfp & __GFP_HIGHMEM));
-	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
-			     gfp | __GFP_ZERO, order);
-	if (!p)
+	pages = io_pgtable_alloc_pages(cfg, cookie, order, gfp | __GFP_ZERO);
+	if (!pages)
 		return NULL;
 
-	pages = page_address(p);
 	if (!cfg->coherent_walk) {
 		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
@@ -262,17 +259,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	__free_pages(p, order);
+	io_pgtable_free_pages(cfg, cookie, pages, order);
 	return NULL;
 }
 
 static void __arm_lpae_free_pages(void *pages, size_t size,
-				  struct io_pgtable_cfg *cfg)
+				  struct io_pgtable_cfg *cfg, void *cookie)
 {
 	if (!cfg->coherent_walk)
 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
-	free_pages((unsigned long)pages, get_order(size));
+	io_pgtable_free_pages(cfg, cookie, pages, get_order(size));
 }
 
 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
@@ -387,6 +384,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 	size_t tblsz = ARM_LPAE_GRANULE(data);
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	void *cookie = data->iop.cookie;
 
 	/* Find our entry at the current level */
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
@@ -402,13 +400,13 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 	/* Grab a pointer to the next level */
 	pte = READ_ONCE(*ptep);
 	if (!pte) {
-		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
+		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg, cookie);
 		if (!cptep)
 			return -ENOMEM;
 
 		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
 		if (pte)
-			__arm_lpae_free_pages(cptep, tblsz, cfg);
+			__arm_lpae_free_pages(cptep, tblsz, cfg, cookie);
 	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
 		__arm_lpae_sync_pte(ptep, cfg);
 	}
@@ -507,6 +505,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 {
 	arm_lpae_iopte *start, *end;
 	unsigned long table_size;
+	void *cookie = data->iop.cookie;
 
 	if (lvl == ARM_LPAE_START_LVL(data))
 		table_size = data->pgd_size;
@@ -530,7 +529,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 	}
 
-	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
+	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -553,11 +552,12 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	size_t tablesz = ARM_LPAE_GRANULE(data);
 	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 	int i, unmap_idx = -1;
+	void *cookie = data->iop.cookie;
 
 	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 		return 0;
 
-	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
+	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, cookie);
 	if (!tablep)
 		return 0; /* Bytes unmapped */
 
@@ -577,7 +577,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
 	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
 	if (pte != blk_pte) {
-		__arm_lpae_free_pages(tablep, tablesz, cfg);
+		__arm_lpae_free_pages(tablep, tablesz, cfg, cookie);
 		/*
 		 * We may race against someone unmapping another part of this
 		 * block, but anything else is invalid. We can't misinterpret
@@ -868,7 +868,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
 	/* Looking good; allocate a pgd */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+					   cookie);
 	if (!data->pgd)
 		goto out_free_data;
 
@@ -965,7 +966,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->arm_lpae_s2_cfg.vtcr = reg;
 
 	/* Allocate pgd pages */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+					   cookie);
 	if (!data->pgd)
 		goto out_free_data;
 
@@ -1053,7 +1055,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 		  (ARM_MALI_LPAE_MEMATTR_IMP_DEF
 		   << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
 
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+					   cookie);
 	if (!data->pgd)
 		goto out_free_data;
 
drivers/iommu/io-pgtable.c

@@ -68,3 +68,36 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
 EXPORT_SYMBOL_GPL(free_io_pgtable_ops);
+
+void *io_pgtable_alloc_pages(struct io_pgtable_cfg *cfg, void *cookie,
+			     int order, gfp_t gfp_mask)
+{
+	struct device *dev;
+	struct page *p;
+
+	if (!cfg)
+		return NULL;
+
+	if (cfg->iommu_pgtable_ops && cfg->iommu_pgtable_ops->alloc_pgtable)
+		return cfg->iommu_pgtable_ops->alloc_pgtable(cookie, order,
+							     gfp_mask);
+
+	dev = cfg->iommu_dev;
+	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
+			     gfp_mask, order);
+	if (!p)
+		return NULL;
+	return page_address(p);
+}
+
+void io_pgtable_free_pages(struct io_pgtable_cfg *cfg, void *cookie, void *virt,
+			   int order)
+{
+	if (!cfg)
+		return;
+
+	if (cfg->iommu_pgtable_ops && cfg->iommu_pgtable_ops->free_pgtable)
+		cfg->iommu_pgtable_ops->free_pgtable(cookie, virt, order);
+	else
+		free_pages((unsigned long)virt, order);
+}
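As a small illustration of the dispatch above (not part of the patch): a page-table format backend pairs the two helpers with the same order for allocation and free, and passes GFP_ATOMIC on map paths, which is exactly what the io-pgtable-arm changes do. The size and flags here are placeholders.

/* Hypothetical pairing from a format backend. */
void *table = io_pgtable_alloc_pages(cfg, cookie, get_order(SZ_4K),
				     GFP_ATOMIC | __GFP_ZERO);
if (table)
	io_pgtable_free_pages(cfg, cookie, table, get_order(SZ_4K));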
include/linux/io-pgtable.h

@@ -46,6 +46,19 @@ struct iommu_flush_ops {
 			       unsigned long iova, size_t granule, void *cookie);
 };
 
+/**
+ * struct iommu_pgtable_ops - IOMMU callbacks for page table memory management.
+ *
+ * @alloc_pgtable: Allocate page table memory, and return a page-aligned
+ *                 cacheable linear mapping address of the start of a physically
+ *                 contiguous region of memory.
+ * @free_pgtable: Free page table memory.
+ */
+struct iommu_pgtable_ops {
+	void *(*alloc_pgtable)(void *cookie, int order, gfp_t gfp_mask);
+	void (*free_pgtable)(void *cookie, void *virt, int order);
+};
+
 /**
  * struct io_pgtable_cfg - Configuration data for a set of page tables.
  *
@@ -58,6 +71,8 @@ struct iommu_flush_ops {
  * @coherent_walk	A flag to indicate whether or not page table walks made
  *			by the IOMMU are coherent with the CPU caches.
  * @tlb:		TLB management callbacks for this set of tables.
+ * @iommu_pgtable_ops:	IOMMU page table memory management callbacks (optional;
+ *			defaults to the buddy allocator if not present).
  * @iommu_dev:		The device representing the DMA configuration for the
  *			page table walker.
  */
@@ -95,6 +110,7 @@ struct io_pgtable_cfg {
 	unsigned int			oas;
 	bool				coherent_walk;
 	const struct iommu_flush_ops	*tlb;
+	const struct iommu_pgtable_ops	*iommu_pgtable_ops;
 	struct device			*iommu_dev;
 
 	/* Low-level data specific to the table format */
@@ -166,6 +182,36 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
  */
 void free_io_pgtable_ops(struct io_pgtable_ops *ops);
 
+/**
+ * io_pgtable_alloc_pages - Allocate memory for page tables using an IOMMU
+ *                          driver's provided callback, or the buddy allocator.
+ *
+ * @cfg: The page table configuration. This will be used to determine if
+ *       the page table memory should be allocated through the IOMMU
+ *       driver's callback, or the buddy allocator.
+ * @cookie: An opaque pointer used by the IOMMU driver's callback.
+ * @order: The order of the size of the allocation.
+ * @gfp_mask: The GFP mask to be used with the allocation.
+ *
+ * Returns a cacheable linear mapping address to a physically contiguous region
+ * of memory. The start of the region must be page-aligned.
+ */
+void *io_pgtable_alloc_pages(struct io_pgtable_cfg *cfg, void *cookie,
+			     int order, gfp_t gfp_mask);
+
+/**
+ * io_pgtable_free_pages - Free memory for page tables using an IOMMU
+ *                         driver's provided callback, or the buddy allocator.
+ *
+ * @cfg: The page table configuration. This will be used to determine if
+ *       the page table memory should be freed through the IOMMU
+ *       driver's callback, or the buddy allocator.
+ * @cookie: An opaque pointer used by the IOMMU driver's callback.
+ * @virt: The virtual address of the memory to free.
+ * @order: The order of the size of the allocation.
+ */
+void io_pgtable_free_pages(struct io_pgtable_cfg *cfg, void *cookie, void *virt,
+			   int order);
+
 /*
  * Internal structures for page table allocator implementations.
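To make the @order parameter concrete (a hedged example, not from the patch): order is the base-2 logarithm of the allocation size in pages, so a 16 KiB table on a kernel with 4 KiB pages is order 2, and get_order() performs that conversion.

int order = get_order(SZ_16K);	/* 2 when PAGE_SIZE == 4 KiB */
void *pgd = io_pgtable_alloc_pages(cfg, cookie, order, GFP_KERNEL | __GFP_ZERO);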