dma-mapping: add dma_{map,unmap}_resource

Map/Unmap a device MMIO resource from a physical address. If no dma_map_ops
method is available, the operation is a no-op.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Author: Niklas Söderlund
Date: 2016-08-10 13:22:16 +02:00
Committer: Vinod Koul
Commit: 6f3d87968f (parent 0e74b34dfc)
2 changed files with 53 additions and 5 deletions

Documentation/DMA-API.txt

@@ -277,14 +277,26 @@ and <size> parameters are provided to do partial page mapping, it is
 recommended that you never use these unless you really know what the
 cache width is.
 
+dma_addr_t
+dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+		 enum dma_data_direction dir, unsigned long attrs)
+
+void
+dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		   enum dma_data_direction dir, unsigned long attrs)
+
+API for mapping and unmapping for MMIO resources. All the notes and
+warnings for the other mapping APIs apply here. The API should only be
+used to map device MMIO resources, mapping of RAM is not permitted.
+
 int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
-In some circumstances dma_map_single() and dma_map_page() will fail to create
-a mapping. A driver can check for these errors by testing the returned
-DMA address with dma_mapping_error(). A non-zero return value means the mapping
-could not be created and the driver should take appropriate action (e.g.
-reduce current DMA mapping usage or delay and try again later).
+In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
+will fail to create a mapping. A driver can check for these errors by testing
+the returned DMA address with dma_mapping_error(). A non-zero return value
+means the mapping could not be created and the driver should take appropriate
+action (e.g. reduce current DMA mapping usage or delay and try again later).
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg,
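
For illustration only (not part of this patch): a minimal sketch of how a
slave-DMA client might use the new calls together with dma_mapping_error().
The names example_map_fifo/example_unmap_fifo and fifo_phys are hypothetical,
as is the choice of direction and error handling policy.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical client: hand a peripheral's MMIO FIFO to a DMA engine.
 * fifo_phys is the physical address of the FIFO register block.
 */
static int example_map_fifo(struct device *dma_dev, phys_addr_t fifo_phys,
			    size_t fifo_size, dma_addr_t *dma_addr)
{
	dma_addr_t addr;

	addr = dma_map_resource(dma_dev, fifo_phys, fifo_size,
				DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, addr))
		return -ENOMEM;	/* back off, or retry later */

	*dma_addr = addr;	/* program this address into the DMA engine */
	return 0;
}

static void example_unmap_fifo(struct device *dma_dev, dma_addr_t addr,
			       size_t fifo_size)
{
	dma_unmap_resource(dma_dev, addr, fifo_size, DMA_BIDIRECTIONAL, 0);
}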

include/linux/dma-mapping.h

@@ -264,6 +264,42 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
+static inline dma_addr_t dma_map_resource(struct device *dev,
+					  phys_addr_t phys_addr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  unsigned long attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	unsigned long pfn = __phys_to_pfn(phys_addr);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	/* Don't allow RAM to be mapped */
+	BUG_ON(pfn_valid(pfn));
+
+	addr = phys_addr;
+	if (ops->map_resource)
+		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+
+	return addr;
+}
+
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+				      size_t size, enum dma_data_direction dir,
+				      unsigned long attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->unmap_resource)
+		ops->unmap_resource(dev, addr, size, dir, attrs);
+	debug_dma_unmap_resource(dev, addr, size, dir);
+}
+
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 					   size_t size,
 					   enum dma_data_direction dir)
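
Also for illustration (not from this patch): how a backend might wire up the
new dma_map_ops callbacks that the inlines above dispatch to. When a backend
provides no callback, the core falls back to a 1:1 translation
(addr = phys_addr), so this trivial direct-mapped sketch is only meant to
show the plumbing; all my_* names are hypothetical, and a real IOMMU backend
would allocate an IOVA and map phys_addr behind it instead.

/*
 * Hypothetical direct-mapped backend: device bus addresses equal CPU
 * physical addresses, so mapping an MMIO resource is just a cast.
 */
static dma_addr_t my_map_resource(struct device *dev, phys_addr_t phys_addr,
				  size_t size, enum dma_data_direction dir,
				  unsigned long attrs)
{
	return (dma_addr_t)phys_addr;
}

static void my_unmap_resource(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/* nothing to tear down for a direct mapping */
}

static struct dma_map_ops my_dma_ops = {
	/* .map_page, .unmap_page, ... as before */
	.map_resource	= my_map_resource,
	.unmap_resource	= my_unmap_resource,
};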