disp: msm: remove gem functions without callsites

Many upstream files and APIs have been removed in the downstream
driver, leaving some function definitions dangling without any
caller. Remove the functions which are not used downstream.

Change-Id: I9f936e7cdac3be6f854b3c67725164fad785f0d4
Signed-off-by: Steve Cohen <quic_cohens@quicinc.com>
commit 5fe7c2f8a0
parent a683fba2e8
Author:    Steve Cohen <quic_cohens@quicinc.com>  2021-05-26 21:01:22 -04:00
Committer: Jeykumar Sankaran

 9 files changed, 2 insertions(+), 545 deletions(-)
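For context, the removed helpers were consumed upstream in matched pairs by
code paths (GPU submit, fbdev, debugfs) that have no downstream equivalent.
Below is a minimal sketch of a hypothetical upstream-style caller, based only
on the signatures removed in this patch, not on code in this tree; the
function name example_kernel_bo is invented for illustration:

    #include <linux/err.h>
    #include <linux/sizes.h>

    #include "msm_drv.h"

    /* Hypothetical caller of the removed kernel-BO helpers. No such
     * call sites exist in the downstream driver, which is why the
     * helpers can be deleted.
     */
    static int example_kernel_bo(struct drm_device *dev,
    		struct msm_gem_address_space *aspace)
    {
    	struct drm_gem_object *bo;
    	uint64_t iova;
    	void *vaddr;

    	/* msm_gem_kernel_new() bundles msm_gem_new() + iova map + vmap */
    	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
    			aspace, &bo, &iova);
    	if (IS_ERR(vaddr))
    		return PTR_ERR(vaddr);

    	/* ... use vaddr for CPU access, iova for device access ... */

    	/* msm_gem_kernel_put() undoes all three steps */
    	msm_gem_kernel_put(bo, aspace);
    	return 0;
    }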

Makefile

@@ -208,7 +208,6 @@ msm_drm-$(CONFIG_THERMAL_OF) += msm_cooling_device.o
 msm_drm-$(CONFIG_DRM_MSM) += msm_atomic.o \
 	msm_fb.o \
-	msm_iommu.o \
 	msm_drv.o \
 	msm_gem.o \
 	msm_gem_prime.o \

msm_drv.h

@@ -1076,10 +1076,6 @@ struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace);
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name);
-
 /* For SDE display */
 struct msm_gem_address_space *
 msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
@@ -1149,12 +1145,8 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -1175,7 +1167,6 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
 		struct dma_buf *dma_buf);
 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
-void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
@@ -1185,14 +1176,6 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
-struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
-		uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
-		uint32_t flags, struct msm_gem_address_space *aspace,
-		struct drm_gem_object **bo, uint64_t *iova);
-void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
-		uint32_t flags, struct msm_gem_address_space *aspace,
-		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
@@ -1201,7 +1184,6 @@ void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
 int msm_gem_delayed_import(struct drm_gem_object *obj);
-void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable);
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
@@ -1216,8 +1198,6 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
-struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
-		int w, int h, int p, uint32_t format);
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 void msm_fbdev_free(struct drm_device *dev);
@@ -1302,9 +1282,6 @@ static inline void __exit msm_mdp_unregister(void)
 #endif /* CONFIG_DRM_MSM_MDP5 */
 #ifdef CONFIG_DEBUG_FS
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
-void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
-void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);

msm_fb.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -38,63 +38,6 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.dirty = drm_atomic_helper_dirtyfb,
 };
 
-#ifdef CONFIG_DEBUG_FS
-void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
-{
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-
-	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
-			fb->width, fb->height, (char *)&fb->format->format,
-			drm_framebuffer_read_refcount(fb), fb->base.id);
-
-	for (i = 0; i < n; i++) {
-		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
-				i, fb->offsets[i], fb->pitches[i]);
-		msm_gem_describe(fb->obj[i], m);
-	}
-}
-#endif
-
-void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable)
-{
-	struct msm_framebuffer *msm_fb;
-	int i, n;
-	struct drm_gem_object *bo;
-	struct msm_gem_object *msm_obj;
-
-	if (!fb) {
-		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
-		return;
-	}
-
-	if (!fb->format) {
-		DRM_ERROR("from:%pS null fb->format\n",
-				__builtin_return_address(0));
-		return;
-	}
-
-	msm_fb = to_msm_framebuffer(fb);
-	n = fb->format->num_planes;
-	for (i = 0; i < n; i++) {
-		bo = msm_framebuffer_bo(fb, i);
-		if (bo) {
-			msm_obj = to_msm_bo(bo);
-			if (enable)
-				msm_obj->flags |= MSM_BO_KEEPATTRS;
-			else
-				msm_obj->flags &= ~MSM_BO_KEEPATTRS;
-		}
-	}
-}
-
 /* prepare/pin all the fb's bo's for scanout. Note that it is not valid
  * to prepare an fb more multiple different initiator 'id's. But that
  * should be fine, since only the scanout (mdpN) side of things needs
@@ -338,44 +281,3 @@ fail:
 	return ERR_PTR(ret);
 }
-
-struct drm_framebuffer *
-msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
-{
-	struct drm_mode_fb_cmd2 mode_cmd = {
-		.pixel_format = format,
-		.width = w,
-		.height = h,
-		.pitches = { p },
-	};
-	struct drm_gem_object *bo;
-	struct drm_framebuffer *fb;
-	int size;
-
-	/* allocate backing bo */
-	size = mode_cmd.pitches[0] * mode_cmd.height;
-	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
-	bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
-	if (IS_ERR(bo)) {
-		dev_warn(dev->dev, "could not allocate stolen bo\n");
-		/* try regular bo: */
-		bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
-	}
-	if (IS_ERR(bo)) {
-		dev_err(dev->dev, "failed to allocate buffer object\n");
-		return ERR_CAST(bo);
-	}
-
-	msm_gem_object_set_name(bo, "stolenfb");
-
-	fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
-	if (IS_ERR(fb)) {
-		dev_err(dev->dev, "failed to allocate fb\n");
-		/* note: if fb creation failed, we can't rely on fb destroy
-		 * to unref the bo:
-		 */
-		drm_gem_object_put(bo);
-		return ERR_CAST(fb);
-	}
-
-	return fb;
-}

msm_gem.c

@@ -518,51 +518,6 @@ fail:
 	del_vma(vma);
 	return ret;
 }
-
-static int msm_gem_pin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_gem_vma *vma;
-	struct page **pages;
-
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
-		return -EBUSY;
-
-	vma = lookup_vma(obj, aspace);
-	if (WARN_ON(!vma))
-		return -EINVAL;
-
-	pages = get_pages(obj);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-			obj->size >> PAGE_SHIFT, msm_obj->flags);
-}
-
-/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	u64 local;
-	int ret;
-
-	mutex_lock(&msm_obj->lock);
-
-	ret = msm_gem_get_iova_locked(obj, aspace, &local);
-
-	if (!ret)
-		ret = msm_gem_pin_iova(obj, aspace);
-
-	if (!ret)
-		*iova = local;
-
-	mutex_unlock(&msm_obj->lock);
-	return ret;
-}
 
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
@@ -594,27 +549,6 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 	return vma ? vma->iova : 0;
 }
 
-/*
- * Unpin a iova by updating the reference counts. The memory isn't actually
- * purged until something else (shrinker, mm_notifier, destroy, etc) decides
- * to get rid of it
- */
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_gem_vma *vma;
-
-	mutex_lock(&msm_obj->lock);
-	vma = lookup_vma(obj, aspace);
-
-	if (!WARN_ON(!vma))
-		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
-				msm_obj->flags);
-
-	mutex_unlock(&msm_obj->lock);
-}
-
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
@@ -781,17 +715,6 @@ void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 	return get_vaddr(obj, MSM_MADV_WILLNEED);
 }
 
-/*
- * Don't use this! It is for the very special case of dumping
- * submits from GPU hangs or faults, were the bo may already
- * be MSM_MADV_DONTNEED, but we know the buffer is still on the
- * active list.
- */
-void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
-{
-	return get_vaddr(obj, __MSM_MADV_PURGED);
-}
-
 void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -823,47 +746,6 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 	return (madv != __MSM_MADV_PURGED);
 }
 
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
-{
-	struct drm_device *dev = obj->dev;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	WARN_ON(!is_purgeable(msm_obj));
-	WARN_ON(obj->import_attach);
-
-	mutex_lock_nested(&msm_obj->lock, subclass);
-
-	put_iova(obj);
-	if (msm_obj->aspace) {
-		mutex_lock(&msm_obj->aspace->list_lock);
-		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
-				obj);
-		mutex_unlock(&msm_obj->aspace->list_lock);
-	}
-
-	msm_gem_vunmap_locked(obj);
-
-	put_pages(obj);
-
-	msm_obj->madv = __MSM_MADV_PURGED;
-
-	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
-	drm_gem_free_mmap_offset(obj);
-
-	/* Our goal here is to return as much of the memory as
-	 * is possible back to the system as we are called from OOM.
-	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*.
-	 */
-	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
-	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
-			0, (loff_t)-1);
-
-	mutex_unlock(&msm_obj->lock);
-}
-
 static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -919,99 +801,6 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct dma_fence *fence, const char *type,
-		struct seq_file *m)
-{
-	if (!dma_fence_is_signaled(fence))
-		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
-				fence->ops->get_driver_name(fence),
-				fence->ops->get_timeline_name(fence),
-				fence->seqno);
-}
-
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct dma_resv *robj = msm_obj->resv;
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence;
-	struct msm_gem_vma *vma;
-	uint64_t off = drm_vma_node_start(&obj->vma_node);
-	const char *madv;
-
-	mutex_lock(&msm_obj->lock);
-
-	switch (msm_obj->madv) {
-	case __MSM_MADV_PURGED:
-		madv = " purged";
-		break;
-	case MSM_MADV_DONTNEED:
-		madv = " purgeable";
-		break;
-	case MSM_MADV_WILLNEED:
-	default:
-		madv = "";
-		break;
-	}
-
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
-			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-			obj->name, kref_read(&obj->refcount),
-			off, msm_obj->vaddr);
-
-	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
-
-	if (!list_empty(&msm_obj->vmas)) {
-		seq_puts(m, "      vmas:");
-
-		list_for_each_entry(vma, &msm_obj->vmas, list)
-			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
-				vma->iova, vma->mapped ? "mapped" : "unmapped",
-				vma->inuse);
-
-		seq_puts(m, "\n");
-	}
-
-	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
-	if (fobj) {
-		unsigned int i, shared_count = fobj->shared_count;
-
-		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference(fobj->shared[i]);
-			describe_fence(fence, "Shared", m);
-		}
-	}
-
-	fence = rcu_dereference(robj->fence_excl);
-	if (fence)
-		describe_fence(fence, "Exclusive", m);
-	rcu_read_unlock();
-
-	mutex_unlock(&msm_obj->lock);
-}
-
-void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
-{
-	struct msm_gem_object *msm_obj;
-	int count = 0;
-	size_t size = 0;
-
-	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
-	list_for_each_entry(msm_obj, list, mm_list) {
-		struct drm_gem_object *obj = &msm_obj->base;
-
-		seq_puts(m, "   ");
-		msm_gem_describe(obj, m);
-		count++;
-		size += obj->size;
-	}
-
-	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
-}
-#endif
-
 /* don't call directly! Use drm_gem_object_put() and friends */
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
@@ -1314,53 +1103,6 @@ fail:
 	return ERR_PTR(ret);
 }
 
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
-		uint32_t flags, struct msm_gem_address_space *aspace,
-		struct drm_gem_object **bo, uint64_t *iova)
-{
-	void *vaddr;
-	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
-	int ret;
-
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	if (iova) {
-		ret = msm_gem_get_iova(obj, aspace, iova);
-		if (ret)
-			goto err;
-	}
-
-	vaddr = msm_gem_get_vaddr(obj);
-	if (IS_ERR(vaddr)) {
-		msm_gem_put_iova(obj, aspace);
-		ret = PTR_ERR(vaddr);
-		goto err;
-	}
-
-	if (bo)
-		*bo = obj;
-
-	return vaddr;
-err:
-	drm_gem_object_put(obj);
-
-	return ERR_PTR(ret);
-}
-
-void msm_gem_kernel_put(struct drm_gem_object *bo,
-		struct msm_gem_address_space *aspace)
-{
-	if (IS_ERR_OR_NULL(bo))
-		return;
-
-	msm_gem_put_vaddr(bo);
-	msm_gem_unpin_iova(bo, aspace);
-	drm_gem_object_put(bo);
-}
-
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(bo);

msm_gem.h

@@ -157,13 +157,6 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 	return msm_obj->gpu != NULL;
 }
 
-static inline bool is_purgeable(struct msm_gem_object *msm_obj)
-{
-	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
-	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
-			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
-}
-
 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 {
 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
@@ -185,7 +178,6 @@ enum msm_gem_lock {
 	OBJ_LOCK_SHRINKER,
 };
 
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,

msm_gem_prime.c

@@ -78,14 +78,6 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	msm_gem_put_pages(obj);
 }
 
-struct dma_resv *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	return msm_obj->resv;
-}
-
 struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
 		struct dma_buf *dma_buf)
 {

msm_gem_vma.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, 2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2016 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -286,31 +286,6 @@ static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
 	.destroy = iommu_aspace_destroy,
 };
 
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name)
-{
-	struct msm_gem_address_space *aspace;
-	u64 size = domain->geometry.aperture_end -
-		domain->geometry.aperture_start;
-
-	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
-	if (!aspace)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&aspace->lock);
-	aspace->name = name;
-	aspace->mmu = msm_iommu_new(dev, domain);
-	aspace->ops = &msm_iommu_aspace_ops;
-
-	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
-		size >> PAGE_SHIFT);
-
-	kref_init(&aspace->kref);
-
-	return aspace;
-}
-
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,

msm_iommu.c (deleted)

@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
-	struct msm_mmu base;
-	struct iommu_domain *domain;
-};
-
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
-
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
-		unsigned long iova, int flags, void *arg)
-{
-	struct msm_iommu *iommu = arg;
-
-	if (iommu->base.handler)
-		return iommu->base.handler(iommu->base.arg, iova, flags);
-
-	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
-	return 0;
-}
-
-static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
-		int cnt)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int ret;
-
-	pm_runtime_get_sync(mmu->dev);
-	ret = iommu_attach_device(iommu->domain, mmu->dev);
-	pm_runtime_put_sync(mmu->dev);
-
-	return ret;
-}
-
-static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
-		int cnt)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-
-	pm_runtime_get_sync(mmu->dev);
-	iommu_detach_device(iommu->domain, mmu->dev);
-	pm_runtime_put_sync(mmu->dev);
-}
-
-static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned len, int prot)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	size_t ret;
-
-//	pm_runtime_get_sync(mmu->dev);
-	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->orig_nents, prot);
-//	pm_runtime_put_sync(mmu->dev);
-	WARN_ON(!ret);
-
-	return (ret == len) ? 0 : -EINVAL;
-}
-
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned len)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-
-	pm_runtime_get_sync(mmu->dev);
-	iommu_unmap(iommu->domain, iova, len);
-	pm_runtime_put_sync(mmu->dev);
-
-	return 0;
-}
-
-static void msm_iommu_destroy(struct msm_mmu *mmu)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-
-	iommu_domain_free(iommu->domain);
-	kfree(iommu);
-}
-
-static const struct msm_mmu_funcs funcs = {
-	.attach = msm_iommu_attach,
-	.detach = msm_iommu_detach,
-	.map = msm_iommu_map,
-	.unmap = msm_iommu_unmap,
-	.destroy = msm_iommu_destroy,
-};
-
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
-{
-	struct msm_iommu *iommu;
-
-	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-	if (!iommu)
-		return ERR_PTR(-ENOMEM);
-
-	iommu->domain = domain;
-	msm_mmu_init(&iommu->base, dev, &funcs);
-	iommu_set_fault_handler(domain, msm_fault_handler, iommu);
-
-	return &iommu->base;
-}

msm_mmu.h

@@ -72,17 +72,9 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
 struct msm_mmu *msm_smmu_new(struct device *dev,
 		enum msm_mmu_domain_type domain);
 
-static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
-		int (*handler)(void *arg, unsigned long iova, int flags))
-{
-	mmu->arg = arg;
-	mmu->handler = handler;
-}
-
 /* SDE smmu driver initialize and cleanup functions */
 int __init msm_smmu_driver_init(void);
 void __exit msm_smmu_driver_cleanup(void);