Merge "msm: kgsl: Fix gpuaddr_in_range() to check upper bound"

qctecmdr 2021-12-14 06:02:40 -08:00 committed by Gerrit - the friendly Code Review server
commit e389f17ab1
6 changed files with 19 additions and 14 deletions
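For context: before this change, kgsl_mmu_gpuaddr_in_range() validated only the start of an allocation against the pagetable's VA window, so a buffer could begin inside the window yet extend past its end. The snippet below is a minimal standalone sketch of that difference, not the driver code; the struct and the example values are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a pagetable VA window; not the kgsl struct. */
struct va_range {
	uint64_t start;
	uint64_t end;	/* exclusive */
};

/* Old behaviour: only the start address was validated. */
static bool addr_in_range_old(const struct va_range *r, uint64_t gpuaddr)
{
	return gpuaddr >= r->start && gpuaddr < r->end;
}

/* New behaviour: the whole [gpuaddr, gpuaddr + size) span must fit. */
static bool addr_in_range_new(const struct va_range *r, uint64_t gpuaddr,
		uint64_t size)
{
	return gpuaddr >= r->start && (gpuaddr + size) < r->end;
}

int main(void)
{
	struct va_range r = { .start = 0x1000, .end = 0x2000 };

	/* Starts in range but runs 0x800 bytes past the end of the window. */
	uint64_t gpuaddr = 0x1c00, size = 0xc00;

	printf("old check: %d\n", addr_in_range_old(&r, gpuaddr));        /* prints 1 */
	printf("new check: %d\n", addr_in_range_new(&r, gpuaddr, size));  /* prints 0 */
	return 0;
}

With the extra size argument, callers such as _verify_ib() can reject indirect buffers that spill past the mapped range, as the hunks below show.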

@@ -1092,8 +1092,8 @@ static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
 	}
 
 	/* Make sure that the address is in range and dword aligned */
-	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr) ||
-		!IS_ALIGNED(ib->gpuaddr, 4)) {
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr,
+		ib->size) || !IS_ALIGNED(ib->gpuaddr, 4)) {
 		pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
 			context->id, ib->gpuaddr);
 		return false;

@@ -705,7 +705,8 @@ static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
 	}
 
 	/* Make sure that the address is mapped */
-	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr,
+			ib->size)) {
 		pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
 			context->id, ib->gpuaddr);
 		return false;

@@ -1304,9 +1304,9 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
 	if (!private)
 		return NULL;
 
-	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr) &&
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr, 0) &&
 		!kgsl_mmu_gpuaddr_in_range(
-			private->pagetable->mmu->securepagetable, gpuaddr))
+			private->pagetable->mmu->securepagetable, gpuaddr, 0))
 		return NULL;
 
 	spin_lock(&private->mem_lock);

@@ -2020,18 +2020,21 @@ static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,
 }
 
 static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
-		uint64_t gpuaddr)
+		uint64_t gpuaddr, uint64_t size)
 {
 	if (gpuaddr == 0)
 		return false;
 
-	if (gpuaddr >= pagetable->va_start && gpuaddr < pagetable->va_end)
+	if (gpuaddr >= pagetable->va_start && (gpuaddr + size) <
+			pagetable->va_end)
 		return true;
 
-	if (gpuaddr >= pagetable->compat_va_start && gpuaddr < pagetable->compat_va_end)
+	if (gpuaddr >= pagetable->compat_va_start && (gpuaddr + size) <
+			pagetable->compat_va_end)
 		return true;
 
-	if (gpuaddr >= pagetable->svm_start && gpuaddr < pagetable->svm_end)
+	if (gpuaddr >= pagetable->svm_start && (gpuaddr + size) <
+			pagetable->svm_end)
 		return true;
 
 	return false;

@@ -520,10 +520,10 @@ enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device)
 }
 
 bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
-		uint64_t gpuaddr)
+		uint64_t gpuaddr, uint64_t size)
 {
 	if (PT_OP_VALID(pagetable, addr_in_range))
-		return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr);
+		return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr, size);
 
 	return false;
 }

@@ -535,7 +535,7 @@ bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
  */
 static bool nommu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
-		uint64_t gpuaddr)
+		uint64_t gpuaddr, uint64_t size)
 {
 	return (gpuaddr != 0) ? true : false;
 }

@@ -136,7 +136,7 @@ struct kgsl_mmu_pt_ops {
 	int (*svm_range)(struct kgsl_pagetable *pt, uint64_t *lo, uint64_t *hi,
 		uint64_t memflags);
 	bool (*addr_in_range)(struct kgsl_pagetable *pagetable,
-		uint64_t gpuaddr);
+		uint64_t gpuaddr, uint64_t size);
 };
 
 enum kgsl_mmu_feature {

@@ -214,7 +214,8 @@ int kgsl_mmu_unmap_range(struct kgsl_pagetable *pt,
 	struct kgsl_memdesc *memdesc, u64 offset, u64 length);
 unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
 	u64 ttbr0, uint64_t addr);
-bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, uint64_t gpuaddr);
+bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, uint64_t gpuaddr,
+	uint64_t size);
 int kgsl_mmu_get_region(struct kgsl_pagetable *pagetable,
 	uint64_t gpuaddr, uint64_t size);
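As the header change above shows, the range check is reached through the pagetable's pt_ops table. Below is a minimal standalone sketch of that indirection with the widened signature; the types and field names are simplified stand-ins for the kgsl definitions, and the values in main() are illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the kgsl types; fields are simplified. */
struct pagetable;

struct pt_ops {
	bool (*addr_in_range)(struct pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size);
};

struct pagetable {
	const struct pt_ops *pt_ops;
	uint64_t va_start;
	uint64_t va_end;	/* exclusive */
};

/* IOMMU-style backend: the whole [gpuaddr, gpuaddr + size) span must fit. */
static bool iommu_addr_in_range(struct pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	if (gpuaddr == 0)
		return false;

	return gpuaddr >= pagetable->va_start &&
		(gpuaddr + size) < pagetable->va_end;
}

/* Generic wrapper: dispatch through the ops table when it is populated. */
static bool gpuaddr_in_range(struct pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	if (pagetable->pt_ops && pagetable->pt_ops->addr_in_range)
		return pagetable->pt_ops->addr_in_range(pagetable,
			gpuaddr, size);

	return false;
}

static const struct pt_ops iommu_ops = {
	.addr_in_range = iommu_addr_in_range,
};

int main(void)
{
	struct pagetable pt = {
		.pt_ops = &iommu_ops,
		.va_start = 0x1000,
		.va_end = 0x2000,
	};

	/* A caller that knows the span passes its size (as _verify_ib() now does). */
	bool span_ok = gpuaddr_in_range(&pt, 0x1800, 0x400);

	/* A caller that only holds an address passes 0 (as kgsl_sharedmem_find() does). */
	bool addr_ok = gpuaddr_in_range(&pt, 0x1800, 0);

	return (span_ok && addr_ok) ? 0 : 1;
}

Callers that know the span pass its size, while callers that only hold a bare address pass 0 and keep the old start-only semantics, as the kgsl_sharedmem_find() hunk above shows.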