drm/msm: fix potential deadlock in gpu init
Somewhere along the way, the firmware loader sprouted another lock
dependency, resulting in possible deadlock scenario:

  &dev->struct_mutex --> &sb->s_type->i_mutex_key#2 --> &mm->mmap_sem

which is problematic vs things like gem mmap.

So introduce a separate mutex to synchronize gpu init.

Signed-off-by: Rob Clark <robdclark@gmail.com>
commit a1ad352333
parent 944fc36c31
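The inversion is easiest to see side by side. Below is a minimal compilable sketch of the two orderings that could deadlock before this change; the two locks are illustrative stand-ins (not the real kernel symbols), and the firmware-loader and fault paths are collapsed to plain mutex_lock calls:

    /* Sketch of the AB-BA inversion; lock names are stand-ins for the
     * real locks named in the chain above. */
    #include <linux/mutex.h>

    static DEFINE_MUTEX(struct_mutex_sketch); /* stands in for dev->struct_mutex */
    static DEFINE_MUTEX(mmap_sem_sketch);     /* stands in for mm->mmap_sem */

    /* gpu init path: held struct_mutex while the firmware loader ended
     * up taking i_mutex and then mmap_sem */
    static void gpu_init_path(void)
    {
            mutex_lock(&struct_mutex_sketch);
            mutex_lock(&mmap_sem_sketch);   /* via request_firmware() */
            mutex_unlock(&mmap_sem_sketch);
            mutex_unlock(&struct_mutex_sketch);
    }

    /* gem mmap fault path: enters with mmap_sem already held, then
     * needs struct_mutex -- the opposite order, hence the deadlock */
    static void gem_fault_path(void)
    {
            mutex_lock(&mmap_sem_sketch);
            mutex_lock(&struct_mutex_sketch);
            mutex_unlock(&struct_mutex_sketch);
            mutex_unlock(&mmap_sem_sketch);
    }

After this change, gpu init no longer holds dev->struct_mutex across firmware loading, so the first ordering goes away.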
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -95,7 +95,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -370,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
+	mutex_lock(&drm->struct_mutex);
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->memptrs_bo)) {
 		ret = PTR_ERR(gpu->memptrs_bo);
 		gpu->memptrs_bo = NULL;
@@ -379,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
 	if (!gpu->memptrs) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
 			&gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	struct msm_kms *kms;
 	int ret;
 
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
 static void load_gpu(struct drm_device *dev)
 {
+	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gpu *gpu;
 
-	if (priv->gpu)
-		return;
+	mutex_lock(&init_lock);
+
+	if (priv->gpu)
+		goto out;
 
-	mutex_lock(&dev->struct_mutex);
 	gpu = a3xx_gpu_init(dev);
 	if (IS_ERR(gpu)) {
 		dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
 
 	if (gpu) {
 		int ret;
+		mutex_lock(&dev->struct_mutex);
 		gpu->funcs->pm_resume(gpu);
+		mutex_unlock(&dev->struct_mutex);
 		ret = gpu->funcs->hw_init(gpu);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
-
 	}
 
 	priv->gpu = gpu;
 
-	mutex_unlock(&dev->struct_mutex);
+out:
+	mutex_unlock(&init_lock);
 }
 
 static int msm_open(struct drm_device *dev, struct drm_file *file)
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -612,8 +612,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 	gpu->id = msm_register_mmu(drm, gpu->mmu);
 
+
 	/* Create ringbuffer: */
+	mutex_lock(&drm->struct_mutex);
 	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->rb)) {
 		ret = PTR_ERR(gpu->rb);
 		gpu->rb = NULL;
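Taken together, the hunks leave load_gpu() with the shape sketched below. This is a condensed illustration of the locking pattern rather than the final function: hw_init, retire, and the error paths are elided.

    static void load_gpu(struct drm_device *dev)
    {
            static DEFINE_MUTEX(init_lock);  /* serializes one-time gpu init */
            struct msm_drm_private *priv = dev->dev_private;
            struct msm_gpu *gpu;

            mutex_lock(&init_lock);

            if (priv->gpu)
                    goto out;  /* another opener already did the init */

            /* firmware loading happens in here, with struct_mutex NOT held */
            gpu = a3xx_gpu_init(dev);

            if (!IS_ERR(gpu)) {
                    /* struct_mutex is only taken around the short sections
                     * that actually need it (pm_resume, GEM allocations) */
                    mutex_lock(&dev->struct_mutex);
                    gpu->funcs->pm_resume(gpu);
                    mutex_unlock(&dev->struct_mutex);
                    priv->gpu = gpu;
            }

    out:
            mutex_unlock(&init_lock);
    }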