Merge tag 'drm-intel-fixes-2020-08-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
drm/i915 fixes for v5.9-rc2:

- GVT fixes
- Fix device parameter usage for selftest mock i915 device
- Fix LPSP capability debugfs NULL dereference
- Fix buddy register pagemask table
- Fix intel_atomic_check() non-negative return value
- Fix selftests passing a random 0 into ilog2()
- Fix TGL power well enable/disable ordering
- Switch to PMU module refcounting

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87a6yp7jp3.fsf@intel.com
commit 0790e63f58
@@ -14930,7 +14930,7 @@ static int intel_atomic_check(struct drm_device *dev,
 	if (any_ms && !check_digital_port_conflicts(state)) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "rejecting conflicting digital port configuration\n");
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto fail;
 	}
 
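The hunk above is the intel_atomic_check() fix from the list: the kernel convention is 0 for success and a negative errno for failure, so the bare EINVAL (+22) looked like success to any caller that tests for ret < 0. A minimal standalone sketch of the convention; check_ports() is a hypothetical stand-in, not the driver function:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in mirroring intel_atomic_check()'s contract:
 * return 0 on success, a negative errno on failure. */
static int check_ports(int conflicting)
{
	if (conflicting)
		return -EINVAL;	/* the old "ret = EINVAL" returned +22 */
	return 0;
}

int main(void)
{
	int ret = check_ports(1);

	/* Callers treat only ret < 0 as failure, so a positive EINVAL
	 * would have sailed through this check. */
	if (ret < 0)
		printf("rejected, err=%d\n", ret);
	else
		printf("accepted, ret=%d\n", ret);
	return 0;
}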
@@ -2044,9 +2044,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
 {
 	struct drm_connector *connector = m->private;
-	struct intel_encoder *encoder =
-		intel_attached_encoder(to_intel_connector(connector));
 	struct drm_i915_private *i915 = to_i915(connector->dev);
+	struct intel_encoder *encoder;
+
+	encoder = intel_attached_encoder(to_intel_connector(connector));
+	if (!encoder)
+		return -ENODEV;
 
 	if (connector->status != connector_status_connected)
 		return -ENODEV;
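This is the LPSP capability debugfs fix: the encoder lookup is moved ahead of any use and checked, since a connector can momentarily have no attached encoder. A standalone sketch of the look-before-use pattern, with invented toy types in place of the i915 ones:

#include <stdio.h>
#include <stddef.h>

struct encoder { const char *name; };		/* toy stand-ins */
struct connector { struct encoder *enc; };

/* Hypothetical stand-in for intel_attached_encoder(): may return NULL. */
static struct encoder *attached_encoder(struct connector *c)
{
	return c->enc;
}

static int lpsp_show(struct connector *c)
{
	struct encoder *e = attached_encoder(c);

	if (!e)			/* the added guard: bail out before dereferencing */
		return -19;	/* -ENODEV */

	printf("encoder: %s\n", e->name);
	return 0;
}

int main(void)
{
	struct connector orphan = { .enc = NULL };

	/* prints -19 instead of crashing on e->name */
	printf("lpsp_show: %d\n", lpsp_show(&orphan));
	return 0;
}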
@@ -4146,6 +4146,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
 		},
 	},
+	{
+		.name = "TC cold off",
+		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+		.ops = &tgl_tc_cold_off_ops,
+		.id = DISP_PW_ID_NONE,
+	},
 	{
 		.name = "AUX A",
 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
@@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 			.hsw.irq_pipe_mask = BIT(PIPE_D),
 		},
 	},
-	{
-		.name = "TC cold off",
-		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
-		.ops = &tgl_tc_cold_off_ops,
-		.id = DISP_PW_ID_NONE,
-	},
 };
 
 static const struct i915_power_well_desc rkl_power_wells[] = {
@@ -5240,10 +5240,10 @@ struct buddy_page_mask {
 };
 
 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
-	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
+	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
 	{}
 };
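The buddy pagemask fix drops the bogus one-channel LPDDR4 row and adds the missing four-channel one. The driver picks a row by exact (num_channels, type) match, roughly as in this standalone sketch over the corrected table; the enum names here are placeholders for the INTEL_DRAM_* values:

#include <stdio.h>

enum dram_type { DRAM_LPDDR4, DRAM_DDR4 };	/* placeholders */

struct buddy_page_mask {
	int num_channels;
	enum dram_type type;
	unsigned int page_mask;
};

/* The corrected TGL table: the 1-channel LPDDR4 row (0xE) is gone,
 * the 4-channel LPDDR4 row (0x38) is new. */
static const struct buddy_page_mask tgl_masks[] = {
	{ 1, DRAM_DDR4,   0xF  },
	{ 2, DRAM_LPDDR4, 0x1C },
	{ 2, DRAM_DDR4,   0x1F },
	{ 4, DRAM_LPDDR4, 0x38 },
	{ 0 },				/* sentinel, like the {} above */
};

static unsigned int lookup_page_mask(int channels, enum dram_type type)
{
	const struct buddy_page_mask *m;

	for (m = tgl_masks; m->num_channels; m++)
		if (m->num_channels == channels && m->type == type)
			return m->page_mask;
	return 0;			/* no match */
}

int main(void)
{
	printf("4ch LPDDR4 -> 0x%X\n", lookup_page_mask(4, DRAM_LPDDR4));
	return 0;
}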
@@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 {
 	u8 *cfg_base = vgpu_cfg_space(vgpu);
 	u8 mask, new, old;
+	pci_power_t pwr;
 	int i = 0;
 
 	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
@@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 	/* For other configuration space directly copy as it is. */
 	if (i < bytes)
 		memcpy(cfg_base + off + i, src + i, bytes - i);
+
+	if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
+		pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off])
+			& PCI_PM_CTRL_STATE_MASK);
+		if (pwr == PCI_D3hot)
+			vgpu->d3_entered = true;
+		gvt_dbg_core("vgpu-%d power status changed to %d\n",
+			     vgpu->id, pwr);
+	}
 }
 
 /**
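The new block in vgpu_pci_cfg_mem_write() snoops guest writes that land on the PMCSR register and latches a transition into D3hot; the reset path consults the flag later. A standalone sketch of the same two-bit decode, using the standard PCI PM mask and state values:

#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK	0x0003	/* PCI_PM_CTRL_STATE_MASK */
#define D3HOT			3	/* PCI_D3hot */

/* Decode the power state from a 16-bit PMCSR value, as the hunk does
 * once the guest write has been merged into the virtual cfg space. */
static int pmcsr_power_state(uint16_t pmcsr)
{
	return pmcsr & PM_CTRL_STATE_MASK;
}

int main(void)
{
	uint16_t pmcsr = 0x0003;	/* guest requested D3hot */
	int d3_entered = 0;

	if (pmcsr_power_state(pmcsr) == D3HOT)
		d3_entered = 1;		/* vgpu->d3_entered = true; */

	printf("power state %d, d3_entered=%d\n",
	       pmcsr_power_state(pmcsr), d3_entered);
	return 0;
}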
@@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	const struct intel_gvt_device_info *info = &gvt->device_info;
 	u16 *gmch_ctl;
+	u8 next;
 
 	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
 	       info->cfg_space_size);
@@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 		pci_resource_len(gvt->gt->i915->drm.pdev, 2);
 
 	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
+
+	/* PM Support */
+	vgpu->cfg_space.pmcsr_off = 0;
+	if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
+		next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
+		do {
+			if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
+				vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
+				break;
+			}
+			next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
+		} while (next);
+	}
 }
 
 /**
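Finding pmcsr_off means walking the standard PCI capability list: a chain of (id, next) byte pairs starting at offset 0x34, terminated by next == 0. A standalone sketch of the same walk over a fabricated config space; the chain layout in main() is invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_LIST_ID		0
#define PCI_CAP_LIST_NEXT	1
#define PCI_CAP_ID_PM		0x01
#define PCI_PM_CTRL		4	/* PMCSR is 4 bytes into the PM cap */

/* Same walk as the hunk: follow next pointers until the PM capability
 * is found or the chain terminates with next == 0. */
static unsigned int find_pmcsr_off(const uint8_t *cfg)
{
	uint8_t next;

	if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
		return 0;

	next = cfg[PCI_CAPABILITY_LIST];
	while (next) {
		if (cfg[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM)
			return next + PCI_PM_CTRL;
		next = cfg[next + PCI_CAP_LIST_NEXT];
	}
	return 0;
}

int main(void)
{
	uint8_t cfg[256] = { 0 };

	cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
	cfg[PCI_CAPABILITY_LIST] = 0x40;	/* fake chain: 0x40 -> 0x50 */
	cfg[0x40 + PCI_CAP_LIST_ID] = 0x05;	/* MSI, not what we want */
	cfg[0x40 + PCI_CAP_LIST_NEXT] = 0x50;
	cfg[0x50 + PCI_CAP_LIST_ID] = PCI_CAP_ID_PM;

	printf("pmcsr_off = 0x%x\n", find_pmcsr_off(cfg));	/* 0x54 */
	return 0;
}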
@@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
 	struct list_head *pos, *n;
 	struct intel_vgpu_mm *mm;
@@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+
 #endif /* _GVT_GTT_H_ */
@@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar {
 struct intel_vgpu_cfg_space {
 	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
 	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+	u32 pmcsr_off;
 };
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
@@ -198,6 +199,8 @@ struct intel_vgpu {
 	struct intel_vgpu_submission submission;
 	struct radix_tree_root page_track_tree;
 	u32 hws_pga[I915_NUM_ENGINES];
+	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+	bool d3_entered;
 
 	struct dentry *debugfs;
 
@@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
 	intel_gvt_deactivate_vgpu(vgpu);
 
 	mutex_lock(&vgpu->vgpu_lock);
+	vgpu->d3_entered = false;
 	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
 	intel_vgpu_dmabuf_cleanup(vgpu);
 	mutex_unlock(&vgpu->vgpu_lock);
@@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
+	vgpu->d3_entered = false;
 
 	ret = intel_vgpu_init_mmio(vgpu);
 	if (ret)
@@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-		intel_vgpu_invalidate_ppgtt(vgpu);
+		if (engine_mask == ALL_ENGINES)
+			intel_vgpu_invalidate_ppgtt(vgpu);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
-			intel_vgpu_reset_gtt(vgpu);
+			if(!vgpu->d3_entered) {
+				intel_vgpu_invalidate_ppgtt(vgpu);
+				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+			}
+			intel_vgpu_reset_ggtt(vgpu, true);
 			intel_vgpu_reset_resource(vgpu);
 		}
 
@@ -572,6 +579,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		intel_vgpu_reset_cfg_space(vgpu);
 		/* only reset the failsafe mode when dmlr reset */
 		vgpu->failsafe = false;
+		/*
+		 * PCI_D0 is set before dmlr, so reset d3_entered here
+		 * after done using.
+		 */
+		if(vgpu->d3_entered)
+			vgpu->d3_entered = false;
+		else
 		vgpu->pv_notified = false;
 	}
 }
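Taken together, the vgpu.c hunks make a device-model reset skip the ppgtt teardown when it is really a D3->D0 wakeup, so guest page tables survive runtime suspend. A condensed standalone sketch of the decision; names mirror the hunks, bodies are stubs:

#include <stdbool.h>
#include <stdio.h>

struct vgpu { bool d3_entered; bool pv_notified; };

static void invalidate_ppgtt(struct vgpu *v)     { (void)v; puts("invalidate ppgtt"); }
static void destroy_all_ppgtt_mm(struct vgpu *v) { (void)v; puts("destroy ppgtt mm"); }
static void reset_ggtt(struct vgpu *v)           { (void)v; puts("reset ggtt"); }

/* Condensed from intel_gvt_reset_vgpu_locked(): on a dmlr reset that is
 * really a D3->D0 wakeup, the guest's page tables must be kept. */
static void dmlr_reset(struct vgpu *v)
{
	if (!v->d3_entered) {
		invalidate_ppgtt(v);
		destroy_all_ppgtt_mm(v);
	}
	reset_ggtt(v);

	/* PCI_D0 is set before dmlr, so consume the flag here */
	if (v->d3_entered)
		v->d3_entered = false;
	else
		v->pv_notified = false;
}

int main(void)
{
	struct vgpu v = { .d3_entered = true, .pv_notified = true };

	dmlr_reset(&v);		/* prints only "reset ggtt" */
	return 0;
}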
@@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event)
 		container_of(event->pmu, typeof(*i915), pmu.base);
 
 	drm_WARN_ON(&i915->drm, event->parent);
-
-	module_put(THIS_MODULE);
 }
 
 static int
@@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (ret)
 		return ret;
 
-	if (!event->parent) {
-		__module_get(THIS_MODULE);
+	if (!event->parent)
 		event->destroy = i915_pmu_event_destroy;
-	}
 
 	return 0;
 }
@@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	if (!pmu->base.attr_groups)
 		goto err_attr;
 
+	pmu->base.module	= THIS_MODULE;
 	pmu->base.task_ctx_nr	= perf_invalid_context;
 	pmu->base.event_init	= i915_pmu_event_init;
 	pmu->base.add		= i915_pmu_event_add;
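The three PMU hunks replace manual module refcounting with perf core's: setting pmu->base.module lets perf core pin the module per event, instead of the driver dropping its own last reference from event_destroy() while that code is still executing. A toy standalone model of why the ownership move is safer; the struct and functions here are invented for illustration, not the perf API:

#include <stdio.h>

/* Toy model, NOT the perf API. With manual get/put, the final
 * module_put() ran inside the driver's own event_destroy(), i.e. inside
 * the code being unpinned. With an owner field, the framework drops the
 * reference only after the driver callback has returned. */

struct module { int refs; };

struct pmu {
	struct module *owner;	/* cf. pmu->base.module = THIS_MODULE */
	void (*event_destroy)(void);
};

static void driver_event_destroy(void)
{
	/* old scheme would module_put() right here */
}

static void framework_release_event(struct pmu *p)
{
	p->event_destroy();	/* driver code runs first... */
	p->owner->refs--;	/* ...framework drops the ref afterwards */
}

int main(void)
{
	struct module mod = { .refs = 1 };
	struct pmu pmu = { .owner = &mod, .event_destroy = driver_event_destroy };

	framework_release_event(&pmu);
	printf("module refs now %d\n", mod.refs);
	return 0;
}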
@@ -8,8 +8,6 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 
-#define SZ_8G (1ULL << 33)
-
 static void __igt_dump_block(struct i915_buddy_mm *mm,
 			     struct i915_buddy_block *block,
 			     bool buddy)
@@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm)
 static void igt_mm_config(u64 *size, u64 *chunk_size)
 {
 	I915_RND_STATE(prng);
-	u64 s, ms;
+	u32 s, ms;
 
 	/* Nothing fancy, just try to get an interesting bit pattern */
 
 	prandom_seed_state(&prng, i915_selftest.random_seed);
 
-	s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
-	ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
-	s = max(s & -ms, ms);
+	/* Let size be a random number of pages up to 8 GB (2M pages) */
+	s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+	/* Let the chunk size be a random power of 2 less than size */
+	ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+	/* Round size down to the chunk size */
+	s &= -ms;
 
-	*chunk_size = ms;
-	*size = s;
+	/* Convert from pages to bytes */
+	*chunk_size = (u64)ms << 12;
+	*size = (u64)s << 12;
 }
 
 static int igt_buddy_alloc_smoke(void *arg)
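This is the "random 0 into ilog2()" fix: the old code could mask the random size down to zero pages and then evaluate ilog2(0), which is undefined. The rewrite draws the page count from [1, 2^21), so ilog2() always sees a value >= 1. A standalone sketch of the corrected derivation; rand_below() is a toy stand-in for i915_prandom_u32_max_state():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* floor(log2(v)); v must be nonzero -- exactly the precondition the
 * old code violated by feeding it a possibly-zero size */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Toy stand-in for i915_prandom_u32_max_state(): uniform in [0, max) */
static uint32_t rand_below(uint32_t max)
{
	return max ? (uint32_t)rand() % max : 0;
}

static void mm_config(uint64_t *size, uint64_t *chunk_size)
{
	/* 1..2^21-1 pages of 4K, i.e. under 8G; the "1 +" keeps s nonzero */
	uint32_t s = 1 + rand_below((1u << (33 - 12)) - 1);
	/* chunk: power of two below s (or 1 when s == 1); safe since s >= 1 */
	uint32_t ms = 1u << rand_below(ilog2_u32(s));

	s &= -ms;				/* round size down to chunk */
	*chunk_size = (uint64_t)ms << 12;	/* pages -> bytes */
	*size = (uint64_t)s << 12;
}

int main(void)
{
	uint64_t size, chunk;

	mm_config(&size, &chunk);
	printf("size=%llu bytes, chunk=%llu bytes\n",
	       (unsigned long long)size, (unsigned long long)chunk);
	return 0;
}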
@@ -78,6 +78,7 @@ static void mock_device_release(struct drm_device *dev)
 	drm_mode_config_cleanup(&i915->drm);
 
 out:
+	i915_params_free(&i915->params);
 	put_device(&i915->drm.pdev->dev);
 	i915->drm.pdev = NULL;
 }
@@ -165,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void)
 	i915->drm.pdev = pdev;
 	drmm_add_final_kfree(&i915->drm, i915);
 
+	i915_params_copy(&i915->params, &i915_modparams);
+
 	intel_runtime_pm_init_early(&i915->runtime_pm);
 
 	/* Using the global GTT may ask questions about KMS users, so prepare */