Merge tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel into drm-next

- Y tiling support for scanout (Tvrtko & Damien)
- Remove more UMS support
- some small prep patches for OLR removal from John Harrison
- first few patches for dynamic pagetable allocation from Ben Widawsky, rebased
  by tons of other people
- DRRS support patches (Sonika & Vandana)
- FBC patches from Paulo
- make sure our vblank callbacks aren't called when the pipes are off
- various patches all over

* tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel: (61 commits)
  drm/i915: Update DRIVER_DATE to 20150227
  drm/i915: Clarify obj->map_and_fenceable
  drm/i915/skl: Allow Y (and Yf) frame buffer creation
  drm/i915/skl: Update watermarks for Y tiling
  drm/i915/skl: Updated watermark programming
  drm/i915/skl: Adjust get_plane_config() to support Yb/Yf tiling
  drm/i915/skl: Teach pin_and_fence_fb_obj() about Y tiling constraints
  drm/i915/skl: Adjust intel_fb_align_height() for Yb/Yf tiling
  drm/i915/skl: Allow scanning out Y and Yf fbs
  drm/i915/skl: Add new displayable tiling formats
  drm/i915: Remove DRIVER_MODESET checks from modeset code
  drm/i915: Remove regfile code&data for UMS suspend/resume
  drm/i915: Remove DRIVER_MODESET checks from gem code
  drm/i915: Remove DRIVER_MODESET checks in the gpu reset code
  drm/i915: Remove DRIVER_MODESET checks from suspend/resume code
  drm/i915: Remove DRIVER_MODESET checks in load/unload/close code
  drm/i915: fix a printk format
  drm/i915: Add media rc6 residency file to sysfs
  drm/i915: Add missing description to parameter in alloc_pt_range
  drm/i915: Removed the read of RP_STATE_CAP from sysfs/debugfs functions
  ...
Merge committed by Dave Airlie, 2015-03-09 19:41:15 +10:00
38 changed files with 1616 additions and 1718 deletions

--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -4051,6 +4051,17 @@ int num_ioctls;</synopsis>
         <title>Frame Buffer Compression (FBC)</title>
 !Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
 !Idrivers/gpu/drm/i915/intel_fbc.c
+      </sect2>
+      <sect2>
+        <title>Display Refresh Rate Switching (DRRS)</title>
+!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
+!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
+      </sect2>
       <sect2>
         <title>DPIO</title>
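The DRRS entry points documented above are driven by i915's frontbuffer tracking. As a rough, hypothetical sketch of that call flow (simplified; the real plumbing goes through intel_frontbuffer.c, and this will not build outside the i915 tree):

/* Hypothetical consumer of the DRRS hooks listed above. CPU rendering
 * to the frontbuffer should bump eDP back to the high refresh rate;
 * once the update hits the screen, DRRS may downclock again after
 * idling for a while. */
static void example_frontbuffer_dirty(struct drm_device *dev,
                                      unsigned frontbuffer_bits)
{
        /* Upclock: a screen update is about to happen. */
        intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void example_frontbuffer_flush(struct drm_device *dev,
                                      unsigned frontbuffer_bits)
{
        /* Content reached the screen; rearm the idle downclock timer. */
        intel_edp_drrs_flush(dev, frontbuffer_bits);
}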

--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,7 +276,6 @@ static void vblank_disable_fn(unsigned long arg)
 void drm_vblank_cleanup(struct drm_device *dev)
 {
     int crtc;
-    unsigned long irqflags;
 
     /* Bail if the driver didn't call drm_vblank_init() */
     if (dev->num_crtcs == 0)
@@ -285,11 +284,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
     for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
         struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
 
-        del_timer_sync(&vblank->disable_timer);
+        WARN_ON(vblank->enabled &&
+            drm_core_check_feature(dev, DRIVER_MODESET));
 
-        spin_lock_irqsave(&dev->vbl_lock, irqflags);
-        vblank_disable_and_save(dev, crtc);
-        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+        del_timer_sync(&vblank->disable_timer);
     }
 
     kfree(dev->vblank);
@@ -475,17 +473,23 @@ int drm_irq_uninstall(struct drm_device *dev)
     dev->irq_enabled = false;
 
     /*
-     * Wake up any waiters so they don't hang.
+     * Wake up any waiters so they don't hang. This is just to paper over
+     * issues for UMS drivers which aren't in full control of their
+     * vblank/irq handling. KMS drivers must ensure that vblanks are all
+     * disabled when uninstalling the irq handler.
     */
     if (dev->num_crtcs) {
         spin_lock_irqsave(&dev->vbl_lock, irqflags);
         for (i = 0; i < dev->num_crtcs; i++) {
             struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
+            if (!vblank->enabled)
+                continue;
+
+            WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
+
+            vblank_disable_and_save(dev, i);
             wake_up(&vblank->queue);
-            vblank->enabled = false;
-            vblank->last =
-                dev->driver->get_vblank_counter(dev, i);
         }
         spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
     }
@@ -1232,6 +1236,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_off);
 
+/**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+{
+    struct drm_device *dev = drm_crtc->dev;
+    unsigned long irqflags;
+    int crtc = drm_crtc_index(drm_crtc);
+    struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+    spin_lock_irqsave(&dev->vbl_lock, irqflags);
+    /*
+     * Prevent subsequent drm_vblank_get() from enabling the vblank
+     * interrupt by bumping the refcount.
+     */
+    if (!vblank->inmodeset) {
+        atomic_inc(&vblank->refcount);
+        vblank->inmodeset = 1;
+    }
+    spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+    WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
+
 /**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
@@ -1653,7 +1689,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
     struct timeval tvblank;
     unsigned long irqflags;
 
-    if (!dev->num_crtcs)
+    if (WARN_ON_ONCE(!dev->num_crtcs))
         return false;
 
     if (WARN_ON(crtc >= dev->num_crtcs))
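drm_crtc_vblank_reset() pairs with drm_crtc_vblank_on()/drm_crtc_vblank_off() as its kerneldoc above describes. A minimal, hypothetical driver sketch (function names invented for illustration; only the drm_crtc_vblank_* calls come from this patch):

/* Hypothetical driver load path: mark every CRTC's vblank machinery
 * as off before the first modeset, so stray drm_vblank_get() calls
 * cannot enable the interrupt on a pipe that is not running yet. */
static void example_reset_vblanks(struct drm_device *dev)
{
    struct drm_crtc *crtc;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
        drm_crtc_vblank_reset(crtc);
}

/* Later, the modeset code balances the refcount taken above: */
static void example_enable_pipe(struct drm_crtc *crtc)
{
    /* ... program and start the pipe ... */
    drm_crtc_vblank_on(crtc);   /* vblank interrupts legal again */
}

static void example_disable_pipe(struct drm_crtc *crtc)
{
    drm_crtc_vblank_off(crtc);  /* reject drm_vblank_get() while off */
    /* ... stop the pipe ... */
}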

--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -87,8 +87,7 @@ i915-y += dvo_ch7017.o \
 i915-y += i915_vgpu.o
 
 # legacy horrors
-i915-y += i915_dma.o \
-	  i915_ums.o
+i915-y += i915_dma.o
 
 obj-$(CONFIG_DRM_I915) += i915.o

--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,24 +818,26 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
     return false;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+               unsigned start, unsigned len)
 {
     int i;
     void *addr = NULL;
     struct sg_page_iter sg_iter;
+    int first_page = start >> PAGE_SHIFT;
+    int last_page = (len + start + 4095) >> PAGE_SHIFT;
+    int npages = last_page - first_page;
     struct page **pages;
 
-    pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+    pages = drm_malloc_ab(npages, sizeof(*pages));
     if (pages == NULL) {
         DRM_DEBUG_DRIVER("Failed to get space for pages\n");
         goto finish;
     }
 
     i = 0;
-    for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-        pages[i] = sg_page_iter_page(&sg_iter);
-        i++;
-    }
+    for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page)
+        pages[i++] = sg_page_iter_page(&sg_iter);
 
     addr = vmap(pages, i, 0, PAGE_KERNEL);
     if (addr == NULL) {
@@ -855,61 +857,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                u32 batch_start_offset,
                u32 batch_len)
 {
-    int ret = 0;
     int needs_clflush = 0;
-    u32 *src_base, *dest_base = NULL;
-    u32 *src_addr, *dest_addr;
-    u32 offset = batch_start_offset / sizeof(*dest_addr);
-    u32 end = batch_start_offset + batch_len;
+    void *src_base, *src;
+    void *dst = NULL;
+    int ret;
 
-    if (end > dest_obj->base.size || end > src_obj->base.size)
+    if (batch_len > dest_obj->base.size ||
+        batch_len + batch_start_offset > src_obj->base.size)
         return ERR_PTR(-E2BIG);
 
     ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
     if (ret) {
-        DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+        DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
         return ERR_PTR(ret);
     }
 
-    src_base = vmap_batch(src_obj);
+    src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
     if (!src_base) {
         DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
         ret = -ENOMEM;
         goto unpin_src;
     }
 
-    src_addr = src_base + offset;
-
-    if (needs_clflush)
-        drm_clflush_virt_range((char *)src_addr, batch_len);
+    ret = i915_gem_object_get_pages(dest_obj);
+    if (ret) {
+        DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+        goto unmap_src;
+    }
+    i915_gem_object_pin_pages(dest_obj);
 
     ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
     if (ret) {
-        DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+        DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
         goto unmap_src;
     }
 
-    dest_base = vmap_batch(dest_obj);
-    if (!dest_base) {
+    dst = vmap_batch(dest_obj, 0, batch_len);
+    if (!dst) {
         DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+        i915_gem_object_unpin_pages(dest_obj);
         ret = -ENOMEM;
         goto unmap_src;
     }
 
-    dest_addr = dest_base + offset;
-
-    if (batch_start_offset != 0)
-        memset((u8 *)dest_base, 0, batch_start_offset);
+    src = src_base + offset_in_page(batch_start_offset);
+    if (needs_clflush)
+        drm_clflush_virt_range(src, batch_len);
 
-    memcpy(dest_addr, src_addr, batch_len);
-    memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+    memcpy(dst, src, batch_len);
 
 unmap_src:
     vunmap(src_base);
 unpin_src:
     i915_gem_object_unpin_pages(src_obj);
 
-    return ret ? ERR_PTR(ret) : dest_base;
+    return ret ? ERR_PTR(ret) : dst;
 }
 
 /**
@@ -1046,34 +1048,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
         u32 batch_len,
         bool is_master)
 {
-    int ret = 0;
     u32 *cmd, *batch_base, *batch_end;
     struct drm_i915_cmd_descriptor default_desc = { 0 };
     bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
-    ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
-    if (ret) {
-        DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
-        return -1;
-    }
+    int ret = 0;
 
     batch_base = copy_batch(shadow_batch_obj, batch_obj,
                 batch_start_offset, batch_len);
     if (IS_ERR(batch_base)) {
         DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-        i915_gem_object_ggtt_unpin(shadow_batch_obj);
         return PTR_ERR(batch_base);
     }
 
-    cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
     /*
      * We use the batch length as size because the shadow object is as
      * large or larger and copy_batch() will write MI_NOPs to the extra
      * space. Parsing should be faster in some cases this way.
      */
-    batch_end = cmd + (batch_len / sizeof(*batch_end));
+    batch_end = batch_base + (batch_len / sizeof(*batch_end));
 
+    cmd = batch_base;
     while (cmd < batch_end) {
         const struct drm_i915_cmd_descriptor *desc;
         u32 length;
@@ -1132,7 +1126,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
     }
 
     vunmap(batch_base);
-    i915_gem_object_ggtt_unpin(shadow_batch_obj);
+    i915_gem_object_unpin_pages(shadow_batch_obj);
 
     return ret;
 }
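The new vmap_batch() maps only the pages that cover [start, start + len). A small standalone sketch of that page-window arithmetic (userspace C, 4 KiB pages assumed, mirroring the +4095 round-up above):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
    /* Example batch window: starts 100 bytes into page 1, 5000 bytes long. */
    unsigned start = PAGE_SIZE + 100, len = 5000;

    unsigned first_page = start >> PAGE_SHIFT;               /* page 1 */
    unsigned last_page = (len + start + 4095) >> PAGE_SHIFT; /* round up */
    unsigned npages = last_page - first_page;

    /* The mapped window must cover every byte of [start, start + len). */
    assert(first_page * PAGE_SIZE <= start);
    assert(last_page * PAGE_SIZE >= start + len);

    printf("pages %u..%u -> %u pages mapped\n",
           first_page, last_page - 1, npages);
    return 0;
}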

--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
            obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
     if (obj->base.name)
         seq_printf(m, " (name: %d)", obj->base.name);
-    list_for_each_entry(vma, &obj->vma_list, vma_link)
+    list_for_each_entry(vma, &obj->vma_list, vma_link) {
         if (vma->pin_count > 0)
             pin_count++;
-    seq_printf(m, " (pinned x %d)", pin_count);
+    }
+    seq_printf(m, " (pinned x %d)", pin_count);
     if (obj->pin_display)
         seq_printf(m, " (display)");
     if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
             seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
                    work->flip_queued_vblank,
                    work->flip_ready_vblank,
-                   drm_vblank_count(dev, crtc->pipe));
+                   drm_crtc_vblank_count(&crtc->base));
             if (work->enable_stall_check)
                 seq_puts(m, "Stall check enabled, ");
             else
@@ -2185,7 +2186,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
         seq_puts(m, "aliasing PPGTT:\n");
-        seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+        seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
 
         ppgtt->debug_dump(ppgtt, m);
     }
@@ -4191,7 +4192,7 @@ i915_max_freq_set(void *data, u64 val)
 {
     struct drm_device *dev = data;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    u32 rp_state_cap, hw_max, hw_min;
+    u32 hw_max, hw_min;
     int ret;
 
     if (INTEL_INFO(dev)->gen < 6)
@@ -4208,18 +4209,10 @@ i915_max_freq_set(void *data, u64 val)
     /*
      * Turbo will still be enabled, but won't go above the set value.
      */
-    if (IS_VALLEYVIEW(dev)) {
-        val = intel_freq_opcode(dev_priv, val);
-
-        hw_max = dev_priv->rps.max_freq;
-        hw_min = dev_priv->rps.min_freq;
-    } else {
-        val = intel_freq_opcode(dev_priv, val);
-
-        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-        hw_max = dev_priv->rps.max_freq;
-        hw_min = (rp_state_cap >> 16) & 0xff;
-    }
+    val = intel_freq_opcode(dev_priv, val);
+
+    hw_max = dev_priv->rps.max_freq;
+    hw_min = dev_priv->rps.min_freq;
 
     if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
         mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4266,7 +4259,7 @@ i915_min_freq_set(void *data, u64 val)
 {
     struct drm_device *dev = data;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    u32 rp_state_cap, hw_max, hw_min;
+    u32 hw_max, hw_min;
     int ret;
 
     if (INTEL_INFO(dev)->gen < 6)
@@ -4283,18 +4276,10 @@ i915_min_freq_set(void *data, u64 val)
     /*
      * Turbo will still be enabled, but won't go below the set value.
      */
-    if (IS_VALLEYVIEW(dev)) {
-        val = intel_freq_opcode(dev_priv, val);
-
-        hw_max = dev_priv->rps.max_freq;
-        hw_min = dev_priv->rps.min_freq;
-    } else {
-        val = intel_freq_opcode(dev_priv, val);
-
-        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-        hw_max = dev_priv->rps.max_freq;
-        hw_min = (rp_state_cap >> 16) & 0xff;
-    }
+    val = intel_freq_opcode(dev_priv, val);
+
+    hw_max = dev_priv->rps.max_freq;
+    hw_min = dev_priv->rps.min_freq;
 
     if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
         mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4370,6 +4355,85 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
             i915_cache_sharing_get, i915_cache_sharing_set,
             "%llu\n");
 
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+    struct drm_info_node *node = (struct drm_info_node *) m->private;
+    struct drm_device *dev = node->minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+    if (INTEL_INFO(dev)->gen < 9)
+        return -ENODEV;
+
+    seq_puts(m, "SSEU Device Info\n");
+    seq_printf(m, "  Available Slice Total: %u\n",
+           INTEL_INFO(dev)->slice_total);
+    seq_printf(m, "  Available Subslice Total: %u\n",
+           INTEL_INFO(dev)->subslice_total);
+    seq_printf(m, "  Available Subslice Per Slice: %u\n",
+           INTEL_INFO(dev)->subslice_per_slice);
+    seq_printf(m, "  Available EU Total: %u\n",
+           INTEL_INFO(dev)->eu_total);
+    seq_printf(m, "  Available EU Per Subslice: %u\n",
+           INTEL_INFO(dev)->eu_per_subslice);
+    seq_printf(m, "  Has Slice Power Gating: %s\n",
+           yesno(INTEL_INFO(dev)->has_slice_pg));
+    seq_printf(m, "  Has Subslice Power Gating: %s\n",
+           yesno(INTEL_INFO(dev)->has_subslice_pg));
+    seq_printf(m, "  Has EU Power Gating: %s\n",
+           yesno(INTEL_INFO(dev)->has_eu_pg));
+
+    seq_puts(m, "SSEU Device Status\n");
+    if (IS_SKYLAKE(dev)) {
+        const int s_max = 3, ss_max = 4;
+        int s, ss;
+        u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+        s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+        s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+        s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+        eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+        eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+        eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+        eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+        eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+        eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+                 GEN9_PGCTL_SSA_EU19_ACK |
+                 GEN9_PGCTL_SSA_EU210_ACK |
+                 GEN9_PGCTL_SSA_EU311_ACK;
+        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+                 GEN9_PGCTL_SSB_EU19_ACK |
+                 GEN9_PGCTL_SSB_EU210_ACK |
+                 GEN9_PGCTL_SSB_EU311_ACK;
+
+        for (s = 0; s < s_max; s++) {
+            if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+                /* skip disabled slice */
+                continue;
+
+            s_tot++;
+            ss_per = INTEL_INFO(dev)->subslice_per_slice;
+            ss_tot += ss_per;
+            for (ss = 0; ss < ss_max; ss++) {
+                unsigned int eu_cnt;
+
+                eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+                               eu_mask[ss%2]);
+                eu_tot += eu_cnt;
+                eu_per = max(eu_per, eu_cnt);
+            }
+        }
+    }
+    seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
+    seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
+    seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
+    seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
+    seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
+
+    return 0;
+}
+
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
     struct drm_device *dev = inode->i_private;
@@ -4483,6 +4547,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
     {"i915_dp_mst_info", i915_dp_mst_info, 0},
     {"i915_wa_registers", i915_wa_registers, 0},
     {"i915_ddb_info", i915_ddb_info, 0},
+    {"i915_sseu_status", i915_sseu_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
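Each EU_PGCTL_ACK register above carries acks for two subslices, so the status code counts enabled EUs as 2 * hweight32(reg & mask). A standalone illustration with made-up register and mask values (the real GEN9_PGCTL_* bit layout lives in i915_reg.h):

#include <stdio.h>

/* Userspace stand-in for the kernel's hweight32(). */
static unsigned int hweight32(unsigned int v)
{
    return __builtin_popcount(v);
}

int main(void)
{
    /* Hypothetical layout: subslice A acks in bits 0-3, subslice B in
     * bits 8-11; each set bit acknowledges a pair of powered-up EUs. */
    unsigned int eu_mask[2] = { 0x0000000f, 0x00000f00 };
    unsigned int eu_reg = 0x00000307; /* 3 pairs in A, 2 pairs in B */
    int ss;

    for (ss = 0; ss < 2; ss++) {
        unsigned int eu_cnt = 2 * hweight32(eu_reg & eu_mask[ss]);
        printf("subslice %c: %u EUs enabled\n", 'A' + ss, eu_cnt);
    }
    return 0;
}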

--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -606,6 +606,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
         }
     }
 
+    /* Initialize slice/subslice/EU info */
     if (IS_CHERRYVIEW(dev)) {
         u32 fuse, mask_eu;
 
@@ -615,7 +616,90 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
               CHV_FGT_EU_DIS_SS1_R0_MASK |
               CHV_FGT_EU_DIS_SS1_R1_MASK);
         info->eu_total = 16 - hweight32(mask_eu);
+    } else if (IS_SKYLAKE(dev)) {
+        const int s_max = 3, ss_max = 4, eu_max = 8;
+        int s, ss;
+        u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+        fuse2 = I915_READ(GEN8_FUSE2);
+        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+               GEN8_F2_S_ENA_SHIFT;
+        ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                 GEN9_F2_SS_DIS_SHIFT;
+
+        eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+        eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+        eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+        info->slice_total = hweight32(s_enable);
+        /*
+         * The subslice disable field is global, i.e. it applies
+         * to each of the enabled slices.
+         */
+        info->subslice_per_slice = ss_max - hweight32(ss_disable);
+        info->subslice_total = info->slice_total *
+                       info->subslice_per_slice;
+
+        /*
+         * Iterate through enabled slices and subslices to
+         * count the total enabled EU.
+         */
+        for (s = 0; s < s_max; s++) {
+            if (!(s_enable & (0x1 << s)))
+                /* skip disabled slice */
+                continue;
+
+            for (ss = 0; ss < ss_max; ss++) {
+                u32 n_disabled;
+
+                if (ss_disable & (0x1 << ss))
+                    /* skip disabled subslice */
+                    continue;
+
+                n_disabled = hweight8(eu_disable[s] >>
+                              (ss * eu_max));
+
+                /*
+                 * Record which subslice(s) has(have) 7 EUs. we
+                 * can tune the hash used to spread work among
+                 * subslices if they are unbalanced.
+                 */
+                if (eu_max - n_disabled == 7)
+                    info->subslice_7eu[s] |= 1 << ss;
+
+                info->eu_total += eu_max - n_disabled;
+            }
+        }
+
+        /*
+         * SKL is expected to always have a uniform distribution
+         * of EU across subslices with the exception that any one
+         * EU in any one subslice may be fused off for die
+         * recovery.
+         */
+        info->eu_per_subslice = info->subslice_total ?
+                    DIV_ROUND_UP(info->eu_total,
+                             info->subslice_total) : 0;
+
+        /*
+         * SKL supports slice power gating on devices with more than
+         * one slice, and supports EU power gating on devices with
+         * more than one EU pair per subslice.
+         */
+        info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+        info->has_subslice_pg = 0;
+        info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
     }
+    DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+    DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+    DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+    DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+    DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+    DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+             info->has_slice_pg ? "y" : "n");
+    DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+             info->has_subslice_pg ? "y" : "n");
+    DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+             info->has_eu_pg ? "y" : "n");
 }
 
 /**
@@ -638,17 +722,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
     info = (struct intel_device_info *) flags;
 
-    /* Refuse to load on gen6+ without kms enabled. */
-    if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
-        DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
-        DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
-        return -ENODEV;
-    }
-
-    /* UMS needs agp support. */
-    if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
-        return -EINVAL;
-
     dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
     if (dev_priv == NULL)
         return -ENOMEM;
@@ -718,20 +791,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (ret)
         goto out_regs;
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
-         * otherwise the vga fbdev driver falls over. */
-        ret = i915_kick_out_firmware_fb(dev_priv);
-        if (ret) {
-            DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-            goto out_gtt;
-        }
+    /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+     * otherwise the vga fbdev driver falls over. */
+    ret = i915_kick_out_firmware_fb(dev_priv);
+    if (ret) {
+        DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+        goto out_gtt;
+    }
 
-        ret = i915_kick_out_vgacon(dev_priv);
-        if (ret) {
-            DRM_ERROR("failed to remove conflicting VGA console\n");
-            goto out_gtt;
-        }
+    ret = i915_kick_out_vgacon(dev_priv);
+    if (ret) {
+        DRM_ERROR("failed to remove conflicting VGA console\n");
+        goto out_gtt;
     }
 
     pci_set_master(dev->pdev);
@@ -835,12 +906,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
     intel_power_domains_init(dev_priv);
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = i915_load_modeset_init(dev);
-        if (ret < 0) {
-            DRM_ERROR("failed to init modeset\n");
-            goto out_power_well;
-        }
+    ret = i915_load_modeset_init(dev);
+    if (ret < 0) {
+        DRM_ERROR("failed to init modeset\n");
+        goto out_power_well;
     }
 
     /*
@@ -929,28 +998,25 @@ int i915_driver_unload(struct drm_device *dev)
 
     acpi_video_unregister();
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET))
-        intel_fbdev_fini(dev);
+    intel_fbdev_fini(dev);
 
     drm_vblank_cleanup(dev);
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        intel_modeset_cleanup(dev);
+    intel_modeset_cleanup(dev);
 
-        /*
-         * free the memory space allocated for the child device
-         * config parsed from VBT
-         */
-        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-            kfree(dev_priv->vbt.child_dev);
-            dev_priv->vbt.child_dev = NULL;
-            dev_priv->vbt.child_dev_num = 0;
-        }
-
-        vga_switcheroo_unregister_client(dev->pdev);
-        vga_client_register(dev->pdev, NULL, NULL, NULL);
+    /*
+     * free the memory space allocated for the child device
+     * config parsed from VBT
+     */
+    if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+        kfree(dev_priv->vbt.child_dev);
+        dev_priv->vbt.child_dev = NULL;
+        dev_priv->vbt.child_dev_num = 0;
     }
 
+    vga_switcheroo_unregister_client(dev->pdev);
+    vga_client_register(dev->pdev, NULL, NULL, NULL);
+
     /* Free error state after interrupts are fully disabled. */
     cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
     i915_destroy_error_state(dev);
@@ -960,17 +1026,15 @@ int i915_driver_unload(struct drm_device *dev)
 
     intel_opregion_fini(dev);
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        /* Flush any outstanding unpin_work. */
-        flush_workqueue(dev_priv->wq);
+    /* Flush any outstanding unpin_work. */
+    flush_workqueue(dev_priv->wq);
 
-        mutex_lock(&dev->struct_mutex);
-        i915_gem_cleanup_ringbuffer(dev);
-        i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
-        i915_gem_context_fini(dev);
-        mutex_unlock(&dev->struct_mutex);
-        i915_gem_cleanup_stolen(dev);
-    }
+    mutex_lock(&dev->struct_mutex);
+    i915_gem_cleanup_ringbuffer(dev);
+    i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+    i915_gem_context_fini(dev);
+    mutex_unlock(&dev->struct_mutex);
+    i915_gem_cleanup_stolen(dev);
 
     intel_teardown_gmbus(dev);
     intel_teardown_mchbar(dev);
@@ -1031,8 +1095,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
     i915_gem_release(dev, file);
     mutex_unlock(&dev->struct_mutex);
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET))
-        intel_modeset_preclose(dev, file);
+    intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
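The SKL path in intel_device_info_runtime_init() turns fuse registers into counts with hweight*() and extracts one 8-bit EU-disable field per subslice by shifting eu_disable[s] in eu_max steps. A standalone model of that decode, with invented fuse values (the real GEN8_FUSE2/GEN8_EU_DISABLEn layouts live in i915_reg.h):

#include <stdio.h>

#define SS_MAX 4
#define EU_MAX 8 /* EUs per subslice */

int main(void)
{
    /* Invented example: slices 0 and 1 enabled, subslice 3 fused off,
     * and one EU fused off in subslice 1 of this slice. */
    unsigned int s_enable = 0x3;
    unsigned int ss_disable = 0x8;
    unsigned int eu_disable0 = 0x00000100;

    unsigned int slice_total = __builtin_popcount(s_enable);
    unsigned int ss_per_slice = SS_MAX - __builtin_popcount(ss_disable);
    unsigned int eu_total = 0;
    int ss;

    for (ss = 0; ss < SS_MAX; ss++) {
        unsigned int n_disabled;

        if (ss_disable & (1u << ss))
            continue; /* skip fused-off subslice */
        /* One 8-bit disable field per subslice, as in the SKL decode. */
        n_disabled = __builtin_popcount((eu_disable0 >> (ss * EU_MAX)) & 0xff);
        eu_total += EU_MAX - n_disabled;
    }

    printf("slices=%u subslices/slice=%u EUs in slice 0=%u\n",
           slice_total, ss_per_slice, eu_total);
    return 0;
}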

--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -568,6 +568,7 @@ static int i915_drm_suspend(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_crtc *crtc;
     pci_power_t opregion_target_state;
+    int error;
 
     /* ignore lid events during suspend */
     mutex_lock(&dev_priv->modeset_restore_lock);
@@ -582,38 +583,33 @@ static int i915_drm_suspend(struct drm_device *dev)
 
     pci_save_state(dev->pdev);
 
-    /* If KMS is active, we do the leavevt stuff here */
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        int error;
-
-        error = i915_gem_suspend(dev);
-        if (error) {
-            dev_err(&dev->pdev->dev,
-                "GEM idle failed, resume might fail\n");
-            return error;
-        }
-
-        intel_suspend_gt_powersave(dev);
-
-        /*
-         * Disable CRTCs directly since we want to preserve sw state
-         * for _thaw. Also, power gate the CRTC power wells.
-         */
-        drm_modeset_lock_all(dev);
-        for_each_crtc(dev, crtc)
-            intel_crtc_control(crtc, false);
-        drm_modeset_unlock_all(dev);
-
-        intel_dp_mst_suspend(dev);
-
-        intel_runtime_pm_disable_interrupts(dev_priv);
-        intel_hpd_cancel_work(dev_priv);
-
-        intel_suspend_encoders(dev_priv);
-
-        intel_suspend_hw(dev);
-    }
+    error = i915_gem_suspend(dev);
+    if (error) {
+        dev_err(&dev->pdev->dev,
+            "GEM idle failed, resume might fail\n");
+        return error;
+    }
+
+    intel_suspend_gt_powersave(dev);
+
+    /*
+     * Disable CRTCs directly since we want to preserve sw state
+     * for _thaw. Also, power gate the CRTC power wells.
+     */
+    drm_modeset_lock_all(dev);
+    for_each_crtc(dev, crtc)
+        intel_crtc_control(crtc, false);
+    drm_modeset_unlock_all(dev);
+
+    intel_dp_mst_suspend(dev);
+
+    intel_runtime_pm_disable_interrupts(dev_priv);
+    intel_hpd_cancel_work(dev_priv);
+
+    intel_suspend_encoders(dev_priv);
+
+    intel_suspend_hw(dev);
 
     i915_gem_suspend_gtt_mappings(dev);
 
     i915_save_state(dev);
@@ -684,53 +680,48 @@ static int i915_drm_resume(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
 
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        mutex_lock(&dev->struct_mutex);
-        i915_gem_restore_gtt_mappings(dev);
-        mutex_unlock(&dev->struct_mutex);
-    }
+    mutex_lock(&dev->struct_mutex);
+    i915_gem_restore_gtt_mappings(dev);
+    mutex_unlock(&dev->struct_mutex);
 
     i915_restore_state(dev);
     intel_opregion_setup(dev);
 
-    /* KMS EnterVT equivalent */
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        intel_init_pch_refclk(dev);
-        drm_mode_config_reset(dev);
-
-        mutex_lock(&dev->struct_mutex);
-        if (i915_gem_init_hw(dev)) {
-            DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-            atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
-        }
-        mutex_unlock(&dev->struct_mutex);
-
-        /* We need working interrupts for modeset enabling ... */
-        intel_runtime_pm_enable_interrupts(dev_priv);
-
-        intel_modeset_init_hw(dev);
-
-        spin_lock_irq(&dev_priv->irq_lock);
-        if (dev_priv->display.hpd_irq_setup)
-            dev_priv->display.hpd_irq_setup(dev);
-        spin_unlock_irq(&dev_priv->irq_lock);
-
-        drm_modeset_lock_all(dev);
-        intel_modeset_setup_hw_state(dev, true);
-        drm_modeset_unlock_all(dev);
-
-        intel_dp_mst_resume(dev);
-
-        /*
-         * ... but also need to make sure that hotplug processing
-         * doesn't cause havoc. Like in the driver load code we don't
-         * bother with the tiny race here where we might lose hotplug
-         * notifications.
-         * */
-        intel_hpd_init(dev_priv);
-        /* Config may have changed between suspend and resume */
-        drm_helper_hpd_irq_event(dev);
-    }
+    intel_init_pch_refclk(dev);
+    drm_mode_config_reset(dev);
+
+    mutex_lock(&dev->struct_mutex);
+    if (i915_gem_init_hw(dev)) {
+        DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+        atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+    }
+    mutex_unlock(&dev->struct_mutex);
+
+    /* We need working interrupts for modeset enabling ... */
+    intel_runtime_pm_enable_interrupts(dev_priv);
+
+    intel_modeset_init_hw(dev);
+
+    spin_lock_irq(&dev_priv->irq_lock);
+    if (dev_priv->display.hpd_irq_setup)
+        dev_priv->display.hpd_irq_setup(dev);
+    spin_unlock_irq(&dev_priv->irq_lock);
+
+    drm_modeset_lock_all(dev);
+    intel_modeset_setup_hw_state(dev, true);
+    drm_modeset_unlock_all(dev);
+
+    intel_dp_mst_resume(dev);
+
+    /*
+     * ... but also need to make sure that hotplug processing
+     * doesn't cause havoc. Like in the driver load code we don't
+     * bother with the tiny race here where we might lose hotplug
+     * notifications.
+     * */
+    intel_hpd_init(dev_priv);
+    /* Config may have changed between suspend and resume */
+    drm_helper_hpd_irq_event(dev);
 
     intel_opregion_init(dev);
@@ -866,38 +857,35 @@ int i915_reset(struct drm_device *dev)
      * was running at the time of the reset (i.e. we weren't VT
      * switched away).
      */
-    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-        dev_priv->gpu_error.reload_in_reset = true;
-
-        ret = i915_gem_init_hw(dev);
-
-        dev_priv->gpu_error.reload_in_reset = false;
-
-        mutex_unlock(&dev->struct_mutex);
-        if (ret) {
-            DRM_ERROR("Failed hw init on reset %d\n", ret);
-            return ret;
-        }
-
-        /*
-         * FIXME: This races pretty badly against concurrent holders of
-         * ring interrupts. This is possible since we've started to drop
-         * dev->struct_mutex in select places when waiting for the gpu.
-         */
-
-        /*
-         * rps/rc6 re-init is necessary to restore state lost after the
-         * reset and the re-install of gt irqs. Skip for ironlake per
-         * previous concerns that it doesn't respond well to some forms
-         * of re-init after reset.
-         */
-        if (INTEL_INFO(dev)->gen > 5)
-            intel_enable_gt_powersave(dev);
-    } else {
-        mutex_unlock(&dev->struct_mutex);
-    }
+    /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+    dev_priv->gpu_error.reload_in_reset = true;
+
+    ret = i915_gem_init_hw(dev);
+
+    dev_priv->gpu_error.reload_in_reset = false;
+
+    mutex_unlock(&dev->struct_mutex);
+    if (ret) {
+        DRM_ERROR("Failed hw init on reset %d\n", ret);
+        return ret;
+    }
+
+    /*
+     * FIXME: This races pretty badly against concurrent holders of
+     * ring interrupts. This is possible since we've started to drop
+     * dev->struct_mutex in select places when waiting for the gpu.
+     */
+
+    /*
+     * rps/rc6 re-init is necessary to restore state lost after the
+     * reset and the re-install of gt irqs. Skip for ironlake per
+     * previous concerns that it doesn't respond well to some forms
+     * of re-init after reset.
+     */
+    if (INTEL_INFO(dev)->gen > 5)
+        intel_enable_gt_powersave(dev);
 
     return 0;
 }

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150214"
+#define DRIVER_DATE		"20150227"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -693,7 +693,18 @@ struct intel_device_info {
     int trans_offsets[I915_MAX_TRANSCODERS];
     int palette_offsets[I915_MAX_PIPES];
     int cursor_offsets[I915_MAX_PIPES];
-    unsigned int eu_total;
+
+    /* Slice/subslice/EU info */
+    u8 slice_total;
+    u8 subslice_total;
+    u8 subslice_per_slice;
+    u8 eu_total;
+    u8 eu_per_subslice;
+    /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+    u8 subslice_7eu[3];
+
+    u8 has_slice_pg:1;
+    u8 has_subslice_pg:1;
+    u8 has_eu_pg:1;
 };
 
 #undef DEFINE_FLAG
@@ -889,150 +900,21 @@ struct intel_gmbus {
 };
 
 struct i915_suspend_saved_registers {
-    u8 saveLBB;
-    u32 saveDSPACNTR;
-    u32 saveDSPBCNTR;
     u32 saveDSPARB;
-    u32 savePIPEACONF;
-    u32 savePIPEBCONF;
-    u32 savePIPEASRC;
-    u32 savePIPEBSRC;
-    u32 saveFPA0;
-    u32 saveFPA1;
-    u32 saveDPLL_A;
-    u32 saveDPLL_A_MD;
-    u32 saveHTOTAL_A;
-    u32 saveHBLANK_A;
-    u32 saveHSYNC_A;
-    u32 saveVTOTAL_A;
-    u32 saveVBLANK_A;
-    u32 saveVSYNC_A;
-    u32 saveBCLRPAT_A;
-    u32 saveTRANSACONF;
-    u32 saveTRANS_HTOTAL_A;
-    u32 saveTRANS_HBLANK_A;
-    u32 saveTRANS_HSYNC_A;
-    u32 saveTRANS_VTOTAL_A;
-    u32 saveTRANS_VBLANK_A;
-    u32 saveTRANS_VSYNC_A;
-    u32 savePIPEASTAT;
-    u32 saveDSPASTRIDE;
-    u32 saveDSPASIZE;
-    u32 saveDSPAPOS;
-    u32 saveDSPAADDR;
-    u32 saveDSPASURF;
-    u32 saveDSPATILEOFF;
-    u32 savePFIT_PGM_RATIOS;
-    u32 saveBLC_HIST_CTL;
-    u32 saveBLC_PWM_CTL;
-    u32 saveBLC_PWM_CTL2;
-    u32 saveBLC_CPU_PWM_CTL;
-    u32 saveBLC_CPU_PWM_CTL2;
-    u32 saveFPB0;
-    u32 saveFPB1;
-    u32 saveDPLL_B;
-    u32 saveDPLL_B_MD;
-    u32 saveHTOTAL_B;
-    u32 saveHBLANK_B;
-    u32 saveHSYNC_B;
-    u32 saveVTOTAL_B;
-    u32 saveVBLANK_B;
-    u32 saveVSYNC_B;
-    u32 saveBCLRPAT_B;
-    u32 saveTRANSBCONF;
-    u32 saveTRANS_HTOTAL_B;
-    u32 saveTRANS_HBLANK_B;
-    u32 saveTRANS_HSYNC_B;
-    u32 saveTRANS_VTOTAL_B;
-    u32 saveTRANS_VBLANK_B;
-    u32 saveTRANS_VSYNC_B;
-    u32 savePIPEBSTAT;
-    u32 saveDSPBSTRIDE;
-    u32 saveDSPBSIZE;
-    u32 saveDSPBPOS;
-    u32 saveDSPBADDR;
-    u32 saveDSPBSURF;
-    u32 saveDSPBTILEOFF;
-    u32 saveVGA0;
-    u32 saveVGA1;
-    u32 saveVGA_PD;
-    u32 saveVGACNTRL;
-    u32 saveADPA;
     u32 saveLVDS;
     u32 savePP_ON_DELAYS;
     u32 savePP_OFF_DELAYS;
-    u32 saveDVOA;
-    u32 saveDVOB;
-    u32 saveDVOC;
     u32 savePP_ON;
     u32 savePP_OFF;
     u32 savePP_CONTROL;
     u32 savePP_DIVISOR;
-    u32 savePFIT_CONTROL;
-    u32 save_palette_a[256];
-    u32 save_palette_b[256];
     u32 saveFBC_CONTROL;
-    u32 saveIER;
-    u32 saveIIR;
-    u32 saveIMR;
-    u32 saveDEIER;
-    u32 saveDEIMR;
-    u32 saveGTIER;
-    u32 saveGTIMR;
-    u32 saveFDI_RXA_IMR;
-    u32 saveFDI_RXB_IMR;
     u32 saveCACHE_MODE_0;
     u32 saveMI_ARB_STATE;
     u32 saveSWF0[16];
     u32 saveSWF1[16];
     u32 saveSWF2[3];
-    u8 saveMSR;
-    u8 saveSR[8];
-    u8 saveGR[25];
-    u8 saveAR_INDEX;
-    u8 saveAR[21];
-    u8 saveDACMASK;
-    u8 saveCR[37];
     uint64_t saveFENCE[I915_MAX_NUM_FENCES];
-    u32 saveCURACNTR;
-    u32 saveCURAPOS;
-    u32 saveCURABASE;
-    u32 saveCURBCNTR;
-    u32 saveCURBPOS;
-    u32 saveCURBBASE;
-    u32 saveCURSIZE;
-    u32 saveDP_B;
-    u32 saveDP_C;
-    u32 saveDP_D;
-    u32 savePIPEA_GMCH_DATA_M;
-    u32 savePIPEB_GMCH_DATA_M;
-    u32 savePIPEA_GMCH_DATA_N;
-    u32 savePIPEB_GMCH_DATA_N;
-    u32 savePIPEA_DP_LINK_M;
-    u32 savePIPEB_DP_LINK_M;
-    u32 savePIPEA_DP_LINK_N;
-    u32 savePIPEB_DP_LINK_N;
-    u32 saveFDI_RXA_CTL;
-    u32 saveFDI_TXA_CTL;
-    u32 saveFDI_RXB_CTL;
-    u32 saveFDI_TXB_CTL;
-    u32 savePFA_CTL_1;
-    u32 savePFB_CTL_1;
-    u32 savePFA_WIN_SZ;
-    u32 savePFB_WIN_SZ;
-    u32 savePFA_WIN_POS;
-    u32 savePFB_WIN_POS;
-    u32 savePCH_DREF_CONTROL;
-    u32 saveDISP_ARB_CTL;
-    u32 savePIPEA_DATA_M1;
-    u32 savePIPEA_DATA_N1;
-    u32 savePIPEA_LINK_M1;
-    u32 savePIPEA_LINK_N1;
-    u32 savePIPEB_DATA_M1;
-    u32 savePIPEB_DATA_N1;
-    u32 savePIPEB_LINK_M1;
-    u32 savePIPEB_LINK_N1;
-    u32 saveMCHBAR_RENDER_STANDBY;
     u32 savePCH_PORT_HOTPLUG;
     u16 saveGCDGMBUS;
 };
@@ -1455,6 +1337,7 @@ struct intel_vbt_data {
     bool edp_initialized;
     bool edp_support;
     int edp_bpp;
+    bool edp_low_vswing;
     struct edp_power_seq edp_pps;
 
     struct {
@@ -2144,8 +2027,9 @@ struct drm_i915_gem_request {
     /** Position in the ringbuffer of the end of the whole request */
     u32 tail;
 
-    /** Context related to this request */
+    /** Context and ring buffer related to this request */
     struct intel_context *ctx;
+    struct intel_ringbuffer *ringbuf;
 
     /** Batch buffer related to this request if any */
     struct drm_i915_gem_object *batch_obj;
@@ -3123,10 +3007,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
 void i915_teardown_sysfs(struct drm_device *dev_priv);

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2763,7 +2763,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
     while (!list_empty(&ring->request_list)) {
         struct drm_i915_gem_request *request;
-        struct intel_ringbuffer *ringbuf;
 
         request = list_first_entry(&ring->request_list,
                        struct drm_i915_gem_request,
@@ -2774,23 +2773,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
         trace_i915_gem_request_retire(request);
 
-        /* This is one of the few common intersection points
-         * between legacy ringbuffer submission and execlists:
-         * we need to tell them apart in order to find the correct
-         * ringbuffer to which the request belongs to.
-         */
-        if (i915.enable_execlists) {
-            struct intel_context *ctx = request->ctx;
-            ringbuf = ctx->engine[ring->id].ringbuf;
-        } else
-            ringbuf = ring->buffer;
-
         /* We know the GPU must have read the request to have
          * sent us the seqno + interrupt, so use the position
          * of tail of the request to update the last known position
         * of the GPU head.
         */
-        ringbuf->last_retired_head = request->postfix;
+        request->ringbuf->last_retired_head = request->postfix;
 
         i915_gem_free_request(request);
     }
@@ -4238,7 +4226,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
         fenceable = (vma->node.size == fence_size &&
                  (vma->node.start & (fence_alignment - 1)) == 0);
 
-        mappable = (vma->node.start + obj->base.size <=
+        mappable = (vma->node.start + fence_size <=
                 dev_priv->gtt.mappable_end);
 
         obj->map_and_fenceable = mappable && fenceable;
@@ -4613,10 +4601,6 @@ i915_gem_suspend(struct drm_device *dev)
     i915_gem_retire_requests(dev);
 
-    /* Under UMS, be paranoid and evict. */
-    if (!drm_core_check_feature(dev, DRIVER_MODESET))
-        i915_gem_evict_everything(dev);
-
     i915_gem_stop_ringbuffers(dev);
     mutex_unlock(&dev->struct_mutex);
@@ -4973,18 +4957,8 @@ i915_gem_load(struct drm_device *dev)
               i915_gem_idle_work_handler);
     init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-    /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-    if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
-        I915_WRITE(MI_ARB_STATE,
-               _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-    }
-
     dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-    /* Old X drivers will take 0-2 for front, back, depth buffers */
-    if (!drm_core_check_feature(dev, DRIVER_MODESET))
-        dev_priv->fence_reg_start = 3;
-
     if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
         dev_priv->num_fence_regs = 32;
     else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))

--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int i;
 
-    /* In execlists mode we will unreference the context when the execlist
-     * queue is cleared and the requests destroyed.
-     */
-    if (i915.enable_execlists)
+    if (i915.enable_execlists) {
+        struct intel_context *ctx;
+
+        list_for_each_entry(ctx, &dev_priv->context_list, link) {
+            intel_lr_context_reset(dev, ctx);
+        }
+
         return;
+    }
 
     for (i = 0; i < I915_NUM_RINGS; i++) {
         struct intel_engine_cs *ring = &dev_priv->ring[i];

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1076,16 +1076,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
               struct drm_i915_gem_object *batch_obj,
               u32 batch_start_offset,
               u32 batch_len,
-              bool is_master,
-              u32 *flags)
+              bool is_master)
 {
     struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
     struct drm_i915_gem_object *shadow_batch_obj;
-    bool need_reloc = false;
+    struct i915_vma *vma;
     int ret;
 
     shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
-                           batch_obj->base.size);
+                           PAGE_ALIGN(batch_len));
     if (IS_ERR(shadow_batch_obj))
         return shadow_batch_obj;
 
@@ -1095,40 +1094,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
                   batch_start_offset,
                   batch_len,
                   is_master);
-    if (ret) {
-        if (ret == -EACCES)
-            return batch_obj;
-    } else {
-        struct i915_vma *vma;
-
-        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
-
-        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
-        vma->exec_entry = shadow_exec_entry;
-        vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
-        drm_gem_object_reference(&shadow_batch_obj->base);
-        i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
-        list_add_tail(&vma->exec_list, &eb->vmas);
-
-        shadow_batch_obj->base.pending_read_domains =
-            batch_obj->base.pending_read_domains;
-
-        /*
-         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-         * bit from MI_BATCH_BUFFER_START commands issued in the
-         * dispatch_execbuffer implementations. We specifically
-         * don't want that set when the command parser is
-         * enabled.
-         *
-         * FIXME: with aliasing ppgtt, buffers that should only
-         * be in ggtt still end up in the aliasing ppgtt. remove
-         * this check when that is fixed.
-         */
-        if (USES_FULL_PPGTT(dev))
-            *flags |= I915_DISPATCH_SECURE;
-    }
+    if (ret)
+        goto err;
 
-    return ret ? ERR_PTR(ret) : shadow_batch_obj;
+    ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+    if (ret)
+        goto err;
+
+    memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+    vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+    vma->exec_entry = shadow_exec_entry;
+    vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+    drm_gem_object_reference(&shadow_batch_obj->base);
+    list_add_tail(&vma->exec_list, &eb->vmas);
+
+    shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+    return shadow_batch_obj;
+
+err:
+    if (ret == -EACCES) /* unhandled chained batch */
+        return batch_obj;
+    else
+        return ERR_PTR(ret);
 }
 
 int
@@ -1138,7 +1127,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                    struct drm_i915_gem_execbuffer2 *args,
                    struct list_head *vmas,
                    struct drm_i915_gem_object *batch_obj,
-                   u64 exec_start, u32 flags)
+                   u64 exec_start, u32 dispatch_flags)
 {
     struct drm_clip_rect *cliprects = NULL;
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1266,19 +1255,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
             ret = ring->dispatch_execbuffer(ring,
                             exec_start, exec_len,
-                            flags);
+                            dispatch_flags);
             if (ret)
                 goto error;
         }
     } else {
         ret = ring->dispatch_execbuffer(ring,
                         exec_start, exec_len,
-                        flags);
+                        dispatch_flags);
         if (ret)
             return ret;
     }
 
-    trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+    trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
 
     i915_gem_execbuffer_move_to_active(vmas, ring);
     i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1342,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
     struct i915_address_space *vm;
     const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
     u64 exec_start = args->batch_start_offset;
-    u32 flags;
+    u32 dispatch_flags;
     int ret;
     bool need_relocs;
 
@@ -1364,15 +1353,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
     if (ret)
         return ret;
 
-    flags = 0;
+    dispatch_flags = 0;
     if (args->flags & I915_EXEC_SECURE) {
         if (!file->is_master || !capable(CAP_SYS_ADMIN))
             return -EPERM;
 
-        flags |= I915_DISPATCH_SECURE;
+        dispatch_flags |= I915_DISPATCH_SECURE;
     }
     if (args->flags & I915_EXEC_IS_PINNED)
-        flags |= I915_DISPATCH_PINNED;
+        dispatch_flags |= I915_DISPATCH_PINNED;
 
     if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
         DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1483,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                               batch_obj,
                               args->batch_start_offset,
                               args->batch_len,
-                              file->is_master,
-                              &flags);
+                              file->is_master);
         if (IS_ERR(batch_obj)) {
             ret = PTR_ERR(batch_obj);
             goto err;
         }
+
+        /*
+         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+         * bit from MI_BATCH_BUFFER_START commands issued in the
+         * dispatch_execbuffer implementations. We specifically
+         * don't want that set when the command parser is
+         * enabled.
+         *
+         * FIXME: with aliasing ppgtt, buffers that should only
+         * be in ggtt still end up in the aliasing ppgtt. remove
+         * this check when that is fixed.
+         */
+        if (USES_FULL_PPGTT(dev))
+            dispatch_flags |= I915_DISPATCH_SECURE;
+
+        exec_start = 0;
     }
 
     batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,7 +1511,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
     /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
      * batch" bit. Hence we need to pin secure batches into the global gtt.
      * hsw should have this fixed, but bdw mucks it up again. */
-    if (flags & I915_DISPATCH_SECURE) {
+    if (dispatch_flags & I915_DISPATCH_SECURE) {
         /*
          * So on first glance it looks freaky that we pin the batch here
         * outside of the reservation loop. But:
@@ -1527,7 +1531,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         exec_start += i915_gem_obj_offset(batch_obj, vm);
 
     ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
-                      &eb->vmas, batch_obj, exec_start, flags);
+                      &eb->vmas, batch_obj, exec_start,
+                      dispatch_flags);
 
     /*
      * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1540,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
      * needs to be adjusted to also track the ggtt batch vma properly as
      * active.
      */
-    if (flags & I915_DISPATCH_SECURE)
+    if (dispatch_flags & I915_DISPATCH_SECURE)
         i915_gem_object_ggtt_unpin(batch_obj);
 err:
     /* the request owns the ref now */

View File

@ -142,7 +142,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;
} }
static void ppgtt_bind_vma(struct i915_vma *vma, static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags); u32 flags);
@ -279,6 +278,100 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte; return pte;
} }
static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev)
{
if (WARN_ON(!pt->page))
return;
__free_page(pt->page);
kfree(pt);
}
static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
{
struct i915_page_table_entry *pt;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt)
return ERR_PTR(-ENOMEM);
pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pt->page) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
return pt;
}
/**
* alloc_pt_range() - Allocate a multiple page tables
* @pd: The page directory which will have at least @count entries
* available to point to the allocated page tables.
* @pde: First page directory entry for which we are allocating.
* @count: Number of pages to allocate.
* @dev: DRM device.
*
* Allocates multiple page table pages and sets the appropriate entries in the
* page table structure within the page directory. Function cleans up after
* itself on any failures.
*
* Return: 0 if allocation succeeded.
*/
static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
struct drm_device *dev)
{
int i, ret;
/* 512 is the max page tables per page_directory on any platform. */
if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES))
return -EINVAL;
for (i = pde; i < pde + count; i++) {
struct i915_page_table_entry *pt = alloc_pt_single(dev);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto err_out;
}
WARN(pd->page_table[i],
"Leaking page directory entry %d (%p)\n",
i, pd->page_table[i]);
pd->page_table[i] = pt;
}
return 0;
err_out:
while (i-- > pde)
unmap_and_free_pt(pd->page_table[i], dev);
return ret;
}
static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
{
if (pd->page) {
__free_page(pd->page);
kfree(pd);
}
}
static struct i915_page_directory_entry *alloc_pd_single(void)
{
struct i915_page_directory_entry *pd;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pd->page) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
return pd;
}
/* Broadwell Page Directory Pointer Descriptors */ /* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val) uint64_t val)
@ -311,7 +404,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
for (i = used_pd - 1; i >= 0; i--) { for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i]; dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
ret = gen8_write_pdp(ring, i, addr); ret = gen8_write_pdp(ring, i, addr);
if (ret) if (ret)
return ret; return ret;
@ -338,7 +431,24 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch); I915_CACHE_LLC, use_scratch);
while (num_entries) { while (num_entries) {
struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde]; struct i915_page_directory_entry *pd;
struct i915_page_table_entry *pt;
struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
continue;
pd = ppgtt->pdp.page_directory[pdpe];
if (WARN_ON(!pd->page_table[pde]))
continue;
pt = pd->page_table[pde];
if (WARN_ON(!pt->page))
continue;
page_table = pt->page;
last_pte = pte + num_entries; last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE) if (last_pte > GEN8_PTES_PER_PAGE)
@@ -382,8 +492,13 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
			break;

-		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+		if (pt_vaddr == NULL) {
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_table_entry *pt = pd->page_table[pde];
+			struct page *page_table = pt->page;
+
+			pt_vaddr = kmap_atomic(page_table);
+		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -407,29 +522,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
	}
}
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
{
	int i;

-	if (pt_pages == NULL)
+	if (!pd->page)
		return;

-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
-		if (pt_pages[i])
-			__free_pages(pt_pages[i], 0);
+	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+		if (WARN_ON(!pd->page_table[i]))
+			continue;
+
+		unmap_and_free_pt(pd->page_table[i], dev);
+		pd->page_table[i] = NULL;
+	}
}

-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_dma_addr[i]);
-	}
-
-	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+			continue;
+
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+	}
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -440,14 +559,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
-		if (!ppgtt->pd_dma_addr[i])
+		if (!ppgtt->pdp.page_directory[i]->daddr)
			continue;

-		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+		pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+			struct i915_page_table_entry *pt;
+			dma_addr_t addr;
+
+			if (WARN_ON(!pd->page_table[j]))
+				continue;
+
+			pt = pd->page_table[j];
+			addr = pt->daddr;
+
			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
@@ -464,86 +592,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
	gen8_ppgtt_free(ppgtt);
}

-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
-	struct page **pt_pages;
-	int i;
-
-	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
-	if (!pt_pages)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
-		pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!pt_pages[i])
-			goto bail;
-	}
-
-	return pt_pages;
-
-bail:
-	gen8_free_page_tables(pt_pages);
-	kfree(pt_pages);
-	return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
-					   const int max_pdp)
-{
-	struct page **pt_pages[GEN8_LEGACY_PDPES];
	int i, ret;

-	for (i = 0; i < max_pdp; i++) {
-		pt_pages[i] = __gen8_alloc_page_tables();
-		if (IS_ERR(pt_pages[i])) {
-			ret = PTR_ERR(pt_pages[i]);
+	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+		ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+				     0, GEN8_PDES_PER_PAGE, ppgtt->base.dev);
+		if (ret)
			goto unwind_out;
-		}
	}

-	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
-	 * "atomic" - for cleanup purposes.
-	 */
-	for (i = 0; i < max_pdp; i++)
-		ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
	return 0;

unwind_out:
-	while (i--) {
-		gen8_free_page_tables(pt_pages[i]);
-		kfree(pt_pages[i]);
-	}
+	while (i--)
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);

-	return ret;
-}
-
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
-						     sizeof(dma_addr_t),
-						     GFP_KERNEL);
-		if (!ppgtt->gen8_pt_dma_addr[i])
-			return -ENOMEM;
-	}
-
-	return 0;
+	return -ENOMEM;
}
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
-	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
-	if (!ppgtt->pd_pages)
-		return -ENOMEM;
+	int i;
+
+	for (i = 0; i < max_pdp; i++) {
+		ppgtt->pdp.page_directory[i] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[i]))
+			goto unwind_out;
+	}

-	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+	ppgtt->num_pd_pages = max_pdp;
	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);

	return 0;
+
+unwind_out:
+	while (i--)
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+
+	return -ENOMEM;
}
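One pattern worth calling out, since it recurs in every allocator above: on failure, i indexes the entry whose allocation failed, so "while (i--)" walks back over exactly the entries that were set up successfully. A standalone, hypothetical sketch of the idiom (the example_ name is illustrative, not from the patch):

/* Illustrative sketch of the unwind idiom used by these allocators.
 * If alloc_page() fails at index i, entries [0, i) were allocated and
 * must be freed; entry i itself was not, so "while (i--)" tears down
 * exactly the right set.
 */
static int example_alloc_all(struct page *pages[], int count)
{
	int i;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto unwind_out;
	}

	return 0;

unwind_out:
	while (i--)
		__free_page(pages[i]);

	return -ENOMEM;
}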
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -555,18 +644,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
	if (ret)
		return ret;

-	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
-	if (ret) {
-		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-		return ret;
-	}
+	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+	if (ret)
+		goto err_out;

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

-	ret = gen8_ppgtt_allocate_dma(ppgtt);
-	if (ret)
-		gen8_ppgtt_free(ppgtt);
+	return 0;

+err_out:
+	gen8_ppgtt_free(ppgtt);
	return ret;
}
@@ -577,14 +664,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       &ppgtt->pd_pages[pd], 0,
+			       ppgtt->pdp.page_directory[pd]->page, 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
	if (ret)
		return ret;

-	ppgtt->pd_dma_addr[pd] = pd_addr;
+	ppgtt->pdp.page_directory[pd]->daddr = pd_addr;

	return 0;
}
@@ -594,17 +681,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pt)
{
	dma_addr_t pt_addr;
-	struct page *p;
+	struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+	struct i915_page_table_entry *ptab = pdir->page_table[pt];
+	struct page *p = ptab->page;
	int ret;

-	p = ppgtt->gen8_pt_pages[pd][pt];
	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
	if (ret)
		return ret;

-	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+	ptab->daddr = pt_addr;

	return 0;
}
@@ -657,10 +745,12 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < max_pdp; i++) {
+		struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
		gen8_ppgtt_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_table_entry *pt = pd->page_table[j];
+			dma_addr_t addr = pt->daddr;
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
@@ -703,14 +793,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);

	seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
-		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+		   ppgtt->pd.pd_offset,
+		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
		u32 expected;
		gen6_gtt_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
		pd_entry = readl(pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

@@ -721,7 +812,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
			unsigned long va =
				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
@@ -754,13 +845,13 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
	uint32_t pd_entry;
	int i;

-	WARN_ON(ppgtt->pd_offset & 0x3f);
+	WARN_ON(ppgtt->pd.pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

-		pt_addr = ppgtt->pt_dma_addr[i];
+		pt_addr = ppgtt->pd.page_table[i]->daddr;
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

@@ -771,9 +862,9 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
-	BUG_ON(ppgtt->pd_offset & 0x3f);
-	return (ppgtt->pd_offset / 64) << 16;
+	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+	return (ppgtt->pd.pd_offset / 64) << 16;
}
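A quick worked example of the offset arithmetic above, with illustrative numbers that are not taken from the patch:

/* Illustrative arithmetic only: suppose the PPGTT node were placed
 * 2 MiB into the GGTT. Then, per gen6_ppgtt_init() further below,
 *   pd.pd_offset = (2 MiB / PAGE_SIZE) * sizeof(gen6_gtt_pte_t)
 *                = 512 * 4 = 2048,
 * which is 64-byte aligned, so BUG_ON(pd_offset & 0x3f) passes and
 * get_pd_offset() returns (2048 / 64) << 16 = 32 << 16.
 */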
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -936,7 +1027,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

@@ -965,7 +1056,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -986,22 +1077,20 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	int i;

-	if (ppgtt->pt_dma_addr) {
-		for (i = 0; i < ppgtt->num_pd_entries; i++)
-			pci_unmap_page(ppgtt->base.dev->pdev,
-				       ppgtt->pt_dma_addr[i],
-				       4096, PCI_DMA_BIDIRECTIONAL);
-	}
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		pci_unmap_page(ppgtt->base.dev->pdev,
+			       ppgtt->pd.page_table[i]->daddr,
+			       4096, PCI_DMA_BIDIRECTIONAL);
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

-	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		__free_page(ppgtt->pt_pages[i]);
-	kfree(ppgtt->pt_pages);
+		unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+
+	unmap_and_free_pd(&ppgtt->pd);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1056,27 +1145,6 @@ alloc:
	return 0;
}

-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
-				  GFP_KERNEL);
-
-	if (!ppgtt->pt_pages)
-		return -ENOMEM;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!ppgtt->pt_pages[i]) {
-			gen6_ppgtt_free(ppgtt);
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;
@@ -1085,20 +1153,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
	if (ret)
		return ret;

-	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+	ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+			     ppgtt->base.dev);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

-	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
-				     GFP_KERNEL);
-	if (!ppgtt->pt_dma_addr) {
-		drm_mm_remove_node(&ppgtt->node);
-		gen6_ppgtt_free(ppgtt);
-		return -ENOMEM;
-	}
-
	return 0;
}
@@ -1108,9 +1170,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		struct page *page;
		dma_addr_t pt_addr;

-		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+		page = ppgtt->pd.page_table[i]->page;
+		pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
@@ -1118,7 +1182,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
			return -EIO;
		}

-		ppgtt->pt_dma_addr[i] = pt_addr;
+		ppgtt->pd.page_table[i]->daddr = pt_addr;
	}

	return 0;
@@ -1157,10 +1221,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

-	ppgtt->pd_offset =
+	ppgtt->pd.pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
@@ -1171,7 +1235,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
	gen6_write_pdes(ppgtt);
	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd_offset << 10);
+		  ppgtt->pd.pd_offset << 10);

	return 0;
}


@@ -187,6 +187,26 @@ struct i915_vma {
		    u32 flags);
};

+struct i915_page_table_entry {
+	struct page *page;
+	dma_addr_t daddr;
+};
+
+struct i915_page_directory_entry {
+	struct page *page; /* NULL for GEN6-GEN7 */
+	union {
+		uint32_t pd_offset;
+		dma_addr_t daddr;
+	};
+	struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer_entry {
+	/* struct page *page; */
+	struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+};
+
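To make the new three-level layout concrete, a hedged sketch (hypothetical helper, not part of the patch) of how a gen8 (pdpe, pde) pair resolves through these structures, mirroring the walks in gen8_ppgtt_clear_range() and gen8_ppgtt_insert_entries() above:

/* Hypothetical illustration: resolve the backing page of one page
 * table via the pdp -> page_directory -> page_table -> page chain.
 */
static struct page *example_lookup_pt_page(struct i915_hw_ppgtt *ppgtt,
					   unsigned pdpe, unsigned pde)
{
	struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
	struct i915_page_table_entry *pt;

	if (!pd)
		return NULL;

	pt = pd->page_table[pde];
	return pt ? pt->page : NULL;
}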
struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
@@ -272,17 +292,8 @@ struct i915_hw_ppgtt {
	unsigned num_pd_entries;
	unsigned num_pd_pages; /* gen8+ */
	union {
-		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPES];
-	};
-	struct page *pd_pages;
-	union {
-		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
-	};
-	union {
-		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
+		struct i915_page_directory_pointer_entry pdp;
+		struct i915_page_directory_entry pd;
	};

	struct drm_i915_file_private *file_priv;


@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

-	if (size < dev_priv->fbc.uncompressed_size)
+	if (size <= dev_priv->fbc.uncompressed_size)
		return 0;

	/* Release any current block */


@@ -492,31 +492,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
	spin_unlock_irq(&dev_priv->irq_lock);
}

-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Locking is horribly broken here, but whatever. */
-		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-		return intel_crtc->active;
-	} else {
-		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
-	}
-}
-
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
@@ -582,34 +557,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+	const struct drm_display_mode *mode =
+		&intel_crtc->config->base.adjusted_mode;

-	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-				 "pipe %c\n", pipe_name(pipe));
-		return 0;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct intel_crtc *intel_crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-		const struct drm_display_mode *mode =
-			&intel_crtc->config->base.adjusted_mode;
-
-		htotal = mode->crtc_htotal;
-		hsync_start = mode->crtc_hsync_start;
-		vbl_start = mode->crtc_vblank_start;
-		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-			vbl_start = DIV_ROUND_UP(vbl_start, 2);
-	} else {
-		enum transcoder cpu_transcoder = (enum transcoder) pipe;
-		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
-		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
-		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
-
-		if ((I915_READ(PIPECONF(cpu_transcoder)) &
-		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
-			vbl_start = DIV_ROUND_UP(vbl_start, 2);
-	}
+	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
+	vbl_start = mode->crtc_vblank_start;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;
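A quick illustration of the conversion step above, with made-up but typical numbers (not taken from the patch):

/* Illustrative arithmetic only: with crtc_htotal = 2200 and
 * crtc_vblank_start = 1080 (a common 1080p timing), the
 * "convert to pixel count" step yields
 *   vbl_start = 1080 * 2200 = 2376000 pixels,
 * while an interlaced mode first halves the line count per field:
 *   vbl_start = DIV_ROUND_UP(1080, 2) = 540 lines.
 */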
@@ -648,12 +605,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

-	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-				 "pipe %c\n", pipe_name(pipe));
-		return 0;
-	}
-
	return I915_READ(reg);
}

@@ -840,7 +791,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
		return -EINVAL;
	}

-	if (!crtc->enabled) {
+	if (!crtc->state->enable) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}
@@ -2647,9 +2598,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
@@ -2669,9 +2617,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2684,9 +2629,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2700,9 +2642,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2754,9 +2693,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -4368,10 +4304,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-	}
+	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;


@@ -139,6 +139,19 @@
#define GEN8_RING_PDP_UDW(ring, n)	((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)

+#define GEN8_R_PWR_CLK_STATE		0x20C8
+#define   GEN8_RPCS_ENABLE		(1 << 31)
+#define   GEN8_RPCS_S_CNT_ENABLE	(1 << 18)
+#define   GEN8_RPCS_S_CNT_SHIFT		15
+#define   GEN8_RPCS_S_CNT_MASK		(0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define   GEN8_RPCS_SS_CNT_ENABLE	(1 << 11)
+#define   GEN8_RPCS_SS_CNT_SHIFT	8
+#define   GEN8_RPCS_SS_CNT_MASK		(0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define   GEN8_RPCS_EU_MAX_SHIFT	4
+#define   GEN8_RPCS_EU_MAX_MASK		(0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define   GEN8_RPCS_EU_MIN_SHIFT	0
+#define   GEN8_RPCS_EU_MIN_MASK		(0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
#define GAM_ECOCHK			0x4090
#define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
#define   ECOCHK_SNB_BIT		(1<<10)
@@ -1025,6 +1038,16 @@ enum skl_disp_power_wells {
#define   DPIO_CHV_PROP_COEFF_SHIFT	0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)

+#define _CHV_PLL_DW8_CH0		0x8020
+#define _CHV_PLL_DW8_CH1		0x81A0
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0		0x8024
+#define _CHV_PLL_DW9_CH1		0x81A4
+#define   DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT	1 /* 3 bits */
+#define   DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE	1 /* 1: coarse & 0 : fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
#define _CHV_CMN_DW5_CH0		0x8114
#define   CHV_BUFRIGHTENA1_DISABLE	(0 << 20)
#define   CHV_BUFRIGHTENA1_NORMAL	(1 << 20)
@@ -1328,6 +1351,8 @@ enum skl_disp_power_wells {
#define   GEN6_WIZ_HASHING_16x4			GEN6_WIZ_HASHING(1, 0)
#define   GEN6_WIZ_HASHING_MASK			GEN6_WIZ_HASHING(1, 1)
#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE	(1 << 5)
+#define   GEN9_IZ_HASHING_MASK(slice)		(0x3 << (slice * 2))
+#define   GEN9_IZ_HASHING(slice, val)		((val) << (slice * 2))

#define GFX_MODE	0x02520
#define GFX_MODE_GEN7	0x0229c
@@ -1506,6 +1531,17 @@ enum skl_disp_power_wells {
#define   CHV_FGT_EU_DIS_SS1_R1_SHIFT	28
#define   CHV_FGT_EU_DIS_SS1_R1_MASK	(0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)

+#define GEN8_FUSE2			0x9120
+#define   GEN8_F2_S_ENA_SHIFT		25
+#define   GEN8_F2_S_ENA_MASK		(0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define   GEN9_F2_SS_DIS_SHIFT		20
+#define   GEN9_F2_SS_DIS_MASK		(0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0		0x9134
+#define GEN8_EU_DISABLE1		0x9138
+#define GEN8_EU_DISABLE2		0x913c
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
#define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
#define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
@@ -3880,6 +3916,7 @@ enum skl_disp_power_wells {
#define   PIPECONF_INTERLACE_MODE_MASK		(7 << 21)
#define   PIPECONF_EDP_RR_MODE_SWITCH		(1 << 20)
#define   PIPECONF_CXSR_DOWNCLOCK		(1<<16)
+#define   PIPECONF_EDP_RR_MODE_SWITCH_VLV	(1 << 14)
#define   PIPECONF_COLOR_RANGE_SELECT		(1 << 13)
#define   PIPECONF_BPC_MASK			(0x7 << 5)
#define   PIPECONF_8BPC				(0<<5)
@@ -5246,8 +5283,9 @@ enum skl_disp_power_wells {
#define COMMON_SLICE_CHICKEN2			0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)

#define HIZ_CHICKEN				0x7018
# define CHV_HZ_8X8_MODE_IN_1X			(1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE	(1<<3)

#define GEN9_SLICE_COMMON_ECO_CHICKEN0		0x7308
#define   DISABLE_PIXEL_MASK_CAMMING		(1<<14)
@@ -6187,6 +6225,26 @@ enum skl_disp_power_wells {
#define GEN6_RC6			3
#define GEN6_RC7			4

+#define GEN9_SLICE0_PGCTL_ACK		0x804c
+#define GEN9_SLICE1_PGCTL_ACK		0x8050
+#define GEN9_SLICE2_PGCTL_ACK		0x8054
+#define   GEN9_PGCTL_SLICE_ACK		(1 << 0)
+
+#define GEN9_SLICE0_SS01_EU_PGCTL_ACK	0x805c
+#define GEN9_SLICE0_SS23_EU_PGCTL_ACK	0x8060
+#define GEN9_SLICE1_SS01_EU_PGCTL_ACK	0x8064
+#define GEN9_SLICE1_SS23_EU_PGCTL_ACK	0x8068
+#define GEN9_SLICE2_SS01_EU_PGCTL_ACK	0x806c
+#define GEN9_SLICE2_SS23_EU_PGCTL_ACK	0x8070
+#define   GEN9_PGCTL_SSA_EU08_ACK	(1 << 0)
+#define   GEN9_PGCTL_SSA_EU19_ACK	(1 << 2)
+#define   GEN9_PGCTL_SSA_EU210_ACK	(1 << 4)
+#define   GEN9_PGCTL_SSA_EU311_ACK	(1 << 6)
+#define   GEN9_PGCTL_SSB_EU08_ACK	(1 << 8)
+#define   GEN9_PGCTL_SSB_EU19_ACK	(1 << 10)
+#define   GEN9_PGCTL_SSB_EU210_ACK	(1 << 12)
+#define   GEN9_PGCTL_SSB_EU311_ACK	(1 << 14)
+
#define GEN7_MISCCPCTL			(0x9424)
#define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
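As a hedged illustration of how the new GEN8_RPCS_* fields compose (a sketch using only the masks defined above; the example_ helpers are not code from this merge):

/* Sketch: extract the slice/subslice counts packed into a
 * GEN8_R_PWR_CLK_STATE value, using the shifts and masks above.
 */
static inline u32 example_rpcs_slice_count(u32 rpcs)
{
	return (rpcs & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT;
}

static inline u32 example_rpcs_subslice_count(u32 rpcs)
{
	return (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT;
}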


@@ -29,166 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
return I915_READ8(data_port);
}
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
return I915_READ8(VGA_AR_DATA_READ);
}
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
I915_WRITE8(VGA_AR_DATA_WRITE, val);
}
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
I915_WRITE8(data_port, val);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(dev, cr_index, cr_data, 0x11,
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
	if (INTEL_INFO(dev)->gen <= 4)
		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);

-	/* This is only meaningful in non-KMS mode */
-	/* Don't regfile.save them in KMS mode */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_save_display_reg(dev);
-
	/* LVDS state */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
	/* save FBC interval */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_save_vga(dev);
}

static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
	if (INTEL_INFO(dev)->gen <= 4)
		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);

-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_restore_display_reg(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		mask = ~LVDS_PORT_EN;
+	mask = ~LVDS_PORT_EN;

	/* LVDS state */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
		I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);

-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_restore_vga(dev);
-	else
-		i915_redisable_vga(dev);
+	i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)

	i915_save_display(dev);

-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Interrupt state */
-		if (HAS_PCH_SPLIT(dev)) {
-			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
-			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
-			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
-			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
-			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
-			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
-			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
-				I915_READ(RSTDBYCTL);
-			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
-		} else {
-			dev_priv->regfile.saveIER = I915_READ(IER);
-			dev_priv->regfile.saveIMR = I915_READ(IMR);
-		}
-	}
-
	if (IS_GEN4(dev))
		pci_read_config_word(dev->pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
				      dev_priv->regfile.saveGCDGMBUS);
	i915_restore_display(dev);

-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Interrupt state */
-		if (HAS_PCH_SPLIT(dev)) {
-			I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
-			I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
-			I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
-			I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
-			I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
-			I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
-			I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
-			I915_WRITE(RSTDBYCTL,
-				   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
-		} else {
-			I915_WRITE(IER, dev_priv->regfile.saveIER);
-			I915_WRITE(IMR, dev_priv->regfile.saveIMR);
-		}
-	}
-
	/* Cache mode state */
	if (INTEL_INFO(dev)->gen < 7)
		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |


@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_get_drvdata(kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

+static struct attribute *media_rc6_attrs[] = {
+	&dev_attr_media_rc6_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+	.name = power_group_name,
+	.attrs = media_rc6_attrs
+};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -487,38 +506,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val, rp_state_cap;
-	ssize_t ret;
+	u32 val;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (attr == &dev_attr_gt_RP0_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0x0000ff) >> 0));
-	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0x00ff00) >> 8));
-	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0xff0000) >> 16));
-	} else {
+	if (attr == &dev_attr_gt_RP0_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+	else if (attr == &dev_attr_gt_RP1_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+	else if (attr == &dev_attr_gt_RPn_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+	else
		BUG();
-	}

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -627,6 +625,12 @@ void i915_setup_sysfs(struct drm_device *dev)
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
+	if (IS_VALLEYVIEW(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&media_rc6_attr_group);
+		if (ret)
+			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
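For context, the new attribute is merged into the drm device's existing power group, so userspace can read it like the other residency counters. A minimal hedged sketch, assuming the conventional drm sysfs layout (card0; the path is illustrative, not taken from this merge):

/* Hypothetical userspace sketch: read the new media RC6 residency
 * counter in milliseconds.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/power/media_rc6_residency_ms", "r");
	unsigned int ms;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &ms) == 1)
		printf("media RC6 residency: %u ms\n", ms);
	fclose(f);
	return 0;
}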


@@ -1,552 +0,0 @@
/*
*
* Copyright 2008 (c) Intel Corporation
* Jesse Barnes <jbarnes@virtuousgeek.org>
* Copyright 2013 (c) Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
/* On IVB, 3rd pipe shares PLL with another one */
if (pipe > 1)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
void i915_save_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* Cursor state */
dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
}
dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
}
dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
/* CRT state */
if (HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
else
dev_priv->regfile.saveADPA = I915_READ(ADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
dev_priv->regfile.saveDP_B = I915_READ(DP_B);
dev_priv->regfile.saveDP_C = I915_READ(DP_C);
dev_priv->regfile.saveDP_D = I915_READ(DP_D);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
}
/* FIXME: regfile.save TV & SDVO state */
/* Panel fitter */
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
}
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
&dev_priv->regfile.saveLBB);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
} else {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
}
return;
}
void i915_restore_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_write_config_byte(dev->pdev, PCI_LBPC,
dev_priv->regfile.saveLBB);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
} else {
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
}
/* Panel fitter */
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
}
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
}
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = _PCH_DPLL_A;
dpll_b_reg = _PCH_DPLL_B;
fpa0_reg = _PCH_FPA0;
fpb0_reg = _PCH_FPB0;
fpa1_reg = _PCH_FPA1;
fpb1_reg = _PCH_FPB1;
} else {
dpll_a_reg = _DPLL_A;
dpll_b_reg = _DPLL_B;
fpa0_reg = _FPA0;
fpb0_reg = _FPB0;
fpa1_reg = _FPA1;
fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
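/*
 * DPLL enable protocol used below: write the saved PLL value with the
 * VCO enable bit cleared, wait ~150us for the PLL to settle, program
 * the FP0/FP1 dividers, then write the full saved value (VCO enabled)
 * and wait again before restoring the pipe timings.
 */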
if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
else
I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
}
/* FIXME: restore TV & SDVO state */
return;
}


@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 			      edp_link_params->vswing);
 		break;
 	}
+
+	if (bdb->version >= 173) {
+		uint8_t vswing;
+
+		vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+		dev_priv->vbt.edp_low_vswing = vswing == 0;
+	}
 }
 
 static void
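
For reference, the new VBT field packs one 4-bit vswing/pre-emphasis selector per panel into a single u64, indexed by panel_type, with selector 0 meaning low vswing. A standalone sketch of that decode (the field value below is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the per-panel 4-bit selector from the packed v173 field. */
    static uint8_t edp_vswing_for_panel(uint64_t edp_vswing_preemph, int panel_type)
    {
            return (edp_vswing_preemph >> (panel_type * 4)) & 0xF;
    }

    int main(void)
    {
            uint64_t field = 0x10; /* hypothetical: panel 0 -> 0x0, panel 1 -> 0x1 */

            printf("panel 0 low vswing: %d\n", edp_vswing_for_panel(field, 0) == 0);
            printf("panel 1 low vswing: %d\n", edp_vswing_for_panel(field, 1) == 0);
            return 0;
    }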


@@ -554,6 +554,7 @@ struct bdb_edp {
 	/* ith bit indicates enabled/disabled for (i+1)th panel */
 	u16 edp_s3d_feature;
 	u16 edp_t3_optimization;
+	u64 edp_vswing_preemph;		/* v173 */
 } __packed;
 
 struct psr_table {


@@ -139,6 +139,21 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 	{ 0x00004014, 0x00000087 },
 };
 
+/* eDP 1.4 low vswing translation parameters */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000a8 },
+	{ 0x00002016, 0x000000ab },
+	{ 0x00006012, 0x000000a2 },
+	{ 0x00008010, 0x00000088 },
+	{ 0x00000018, 0x000000ab },
+	{ 0x00004014, 0x000000a2 },
+	{ 0x00006012, 0x000000a6 },
+	{ 0x00000018, 0x000000a2 },
+	{ 0x00005013, 0x0000009c },
+	{ 0x00000018, 0x00000088 },
+};
+
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
 					/* Idx	NT mV   T mV    db  */
 	{ 0x00000018, 0x000000a0 },	/* 0:	400	400	0   */
@@ -187,7 +202,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
-	int i, n_hdmi_entries, hdmi_800mV_0dB;
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB,
+	    size;
 	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 	const struct ddi_buf_trans *ddi_translations_fdi;
 	const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,7 +214,15 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 	if (IS_SKYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp = skl_ddi_translations_dp;
-		ddi_translations_edp = skl_ddi_translations_dp;
+		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		if (dev_priv->vbt.edp_low_vswing) {
+			ddi_translations_edp = skl_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = skl_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+
 		ddi_translations_hdmi = skl_ddi_translations_hdmi;
 		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
@@ -207,6 +231,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_edp = bdw_ddi_translations_edp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
 	} else if (IS_HASWELL(dev)) {
@@ -214,6 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_dp = hsw_ddi_translations_dp;
 		ddi_translations_edp = hsw_ddi_translations_dp;
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 6;
 	} else {
@@ -222,6 +249,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
 	}
@@ -229,29 +258,34 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 	switch (port) {
 	case PORT_A:
 		ddi_translations = ddi_translations_edp;
+		size = n_edp_entries;
 		break;
 	case PORT_B:
 	case PORT_C:
 		ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
 		break;
 	case PORT_D:
-		if (intel_dp_is_edp(dev, PORT_D))
+		if (intel_dp_is_edp(dev, PORT_D)) {
 			ddi_translations = ddi_translations_edp;
-		else
+			size = n_edp_entries;
+		} else {
 			ddi_translations = ddi_translations_dp;
+			size = n_dp_entries;
+		}
 		break;
 	case PORT_E:
 		if (ddi_translations_fdi)
 			ddi_translations = ddi_translations_fdi;
 		else
 			ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
 		break;
 	default:
 		BUG();
 	}
 
-	for (i = 0, reg = DDI_BUF_TRANS(port);
-	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+	for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
 		I915_WRITE(reg, ddi_translations[i].trans1);
 		reg += 4;
 		I915_WRITE(reg, ddi_translations[i].trans2);
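
One behavioral fix is easy to miss here: the old loop always wrote ARRAY_SIZE(hsw_ddi_translations_fdi) register pairs no matter which table had been selected, reading past the end of shorter tables; the new size variable ties the write count to the chosen table. A minimal sketch of the corrected pattern (register I/O stubbed out, base address purely illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct ddi_buf_trans { unsigned int trans1, trans2; };

    /* stand-in for I915_WRITE; the real code writes the DDI_BUF_TRANS registers */
    static void reg_write(unsigned int reg, unsigned int val)
    {
            printf("write 0x%05x = 0x%08x\n", reg, val);
    }

    /* write exactly `size` entries of the selected table, not a fixed count */
    static void program_table(unsigned int reg,
                              const struct ddi_buf_trans *table, size_t size)
    {
            size_t i;

            for (i = 0; i < size; i++) {
                    reg_write(reg, table[i].trans1);
                    reg += 4;
                    reg_write(reg, table[i].trans2);
                    reg += 4;
            }
    }

    int main(void)
    {
            static const struct ddi_buf_trans edp[] = {
                    { 0x00000018, 0x000000a8 },
                    { 0x00002016, 0x000000ab },
            };

            program_table(0x64e00, edp, sizeof(edp) / sizeof(edp[0]));
            return 0;
    }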


@@ -390,7 +390,7 @@ static const intel_limit_t intel_limits_chv = {
 	 * them would make no difference.
 	 */
 	.dot = { .min = 25000 * 5, .max = 540000 * 5},
-	.vco = { .min = 4860000, .max = 6700000 },
+	.vco = { .min = 4860000, .max = 6480000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -2195,9 +2195,44 @@ intel_fb_align_height(struct drm_device *dev, int height,
 		      uint64_t fb_format_modifier)
 {
 	int tile_height;
+	uint32_t bits_per_pixel;
 
-	tile_height = fb_format_modifier == I915_FORMAT_MOD_X_TILED ?
-		(IS_GEN2(dev) ? 16 : 8) : 1;
+	switch (fb_format_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		tile_height = 1;
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		tile_height = IS_GEN2(dev) ? 16 : 8;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		tile_height = 32;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+		switch (bits_per_pixel) {
+		default:
+		case 8:
+			tile_height = 64;
+			break;
+		case 16:
+		case 32:
+			tile_height = 32;
+			break;
+		case 64:
+			tile_height = 16;
+			break;
+		case 128:
+			WARN_ONCE(1,
+				  "128-bit pixels are not supported for display!");
+			tile_height = 16;
+			break;
+		}
+		break;
+	default:
+		MISSING_CASE(fb_format_modifier);
+		tile_height = 1;
+		break;
+	}
 
 	return ALIGN(height, tile_height);
 }
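
To make the new rule concrete: a 1080-line, 32bpp Yf-tiled framebuffer uses a 32-line tile height, so its allocated height rounds up to 1088. A self-contained sketch of the lookup (the modifier enum is a local stand-in, not the real DRM constants):

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    enum fb_mod { MOD_LINEAR, MOD_X, MOD_Y, MOD_YF }; /* local stand-ins */

    static int tile_height(enum fb_mod mod, int bits_per_pixel, int is_gen2)
    {
            switch (mod) {
            case MOD_LINEAR:
                    return 1;
            case MOD_X:
                    return is_gen2 ? 16 : 8;
            case MOD_Y:
                    return 32;
            case MOD_YF:
                    if (bits_per_pixel == 8)
                            return 64;
                    if (bits_per_pixel == 64)
                            return 16;
                    return 32; /* 16bpp and 32bpp */
            }
            return 1;
    }

    int main(void)
    {
            int th = tile_height(MOD_YF, 32, 0);

            printf("aligned height = %d\n", ALIGN_UP(1080, th)); /* 1088 */
            return 0;
    }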
@@ -2235,8 +2270,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 		}
 		break;
 	case I915_FORMAT_MOD_Y_TILED:
-		WARN(1, "Y tiled bo slipped through, driver bug!\n");
-		return -EINVAL;
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+			      "Y tiling bo slipped through, driver bug!\n"))
+			return -EINVAL;
+		alignment = 1 * 1024 * 1024;
+		break;
 	default:
 		MISSING_CASE(fb->modifier[0]);
 		return -EINVAL;
@@ -2728,6 +2767,40 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	POSTING_READ(reg);
 }
 
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format)
+{
+	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+	/*
+	 * The stride is either expressed as a multiple of 64 bytes
+	 * chunks for linear buffers or in number of tiles for tiled
+	 * buffers.
+	 */
+	switch (fb_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		return 64;
+	case I915_FORMAT_MOD_X_TILED:
+		if (INTEL_INFO(dev)->gen == 2)
+			return 128;
+		return 512;
+	case I915_FORMAT_MOD_Y_TILED:
+		/* No need to check for old gens and Y tiling since this is
+		 * about the display engine and those will be blocked before
+		 * we get here.
+		 */
+		return 128;
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (bits_per_pixel == 8)
+			return 64;
+		else
+			return 128;
+	default:
+		MISSING_CASE(fb_modifier);
+		return 64;
+	}
+}
+
 static void skylake_update_primary_plane(struct drm_crtc *crtc,
 					 struct drm_framebuffer *fb,
 					 int x, int y)
@@ -2735,10 +2808,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	int pipe = intel_crtc->pipe;
-	u32 plane_ctl, stride;
+	u32 plane_ctl, stride_div;
 
 	if (!intel_crtc->primary_enabled) {
 		I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2773,29 +2845,30 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 		BUG();
 	}
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
 	switch (fb->modifier[0]) {
 	case DRM_FORMAT_MOD_NONE:
-		stride = fb->pitches[0] >> 6;
 		break;
 	case I915_FORMAT_MOD_X_TILED:
 		plane_ctl |= PLANE_CTL_TILED_X;
-		stride = fb->pitches[0] >> 9;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		plane_ctl |= PLANE_CTL_TILED_Y;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		plane_ctl |= PLANE_CTL_TILED_YF;
 		break;
 	default:
-		BUG();
+		MISSING_CASE(fb->modifier[0]);
 	}
 
 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
 		plane_ctl |= PLANE_CTL_ROTATE_180;
 
+	obj = intel_fb_obj(fb);
+	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+					       fb->pixel_format);
+
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
 
 	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
@@ -2808,7 +2881,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	I915_WRITE(PLANE_SIZE(pipe, 0),
 		   (intel_crtc->config->pipe_src_h - 1) << 16 |
 		   (intel_crtc->config->pipe_src_w - 1));
-	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+	I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
 	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
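
With this change PLANE_STRIDE is programmed in stride units rather than raw bytes: fb->pitches[0] divided by 64 for linear, 512 for X tiles, and 128 (or 64 at 8bpp) for Y/Yf tiles. A quick worked example with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
            /* assumed values: X-tiled fb, pitch padded to 512-byte tile rows */
            unsigned int pitch_bytes = 8192;  /* stand-in for fb->pitches[0] */
            unsigned int stride_div = 512;    /* X-tile row size in bytes */

            printf("PLANE_STRIDE = %u tiles\n", pitch_bytes / stride_div); /* 16 */
            return 0;
    }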
@@ -3062,7 +3135,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 
 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
 {
-	return crtc->base.enabled && crtc->active &&
+	return crtc->base.state->enable && crtc->active &&
 		crtc->config->has_pch_encoder;
 }
@@ -4200,7 +4273,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
-	if (!crtc->enabled || !intel_crtc->active)
+	if (!crtc->state->enable || !intel_crtc->active)
 		return;
 
 	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4313,7 +4386,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -4322,7 +4395,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	intel_prepare_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
@@ -4421,7 +4494,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -4430,7 +4503,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_enable_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
@@ -4768,7 +4841,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
 	for_each_intel_crtc(dev, crtc) {
 		enum intel_display_power_domain domain;
 
-		if (!crtc->base.enabled)
+		if (!crtc->base.state->enable)
 			continue;
 
 		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4989,7 +5062,7 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
 
 	/* disable/enable all currently active pipes while we change cdclk */
 	for_each_intel_crtc(dev, intel_crtc)
-		if (intel_crtc->base.enabled)
+		if (intel_crtc->base.state->enable)
 			*prepare_pipes |= (1 << intel_crtc->pipe);
 }
@@ -5029,7 +5102,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	bool is_dsi;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -5044,7 +5117,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	}
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
@@ -5112,7 +5185,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -5120,7 +5193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	i9xx_set_pll_dividers(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
@@ -5311,7 +5384,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	dev_priv->display.crtc_disable(crtc);
 	dev_priv->display.off(crtc);
@@ -5389,7 +5462,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
 			crtc = encoder->base.crtc;
 
-			I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+			I915_STATE_WARN(!crtc->state->enable,
+					"crtc not enabled\n");
 			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
 			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
 					"encoder active on the wrong pipe\n");
@@ -5576,7 +5650,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	 *   - LVDS dual channel mode
 	 *   - Double wide pipe
 	 */
-	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
 		pipe_config->pipe_src_w &= ~1;
@@ -5879,7 +5953,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	 * for gen < 8) and if DRRS is supported (to make sure the
 	 * registers are not unnecessarily accessed).
 	 */
-	if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+	if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
 		crtc->config->has_drrs) {
 		I915_WRITE(PIPE_DATA_M2(transcoder),
 			   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5895,13 +5969,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	}
 }
 
-void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 {
+	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+	if (m_n == M1_N1) {
+		dp_m_n = &crtc->config->dp_m_n;
+		dp_m2_n2 = &crtc->config->dp_m2_n2;
+	} else if (m_n == M2_N2) {
+
+		/*
+		 * M2_N2 registers are not supported. Hence m2_n2 divider value
+		 * needs to be programmed into M1_N1.
+		 */
+		dp_m_n = &crtc->config->dp_m2_n2;
+	} else {
+		DRM_ERROR("Unsupported divider value\n");
+		return;
+	}
+
 	if (crtc->config->has_pch_encoder)
 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
 	else
-		intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
-					     &crtc->config->dp_m2_n2);
+		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
 static void vlv_update_pll(struct intel_crtc *crtc,
@@ -7650,7 +7740,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val, base, offset, stride_mult;
+	u32 val, base, offset, stride_mult, tiling;
 	int pipe = crtc->pipe;
 	int fourcc, pixel_format;
 	int aligned_height;
@@ -7669,11 +7759,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	if (!(val & PLANE_CTL_ENABLE))
 		goto error;
 
-	if (val & PLANE_CTL_TILED_MASK) {
-		plane_config->tiling = I915_TILING_X;
-		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
-	}
-
 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
 	fourcc = skl_format_to_fourcc(pixel_format,
 				      val & PLANE_CTL_ORDER_RGBX,
@@ -7681,6 +7766,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->pixel_format = fourcc;
 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
+	tiling = val & PLANE_CTL_TILED_MASK;
+	switch (tiling) {
+	case PLANE_CTL_TILED_LINEAR:
+		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+		break;
+	case PLANE_CTL_TILED_X:
+		plane_config->tiling = I915_TILING_X;
+		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		break;
+	case PLANE_CTL_TILED_Y:
+		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+		break;
+	case PLANE_CTL_TILED_YF:
+		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+		break;
+	default:
+		MISSING_CASE(tiling);
+		goto error;
+	}
+
 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
 	plane_config->base = base;
@@ -7691,17 +7796,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, 0));
-	switch (plane_config->tiling) {
-	case I915_TILING_NONE:
-		stride_mult = 64;
-		break;
-	case I915_TILING_X:
-		stride_mult = 512;
-		break;
-	default:
-		MISSING_CASE(plane_config->tiling);
-		goto error;
-	}
+	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+						fb->pixel_format);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
 	aligned_height = intel_fb_align_height(dev, fb->height,
@@ -8686,7 +8782,7 @@ retry:
 		i++;
 		if (!(encoder->possible_crtcs & (1 << i)))
 			continue;
-		if (possible_crtc->enabled)
+		if (possible_crtc->state->enable)
 			continue;
 		/* This can occur when applying the pipe A quirk on resume. */
 		if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8754,7 +8850,7 @@ retry:
 	return true;
 
  fail:
-	intel_crtc->new_enabled = crtc->enabled;
+	intel_crtc->new_enabled = crtc->state->enable;
 	if (intel_crtc->new_enabled)
 		intel_crtc->new_config = intel_crtc->config;
 	else
@@ -9661,10 +9757,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
 		    !i915_gem_request_completed(work->flip_queued_req, true))
 			return false;
 
-		work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
 	}
 
-	if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
 		return false;
 
 	/* Potential stall - if we see that the flip has happened,
@@ -9695,7 +9791,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 	spin_lock(&dev->event_lock);
 	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-			 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+			 intel_crtc->unpin_work->flip_queued_vblank,
+			 drm_vblank_count(dev, pipe));
 		page_flip_completed(intel_crtc);
 	}
 	spin_unlock(&dev->event_lock);
@@ -9837,7 +9934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 					intel_ring_get_request(ring));
 	}
 
-	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;
 
 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
@@ -9913,7 +10010,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
 	}
 
 	for_each_intel_crtc(dev, crtc) {
-		crtc->new_enabled = crtc->base.enabled;
+		crtc->new_enabled = crtc->base.state->enable;
 
 		if (crtc->new_enabled)
 			crtc->new_config = crtc->config;
@@ -9943,6 +10040,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
 	}
 
 	for_each_intel_crtc(dev, crtc) {
+		crtc->base.state->enable = crtc->new_enabled;
 		crtc->base.enabled = crtc->new_enabled;
 	}
 }
@@ -10206,6 +10304,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	if (!pipe_config)
 		return ERR_PTR(-ENOMEM);
 
+	pipe_config->base.crtc = crtc;
 	drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
 	drm_mode_copy(&pipe_config->base.mode, mode);
@@ -10354,7 +10453,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
 
 	/* Check for pipes that will be enabled/disabled ... */
 	for_each_intel_crtc(dev, intel_crtc) {
-		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
+		if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
 			continue;
 
 		if (!intel_crtc->new_enabled)
@@ -10429,10 +10528,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
 	/* Double check state. */
 	for_each_intel_crtc(dev, intel_crtc) {
-		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+		WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
 		WARN_ON(intel_crtc->new_config &&
 			intel_crtc->new_config != intel_crtc->config);
-		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
+		WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
 	}
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10819,7 +10918,7 @@ check_crtc_state(struct drm_device *dev)
 		DRM_DEBUG_KMS("[CRTC:%d]\n",
 			      crtc->base.base.id);
 
-		I915_STATE_WARN(crtc->active && !crtc->base.enabled,
+		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
 		     "active crtc, but not enabled in sw tracking\n");
 
 		for_each_intel_encoder(dev, encoder) {
@@ -10833,9 +10932,10 @@ check_crtc_state(struct drm_device *dev)
 		I915_STATE_WARN(active != crtc->active,
 		     "crtc's computed active state doesn't match tracked active state "
 		     "(expected %i, found %i)\n", active, crtc->active);
-		I915_STATE_WARN(enabled != crtc->base.enabled,
+		I915_STATE_WARN(enabled != crtc->base.state->enable,
 		     "crtc's computed enabled state doesn't match tracked enabled state "
-		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+		     "(expected %i, found %i)\n", enabled,
+				crtc->base.state->enable);
 
 		active = dev_priv->display.get_pipe_config(crtc,
 							   &pipe_config);
@@ -10899,7 +10999,7 @@ check_shared_dpll_state(struct drm_device *dev)
 		     pll->on, active);
 
 		for_each_intel_crtc(dev, crtc) {
-			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
 				enabled_crtcs++;
 			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
 				active_crtcs++;
@@ -11085,7 +11185,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 		intel_crtc_disable(&intel_crtc->base);
 
 	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
-		if (intel_crtc->base.enabled)
+		if (intel_crtc->base.state->enable)
 			dev_priv->display.crtc_disable(&intel_crtc->base);
 	}
@@ -11141,7 +11241,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 	/* FIXME: add subpixel order */
 done:
-	if (ret && crtc->enabled)
+	if (ret && crtc->state->enable)
 		crtc->mode = *saved_mode;
 
 	kfree(saved_mode);
@@ -11237,7 +11337,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
 	 */
 	count = 0;
 	for_each_crtc(dev, crtc) {
-		config->save_crtc_enabled[count++] = crtc->enabled;
+		config->save_crtc_enabled[count++] = crtc->state->enable;
 	}
 
 	count = 0;
@@ -11471,7 +11571,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 		}
 	}
 
-	if (crtc->new_enabled != crtc->base.enabled) {
+	if (crtc->new_enabled != crtc->base.state->enable) {
 		DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
 			      crtc->new_enabled ? "en" : "dis");
 		config->mode_changed = true;
@@ -11907,6 +12007,12 @@ intel_check_primary_plane(struct drm_plane *plane,
 			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
 		intel_crtc->atomic.update_fbc = true;
+
+		/* Update watermarks on tiling changes. */
+		if (!plane->state->fb || !state->base.fb ||
+		    plane->state->fb->modifier[0] !=
+		    state->base.fb->modifier[0])
+			intel_crtc->atomic.update_wm = true;
 	}
 
 	return 0;
@@ -12297,6 +12403,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	if (!crtc_state)
 		goto fail;
 	intel_crtc_set_state(intel_crtc, crtc_state);
+	crtc_state->base.crtc = &intel_crtc->base;
 
 	primary = intel_primary_plane_create(dev, pipe);
 	if (!primary)
@@ -12374,9 +12481,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 	struct drm_crtc *drmmode_crtc;
 	struct intel_crtc *crtc;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
 
 	if (!drmmode_crtc) {
@@ -12649,14 +12753,43 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 	.create_handle = intel_user_framebuffer_create_handle,
 };
 
+static
+u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
+			 uint32_t pixel_format)
+{
+	u32 gen = INTEL_INFO(dev)->gen;
+
+	if (gen >= 9) {
+		/* "The stride in bytes must not exceed the size of 8K
+		 *  pixels and 32K bytes."
+		 */
+		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
+		return 32*1024;
+	} else if (gen >= 4) {
+		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+			return 16*1024;
+		else
+			return 32*1024;
+	} else if (gen >= 3) {
+		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
+			return 8*1024;
+		else
+			return 16*1024;
+	} else {
+		/* XXX DSPC is limited to 4k tiled */
+		return 8*1024;
+	}
+}
+
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *intel_fb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
 				  struct drm_i915_gem_object *obj)
 {
 	int aligned_height;
-	int pitch_limit;
 	int ret;
+	u32 pitch_limit, stride_alignment;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -12677,36 +12810,37 @@ static int intel_framebuffer_init(struct drm_device *dev,
 		}
 	}
 
-	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED) {
-		DRM_DEBUG("hardware does not support tiling Y\n");
+	/* Passed in modifier sanity checking. */
+	switch (mode_cmd->modifier[0]) {
+	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (INTEL_INFO(dev)->gen < 9) {
+			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
+				  mode_cmd->modifier[0]);
+			return -EINVAL;
+		}
+	case DRM_FORMAT_MOD_NONE:
+	case I915_FORMAT_MOD_X_TILED:
+		break;
+	default:
+		DRM_ERROR("Unsupported fb modifier 0x%llx!\n",
+			  mode_cmd->modifier[0]);
 		return -EINVAL;
 	}
 
-	if (mode_cmd->pitches[0] & 63) {
-		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
-			  mode_cmd->pitches[0]);
+	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+						     mode_cmd->pixel_format);
+	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
+			  mode_cmd->pitches[0], stride_alignment);
 		return -EINVAL;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
-		pitch_limit = 32*1024;
-	} else if (INTEL_INFO(dev)->gen >= 4) {
-		if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
-			pitch_limit = 16*1024;
-		else
-			pitch_limit = 32*1024;
-	} else if (INTEL_INFO(dev)->gen >= 3) {
-		if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
-			pitch_limit = 8*1024;
-		else
-			pitch_limit = 16*1024;
-	} else
-		/* XXX DSPC is limited to 4k tiled */
-		pitch_limit = 8*1024;
-
+	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+					   mode_cmd->pixel_format);
 	if (mode_cmd->pitches[0] > pitch_limit) {
-		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
-			  mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED ?
+		DRM_DEBUG("%s pitch (%u) must be less than %d\n",
+			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
 			  "tiled" : "linear",
 			  mode_cmd->pitches[0], pitch_limit);
 		return -EINVAL;
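
On gen9 the limit becomes min(8K pixels, 32K bytes), so which cap binds depends on pixel size: an 8bpp format tops out at 8192 bytes while a 32bpp format reaches the full 32768. A small check of that arithmetic:

    #include <stdio.h>

    static int gen9_pitch_limit(int cpp)
    {
            int by_pixels = 8192 * cpp; /* "8K pixels" expressed in bytes */
            int by_bytes = 32768;       /* "32K bytes" hard cap */

            return by_pixels < by_bytes ? by_pixels : by_bytes;
    }

    int main(void)
    {
            printf("8bpp  limit: %d bytes\n", gen9_pitch_limit(1)); /* 8192 */
            printf("32bpp limit: %d bytes\n", gen9_pitch_limit(4)); /* 32768 */
            return 0;
    }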
@@ -13318,11 +13452,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
 	/* restore vblank interrupts to correct state */
+	drm_crtc_vblank_reset(&crtc->base);
 	if (crtc->active) {
 		update_scanline_offset(crtc);
-		drm_vblank_on(dev, crtc->pipe);
-	} else
-		drm_vblank_off(dev, crtc->pipe);
+		drm_crtc_vblank_on(&crtc->base);
+	}
 
 	/* We need to sanitize the plane -> pipe mapping first because this will
 	 * disable the crtc (and hence change the state) if it is wrong. Note
@@ -13362,6 +13496,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		}
 
 		WARN_ON(crtc->active);
+		crtc->base.state->enable = false;
 		crtc->base.enabled = false;
 	}
@@ -13378,7 +13513,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	 * have active connectors/encoders. */
 	intel_crtc_update_dpms(&crtc->base);
 
-	if (crtc->active != crtc->base.enabled) {
+	if (crtc->active != crtc->base.state->enable) {
 		struct intel_encoder *encoder;
 
 		/* This can happen either due to bugs in the get_hw_state
@@ -13386,9 +13521,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		 * pipe A quirk. */
 		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
 			      crtc->base.base.id,
-			      crtc->base.enabled ? "enabled" : "disabled",
+			      crtc->base.state->enable ? "enabled" : "disabled",
 			      crtc->active ? "enabled" : "disabled");
 
+		crtc->base.state->enable = crtc->active;
 		crtc->base.enabled = crtc->active;
 
 		/* Because we only establish the connector -> encoder ->
@@ -13525,6 +13661,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 crtc->config);
 
+		crtc->base.state->enable = crtc->active;
 		crtc->base.enabled = crtc->active;
 		crtc->primary_enabled = primary_get_hw_state(crtc);


@@ -2691,11 +2691,14 @@ static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-	else if (IS_VALLEYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev))
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2722,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
+		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+			return DP_TRAIN_PRE_EMPH_LEVEL_0;
 		default:
 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
 		}
@@ -3201,6 +3206,9 @@ intel_hsw_signal_levels(uint8_t train_set)
 		return DDI_BUF_TRANS_SELECT(7);
 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
 		return DDI_BUF_TRANS_SELECT(8);
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		return DDI_BUF_TRANS_SELECT(9);
 	default:
 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
 			      "0x%x\n", signal_levels);
@@ -4736,6 +4744,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		      I915_READ(pp_div_reg));
 }
 
+/**
+ * intel_dp_set_drrs_state - program registers for RR switch to take effect
+ * @dev: DRM device
+ * @refresh_rate: RR to be programmed
+ *
+ * This function gets called when refresh rate (RR) has to be changed from
+ * one frequency to another. Switches can be between high and low RR
+ * supported by the panel or to any other RR based on media playback (in
+ * this case, RR value needs to be passed from user space).
+ *
+ * The caller of this function needs to take a lock on dev_priv->drrs.
+ */
 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4793,14 +4813,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 		return;
 	}
 
-	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
+	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+		switch (index) {
+		case DRRS_HIGH_RR:
+			intel_dp_set_m_n(intel_crtc, M1_N1);
+			break;
+		case DRRS_LOW_RR:
+			intel_dp_set_m_n(intel_crtc, M2_N2);
+			break;
+		case DRRS_MAX_RR:
+		default:
+			DRM_ERROR("Unsupported refreshrate type\n");
+		}
+	} else if (INTEL_INFO(dev)->gen > 6) {
 		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
 		val = I915_READ(reg);
+
 		if (index > DRRS_HIGH_RR) {
-			val |= PIPECONF_EDP_RR_MODE_SWITCH;
-			intel_dp_set_m_n(intel_crtc);
+			if (IS_VALLEYVIEW(dev))
+				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+			else
+				val |= PIPECONF_EDP_RR_MODE_SWITCH;
 		} else {
-			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+			if (IS_VALLEYVIEW(dev))
+				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+			else
+				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
 		}
 		I915_WRITE(reg, val);
 	}
@@ -4810,6 +4848,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
 }
 
+/**
+ * intel_edp_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
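
The kerneldoc above spells out the locking contract: intel_dp_set_drrs_state() expects the caller to hold dev_priv->drrs.mutex, and the exported entry points follow a lock/check/call/unlock shape. A schematic of that convention, with pthreads standing in for the kernel mutex (types simplified, not the driver's own):

    #include <pthread.h>
    #include <stdio.h>

    struct drrs_state {
            pthread_mutex_t mutex; /* stands in for dev_priv->drrs.mutex */
            int refresh_rate;
    };

    /* must be called with st->mutex held, like intel_dp_set_drrs_state() */
    static void set_drrs_state_locked(struct drrs_state *st, int refresh_rate)
    {
            st->refresh_rate = refresh_rate;
    }

    static void drrs_switch(struct drrs_state *st, int refresh_rate)
    {
            pthread_mutex_lock(&st->mutex);
            set_drrs_state_locked(st, refresh_rate);
            pthread_mutex_unlock(&st->mutex);
    }

    int main(void)
    {
            struct drrs_state st = { PTHREAD_MUTEX_INITIALIZER, 60 };

            drrs_switch(&st, 40); /* e.g. drop to the panel's downclock mode */
            printf("refresh rate now %d Hz\n", st.refresh_rate);
            return 0;
    }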
@@ -4837,6 +4881,11 @@ unlock:
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ *
+ */
 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4896,6 +4945,17 @@ unlock:
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
+/**
+ * intel_edp_drrs_invalidate - Invalidate DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is a disturbance on screen (due to cursor movement/time
+ * update etc), DRRS needs to be invalidated, i.e. need to switch to
+ * high RR.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
 void intel_edp_drrs_invalidate(struct drm_device *dev,
 		unsigned frontbuffer_bits)
 {
@@ -4923,6 +4983,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
+/**
+ * intel_edp_drrs_flush - Flush DRRS
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * When there is no movement on screen, DRRS work can be scheduled.
+ * This DRRS work is responsible for setting relevant registers after a
+ * timeout of 1 second.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
 void intel_edp_drrs_flush(struct drm_device *dev,
 		unsigned frontbuffer_bits)
 {
@@ -4947,6 +5018,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates,
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of 2 types - static and seamless.
+ * Static DRRS involves changing refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenario.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * eDP DRRS:-
+ *        The implementation is based on frontbuffer tracking implementation.
+ * When there is a disturbance on the screen triggered by user activity or a
+ * periodic system activity, DRRS is disabled (RR is changed to high RR).
+ * When there is no movement on screen, after a timeout of 1 second, a switch
+ * to low RR is made.
+ *        For integration with frontbuffer tracking code,
+ * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
+/**
+ * intel_dp_drrs_init - Init basic DRRS work and mutex.
+ * @intel_connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS stuff.
+ *
+ * Returns:
+ * Downclock mode if panel supports it, else return NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
 static struct drm_display_mode *
 intel_dp_drrs_init(struct intel_connector *intel_connector,
 		struct drm_display_mode *fixed_mode)
@@ -4970,7 +5091,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
 		(dev, fixed_mode, connector);
 
 	if (!downclock_mode) {
-		DRM_DEBUG_KMS("DRRS not supported\n");
+		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
 		return NULL;
 	}
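
Boiled down, the eDP DRRS policy described in the DOC block is: any frontbuffer invalidation forces high RR immediately, and a flush may drop back to low RR only once the busy bits clear (in the driver this happens from delayed work after a 1 second timeout). A toy model of that state machine (all names local, not the driver's):

    #include <stdio.h>

    enum rr { LOW_RR, HIGH_RR };

    struct drrs {
            enum rr state;
            unsigned int busy_frontbuffer_bits;
    };

    /* screen touched: switch to high RR at once and record the dirty planes */
    static void drrs_invalidate(struct drrs *d, unsigned int bits)
    {
            d->busy_frontbuffer_bits |= bits;
            d->state = HIGH_RR;
    }

    /* activity flushed: clear the bits; once idle, downclock (driver: after 1s) */
    static void drrs_flush(struct drrs *d, unsigned int bits)
    {
            d->busy_frontbuffer_bits &= ~bits;
            if (!d->busy_frontbuffer_bits)
                    d->state = LOW_RR;
    }

    int main(void)
    {
            struct drrs d = { LOW_RR, 0 };

            drrs_invalidate(&d, 0x1); /* e.g. cursor moved */
            drrs_flush(&d, 0x1);      /* screen idle again */
            printf("state: %s\n", d.state == LOW_RR ? "low RR" : "high RR");
            return 0;
    }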


@@ -501,6 +501,7 @@ struct intel_plane_wm_parameters {
 	uint8_t bytes_per_pixel;
 	bool enabled;
 	bool scaled;
+	u64 tiling;
 };
 
 struct intel_plane {
@@ -593,6 +594,26 @@ struct intel_hdmi {
 struct intel_dp_mst_encoder;
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
 
+/*
+ * enum link_m_n_set:
+ *	When the platform provides two sets of M_N registers for dp, we can
+ *	program them and switch between them in case of DRRS.
+ *	But when only one such register set is provided, we have to program
+ *	the required divider value on that register itself based on the DRRS
+ *	state.
+ *
+ * M1_N1	: Program dp_m_n on M1_N1 registers
+ *			  dp_m2_n2 on M2_N2 registers (If supported)
+ *
+ * M2_N2	: Program dp_m2_n2 on M1_N1 registers
+ *			  M2_N2 registers are not supported
+ */
+enum link_m_n_set {
+	/* Sets the m1_n1 and m2_n2 */
+	M1_N1 = 0,
+	M2_N2
+};
+
 struct intel_dp {
 	uint32_t output_reg;
 	uint32_t aux_ch_ctl_reg;
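
In practice the enum reads as: ask for M1_N1 on the normal path, and for M2_N2 only when the low-refresh dividers must be folded into the live M1_N1 registers because no second register set exists. A hedged, self-contained illustration of that choice (stubs only, not the driver's code):

    #include <stdio.h>

    enum link_m_n_set { M1_N1 = 0, M2_N2 };

    /* stub standing in for intel_dp_set_m_n(crtc, m_n) */
    static void set_m_n(enum link_m_n_set m_n)
    {
            if (m_n == M1_N1)
                    printf("program dp_m_n into M1_N1 (and dp_m2_n2 into M2_N2 if present)\n");
            else
                    printf("fold dp_m2_n2 into the M1_N1 registers\n");
    }

    int main(void)
    {
            int has_m2_n2_regs = 0; /* assumed: platform lacks a second register set */

            set_m_n(M1_N1);                          /* high refresh rate */
            set_m_n(has_m2_n2_regs ? M1_N1 : M2_N2); /* low refresh rate */
            return 0;
    }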
@@ -883,6 +904,8 @@ int intel_fb_align_height(struct drm_device *dev, int height,
 			  uint64_t fb_format_modifier);
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format);
 
 /* intel_audio.c */
 void intel_init_audio(struct drm_device *dev);
@@ -996,7 +1019,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 void
 ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,


@@ -473,6 +473,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
 	return true;
 }
 
+static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+{
+	struct drm_crtc *crtc = NULL, *tmp_crtc;
+	enum pipe pipe;
+	bool pipe_a_only = false, one_pipe_only = false;
+
+	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+		pipe_a_only = true;
+	else if (INTEL_INFO(dev_priv)->gen <= 4)
+		one_pipe_only = true;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(tmp_crtc) &&
+		    to_intel_crtc(tmp_crtc)->primary_enabled) {
+			if (one_pipe_only && crtc) {
+				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+				return NULL;
+			}
+			crtc = tmp_crtc;
+		}
+
+		if (pipe_a_only)
+			break;
+	}
+
+	if (!crtc || crtc->primary->fb == NULL) {
+		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+			DRM_DEBUG_KMS("no output, disabling\n");
+		return NULL;
+	}
+
+	return crtc;
+}
+
 /**
  * intel_fbc_update - enable/disable FBC as needed
  * @dev: the drm_device
@@ -495,7 +532,7 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
 void intel_fbc_update(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL, *tmp_crtc;
+	struct drm_crtc *crtc = NULL;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
 	struct drm_i915_gem_object *obj;
@@ -530,23 +567,9 @@ void intel_fbc_update(struct drm_device *dev)
 	 *   - new fb is too large to fit in compressed buffer
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
-	for_each_crtc(dev, tmp_crtc) {
-		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_crtc(tmp_crtc)->primary_enabled) {
-			if (crtc) {
-				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				goto out_disable;
-			}
-			crtc = tmp_crtc;
-		}
-	}
-
-	if (!crtc || crtc->primary->fb == NULL) {
-		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-			DRM_DEBUG_KMS("no output, disabling\n");
+	crtc = intel_fbc_find_crtc(dev_priv);
+	if (!crtc)
 		goto out_disable;
-	}
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
View File

@ -620,7 +620,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
* @vmas: list of vmas. * @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit. * @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer. * @exec_start: batchbuffer start virtual address pointer.
* @flags: translated execbuffer call flags. * @dispatch_flags: translated execbuffer call flags.
* *
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call. * away the submission details of the execbuffer ioctl call.
@ -633,7 +633,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas, struct list_head *vmas,
struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags) u64 exec_start, u32 dispatch_flags)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@ -706,10 +706,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
dev_priv->relative_constants_mode = instp_mode; dev_priv->relative_constants_mode = instp_mode;
} }
ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags); ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
if (ret) if (ret)
return ret; return ret;
trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, ring); i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@ -886,12 +888,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
return ret; return ret;
} }
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
*/
request->ctx = ctx; request->ctx = ctx;
i915_gem_context_reference(request->ctx); i915_gem_context_reference(request->ctx);
request->ringbuf = ctx->engine[ring->id].ringbuf;
ring->outstanding_lazy_request = request; ring->outstanding_lazy_request = request;
return 0; return 0;
@ -1163,9 +1162,9 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx, struct intel_context *ctx,
u64 offset, unsigned flags) u64 offset, unsigned dispatch_flags)
{ {
bool ppgtt = !(flags & I915_DISPATCH_SECURE); bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret; int ret;
ret = intel_logical_ring_begin(ringbuf, ctx, 4); ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@ -1638,6 +1637,49 @@ cleanup_render_ring:
return ret; return ret;
} }
static u32
make_rpcs(struct drm_device *dev)
{
u32 rpcs = 0;
/*
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
if (INTEL_INFO(dev)->gen < 9)
return 0;
/*
* Starting in Gen9, render power gating can leave
* slice/subslice/EU in a partially enabled state. We
* must make an explicit request through RPCS for full
* enablement.
*/
if (INTEL_INFO(dev)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
rpcs |= INTEL_INFO(dev)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
if (INTEL_INFO(dev)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
if (INTEL_INFO(dev)->has_eu_pg) {
rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
return rpcs;
}
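
As a standalone illustration of the bit packing in make_rpcs(), the sketch below composes the same three fields; all shift and enable values here are placeholders for illustration (the real definitions live in i915_reg.h), and the slice/subslice/EU counts are made up:

#include <stdint.h>
#include <stdio.h>

#define RPCS_ENABLE		(1u << 31)	/* placeholder bit */
#define RPCS_S_CNT_ENABLE	(1u << 18)	/* placeholder bit */
#define RPCS_S_CNT_SHIFT	15		/* placeholder shift */
#define RPCS_SS_CNT_ENABLE	(1u << 11)	/* placeholder bit */
#define RPCS_SS_CNT_SHIFT	8		/* placeholder shift */
#define RPCS_EU_MAX_SHIFT	4		/* placeholder shift */
#define RPCS_EU_MIN_SHIFT	0		/* placeholder shift */

int main(void)
{
	uint32_t rpcs = 0;
	uint32_t slice_total = 3, subslice_per_slice = 4, eu_per_subslice = 8;

	/* Same structure as make_rpcs(): request each field explicitly. */
	rpcs |= RPCS_S_CNT_ENABLE | (slice_total << RPCS_S_CNT_SHIFT) | RPCS_ENABLE;
	rpcs |= RPCS_SS_CNT_ENABLE | (subslice_per_slice << RPCS_SS_CNT_SHIFT) | RPCS_ENABLE;
	rpcs |= (eu_per_subslice << RPCS_EU_MIN_SHIFT) |
		(eu_per_subslice << RPCS_EU_MAX_SHIFT) | RPCS_ENABLE;

	printf("RPCS = 0x%08x\n", rpcs);
	return 0;
}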
static int static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@ -1731,18 +1773,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]); reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]); reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]); reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]); reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]); reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]); reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]); reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]); reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
if (ring->id == RCS) { if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8; reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
reg_state[CTX_R_PWR_CLK_STATE+1] = 0; reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
} }
kunmap_atomic(reg_state); kunmap_atomic(reg_state);
@ -1950,3 +1992,38 @@ error_unpin_ctx:
drm_gem_object_unreference(&ctx_obj->base); drm_gem_object_unreference(&ctx_obj->base);
return ret; return ret;
} }
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[ring->id].ringbuf;
uint32_t *reg_state;
struct page *page;
if (!ctx_obj)
continue;
if (i915_gem_object_get_pages(ctx_obj)) {
WARN(1, "Failed get_pages for context obj\n");
continue;
}
page = i915_gem_object_get_page(ctx_obj, 1);
reg_state = kmap_atomic(page);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
kunmap_atomic(reg_state);
ringbuf->head = 0;
ringbuf->tail = 0;
}
}

View File

@ -73,6 +73,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring); struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring, void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx); struct intel_context *ctx);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
/* Execlists */ /* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@ -82,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas, struct list_head *vmas,
struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags); u64 exec_start, u32 dispatch_flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring); void intel_lrc_irq_handler(struct intel_engine_cs *ring);

View File

@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
intel_connector->panel.fitting_mode = value; intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc; crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) { if (crtc && crtc->state->enable) {
/* /*
* If the CRTC is enabled, the display will be changed * If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode. * according to the new panel fitting mode.

View File

@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
return; return;
if (opregion->acpi) { if (opregion->acpi) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_didl_outputs(dev);
intel_didl_outputs(dev); intel_setup_cadls(dev);
intel_setup_cadls(dev);
}
/* Notify BIOS we are ready to handle ACPI video ext notifs. /* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module. * Right now, all the events are handled by the ACPI video module.

View File

@ -1065,7 +1065,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params; struct put_image_params *params;
int ret; int ret;
/* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay; overlay = dev_priv->overlay;
if (!overlay) { if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n"); DRM_DEBUG("userspace bug: no overlay\n");
@ -1261,7 +1260,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct overlay_registers __iomem *regs; struct overlay_registers __iomem *regs;
int ret; int ret;
/* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay; overlay = dev_priv->overlay;
if (!overlay) { if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n"); DRM_DEBUG("userspace bug: no overlay\n");

View File

@ -2522,6 +2522,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks; uint16_t alloc_size, start, cursor_blocks;
uint16_t minimum[I915_MAX_PLANES];
unsigned int total_data_rate; unsigned int total_data_rate;
int plane; int plane;
@ -2540,9 +2541,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
alloc_size -= cursor_blocks; alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks; alloc->end -= cursor_blocks;
/* 1. Allocate the minimum required blocks for each active plane */
for_each_plane(pipe, plane) {
const struct intel_plane_wm_parameters *p;
p = &params->plane[plane];
if (!p->enabled)
continue;
minimum[plane] = 8;
alloc_size -= minimum[plane];
}
/* /*
* Each active plane get a portion of the remaining space, in * 2. Distribute the remaining space in proportion to the amount of
* proportion to the amount of data they need to fetch from memory. * data each plane needs to fetch from memory.
* *
* FIXME: we may not allocate every single block here. * FIXME: we may not allocate every single block here.
*/ */
@ -2564,8 +2577,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
* promote the expression to 64 bits to avoid overflowing, the * promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1 * result is < available as data_rate / total_data_rate < 1
*/ */
plane_blocks = div_u64((uint64_t)alloc_size * data_rate, plane_blocks = minimum[plane];
total_data_rate); plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
ddb->plane[pipe][plane].start = start; ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks; ddb->plane[pipe][plane].end = start + plane_blocks;
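
To make the two-step split concrete, here is a standalone sketch with made-up numbers (160 blocks left after the cursor, two active planes with data rates 300 and 100):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t alloc_size = 160;		/* blocks left after cursor, made up */
	uint32_t data_rate[2] = { 300, 100 };	/* per-plane fetch rates, made up */
	uint16_t minimum[2] = { 8, 8 };		/* step 1: fixed floor per plane */
	uint32_t total = data_rate[0] + data_rate[1];
	uint16_t start = 0;

	alloc_size -= minimum[0] + minimum[1];	/* 144 blocks left to distribute */
	for (int i = 0; i < 2; i++) {
		/* step 2: floor plus a share proportional to the data rate */
		uint16_t blocks = minimum[i] +
			(uint16_t)((uint64_t)alloc_size * data_rate[i] / total);
		printf("plane %d: blocks [%u, %u)\n", i,
		       (unsigned)start, (unsigned)(start + blocks));
		start += blocks;
	}
	return 0;	/* prints [0, 116) and [116, 160) */
}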
@ -2595,7 +2609,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
if (latency == 0) if (latency == 0)
return UINT_MAX; return UINT_MAX;
wm_intermediate_val = latency * pixel_rate * bytes_per_pixel; wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000); ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
return ret; return ret;
@ -2603,17 +2617,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel, uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency) uint64_t tiling, uint32_t latency)
{ {
uint32_t ret, plane_bytes_per_line, wm_intermediate_val; uint32_t ret;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t wm_intermediate_val;
if (latency == 0) if (latency == 0)
return UINT_MAX; return UINT_MAX;
plane_bytes_per_line = horiz_pixels * bytes_per_pixel; plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
if (tiling == I915_FORMAT_MOD_Y_TILED ||
tiling == I915_FORMAT_MOD_Yf_TILED) {
plane_bytes_per_line *= 4;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line /= 4;
} else {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
}
wm_intermediate_val = latency * pixel_rate; wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
plane_bytes_per_line; plane_blocks_per_line;
return ret; return ret;
} }
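
The Y-tile adjustment rounds the fetch up over four lines at once instead of per line, which can save a block per line; a standalone sketch with a made-up 1366-pixel-wide, 4-byte-per-pixel plane:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t horiz_pixels = 1366, bytes_per_pixel = 4;	/* made-up plane */
	uint32_t bytes = horiz_pixels * bytes_per_pixel;	/* 5464 bytes/line */

	uint32_t linear_blocks = DIV_ROUND_UP(bytes, 512);	/* 11 */
	/* Y tiles cover 4 rows: round up across 4 lines, then divide back */
	uint32_t y_blocks = DIV_ROUND_UP(bytes * 4, 512) / 4;	/* 43 / 4 = 10 */

	printf("linear: %u blocks/line, Y-tiled: %u blocks/line\n",
	       linear_blocks, y_blocks);
	return 0;
}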
@ -2662,6 +2688,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane; struct drm_plane *plane;
struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */ int i = 1; /* Index for sprite planes start */
p->active = intel_crtc_active(crtc); p->active = intel_crtc_active(crtc);
@ -2677,6 +2704,14 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
crtc->primary->fb->bits_per_pixel / 8; crtc->primary->fb->bits_per_pixel / 8;
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
fb = crtc->primary->state->fb;
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
p->plane[0].tiling = fb->modifier[0];
p->cursor.enabled = true; p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4; p->cursor.bytes_per_pixel = 4;
@ -2693,41 +2728,60 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
} }
} }
static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p, static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params, struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation, uint16_t ddb_allocation,
uint32_t mem_value, int level,
uint16_t *out_blocks, /* out */ uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */) uint8_t *out_lines /* out */)
{ {
uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines; uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t result_bytes; uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
if (mem_value == 0 || !p->active || !p_params->enabled) if (latency == 0 || !p->active || !p_params->enabled)
return false; return false;
method1 = skl_wm_method1(p->pixel_rate, method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel, p_params->bytes_per_pixel,
mem_value); latency);
method2 = skl_wm_method2(p->pixel_rate, method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal, p->pipe_htotal,
p_params->horiz_pixels, p_params->horiz_pixels,
p_params->bytes_per_pixel, p_params->bytes_per_pixel,
mem_value); p_params->tiling,
latency);
plane_bytes_per_line = p_params->horiz_pixels * plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel; p_params->bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
/* For now xtile and linear */ if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1) p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
result_bytes = min(method1, method2); uint32_t y_tile_minimum = plane_blocks_per_line * 4;
else selected_result = max(method2, y_tile_minimum);
result_bytes = method1; } else {
if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
selected_result = method1;
}
res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1; res_blocks = selected_result + 1;
res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line); res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
if (res_blocks > ddb_allocation || res_lines > 31) if (level >= 1 && level <= 7) {
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
res_lines += 4;
else
res_blocks++;
}
if (res_blocks >= ddb_allocation || res_lines > 31)
return false; return false;
*out_blocks = res_blocks; *out_blocks = res_blocks;
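
A quick worked example of the selection above, with made-up inputs: say method1 = 40, method2 = 30, plane_blocks_per_line = 10 and ddb_allocation = 50. For a Y-tiled plane, y_tile_minimum = 10 * 4 = 40, so selected_result = max(30, 40) = 40, giving res_blocks = 41 and res_lines = DIV_ROUND_UP(40, 10) = 4; at levels 1-7 the Y-tiled path then adds four lines, for res_lines = 8. Both results pass the checks (41 < 50 blocks, 8 <= 31 lines), so the watermark level is valid.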
@ -2744,23 +2798,24 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
int num_planes, int num_planes,
struct skl_wm_level *result) struct skl_wm_level *result)
{ {
uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks; uint16_t ddb_blocks;
int i; int i;
for (i = 0; i < num_planes; i++) { for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i], result->plane_en[i] = skl_compute_plane_wm(dev_priv,
p, &p->plane[i],
ddb_blocks, ddb_blocks,
latency, level,
&result->plane_res_b[i], &result->plane_res_b[i],
&result->plane_res_l[i]); &result->plane_res_l[i]);
} }
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks, result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
latency, &result->cursor_res_b, ddb_blocks, level,
&result->cursor_res_b,
&result->cursor_res_l); &result->cursor_res_l);
} }
@ -3153,12 +3208,20 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
int pixel_size, bool enabled, bool scaled) int pixel_size, bool enabled, bool scaled)
{ {
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane->state->fb;
intel_plane->wm.enabled = enabled; intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled; intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width; intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height; intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size; intel_plane->wm.bytes_per_pixel = pixel_size;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
skl_update_wm(crtc); skl_update_wm(crtc);
} }

View File

@ -1002,13 +1002,64 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
return 0; return 0;
} }
static int skl_init_workarounds(struct intel_engine_cs *ring) static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
{ {
gen9_init_workarounds(ring); struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
for (i = 0; i < 3; i++) {
u8 ss;
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
continue;
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return 0;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
return 0; return 0;
} }
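
A standalone sketch of the same fuse-to-hash mapping, with made-up fuse data (ffs() and the popcount builtin stand in for the kernel's helpers):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned char subslice_7eu[3] = { 0x4, 0x0, 0x3 };	/* made-up fuses */
	unsigned char vals[3] = { 0, 0, 0 };

	for (int i = 0; i < 3; i++) {
		/* only slices with exactly one 7-EU subslice get tuned */
		if (__builtin_popcount(subslice_7eu[i]) != 1)
			continue;
		int ss = ffs(subslice_7eu[i]) - 1;	/* subslice index 0..3 */
		vals[i] = 3 - ss;
	}
	/* slice 0: mask 0x4 -> ss = 2 -> val = 1; slices 1 and 2 skipped */
	printf("IZ hashing vals: %u %u %u\n",
	       (unsigned)vals[0], (unsigned)vals[1], (unsigned)vals[2]);
	return 0;
}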
static int skl_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
gen9_init_workarounds(ring);
/* WaDisablePowerCompilerClockGating:skl */
if (INTEL_REVID(dev) == SKL_REVID_B0)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
return skl_tune_iz_hashing(ring);
}
int init_workarounds_ring(struct intel_engine_cs *ring) int init_workarounds_ring(struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
@ -1690,7 +1741,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
static int static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring, i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length, u64 offset, u32 length,
unsigned flags) unsigned dispatch_flags)
{ {
int ret; int ret;
@ -1701,7 +1752,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_BUFFER_START |
MI_BATCH_GTT | MI_BATCH_GTT |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset); intel_ring_emit(ring, offset);
intel_ring_advance(ring); intel_ring_advance(ring);
@ -1714,8 +1766,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring, i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len, u64 offset, u32 len,
unsigned flags) unsigned dispatch_flags)
{ {
u32 cs_offset = ring->scratch.gtt_offset; u32 cs_offset = ring->scratch.gtt_offset;
int ret; int ret;
@ -1733,7 +1785,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
if ((flags & I915_DISPATCH_PINNED) == 0) { if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT) if (len > I830_BATCH_LIMIT)
return -ENOSPC; return -ENOSPC;
@ -1765,7 +1817,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret; return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER); intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8); intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
@ -1776,7 +1829,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
static int static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring, i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len, u64 offset, u32 len,
unsigned flags) unsigned dispatch_flags)
{ {
int ret; int ret;
@ -1785,7 +1838,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret; return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring); intel_ring_advance(ring);
return 0; return 0;
@ -2176,6 +2230,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
kref_init(&request->ref); kref_init(&request->ref);
request->ring = ring; request->ring = ring;
request->ringbuf = ring->buffer;
request->uniq = dev_private->request_uniq++; request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno); ret = i915_gem_get_seqno(ring->dev, &request->seqno);
@ -2352,9 +2407,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
static int static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len, u64 offset, u32 len,
unsigned flags) unsigned dispatch_flags)
{ {
bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE); bool ppgtt = USES_PPGTT(ring->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret; int ret;
ret = intel_ring_begin(ring, 4); ret = intel_ring_begin(ring, 4);
@ -2373,8 +2429,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len, u64 offset, u32 len,
unsigned flags) unsigned dispatch_flags)
{ {
int ret; int ret;
@ -2384,7 +2440,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW)); 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */ /* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset); intel_ring_emit(ring, offset);
@ -2396,7 +2452,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
static int static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len, u64 offset, u32 len,
unsigned flags) unsigned dispatch_flags)
{ {
int ret; int ret;
@ -2406,7 +2462,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
intel_ring_emit(ring, intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */ /* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset); intel_ring_emit(ring, offset);
intel_ring_advance(ring); intel_ring_advance(ring);

View File

@ -164,7 +164,7 @@ struct intel_engine_cs {
u32 seqno); u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring, int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length, u64 offset, u32 length,
unsigned flags); unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1 #define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2 #define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring); void (*cleanup)(struct intel_engine_cs *ring);
@ -242,7 +242,7 @@ struct intel_engine_cs {
u32 flush_domains); u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf, int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx, struct intel_context *ctx,
u64 offset, unsigned flags); u64 offset, unsigned dispatch_flags);
/** /**
* List of objects currently involved in rendering from the * List of objects currently involved in rendering from the
@ -373,11 +373,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
* 0x06: ring 2 head pointer (915-class) * 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45) * 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45) * 0x1f: Last written status offset. (GM45)
* 0x20-0x2f: Reserved (Gen6+)
* *
* The area from dword 0x20 to 0x3ff is available for driver usage. * The area from dword 0x30 to 0x3ff is available for driver usage.
*/ */
#define I915_GEM_HWS_INDEX 0x20 #define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_INDEX 0x30 #define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
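
The hardware status index moves from dword 0x20 to 0x30 to clear the newly reserved 0x20-0x2f range, and the scratch index follows from 0x30 to 0x40; assuming MI_STORE_DWORD_INDEX_SHIFT is 2 (dword index converted to a byte offset), the scratch byte address becomes 0x40 << 2 = 0x100.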
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);

View File

@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
if (min <= 0 || max <= 0) if (min <= 0 || max <= 0)
return false; return false;
if (WARN_ON(drm_vblank_get(dev, pipe))) if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false; return false;
local_irq_disable(); local_irq_disable();
@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
finish_wait(wq, &wait); finish_wait(wq, &wait);
drm_vblank_put(dev, pipe); drm_crtc_vblank_put(&crtc->base);
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
@ -189,7 +189,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe; const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1; const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride; u32 plane_ctl, stride_div;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
@ -247,15 +247,20 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
switch (fb->modifier[0]) { switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE: case DRM_FORMAT_MOD_NONE:
stride = fb->pitches[0] >> 6;
break; break;
case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_X_TILED:
plane_ctl |= PLANE_CTL_TILED_X; plane_ctl |= PLANE_CTL_TILED_X;
stride = fb->pitches[0] >> 9; break;
case I915_FORMAT_MOD_Y_TILED:
plane_ctl |= PLANE_CTL_TILED_Y;
break;
case I915_FORMAT_MOD_Yf_TILED:
plane_ctl |= PLANE_CTL_TILED_YF;
break; break;
default: default:
BUG(); MISSING_CASE(fb->modifier[0]);
} }
if (drm_plane->state->rotation == BIT(DRM_ROTATE_180)) if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180; plane_ctl |= PLANE_CTL_ROTATE_180;
@ -266,6 +271,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
pixel_size, true, pixel_size, true,
src_w != crtc_w || src_h != crtc_h); src_w != crtc_w || src_h != crtc_h);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
/* Sizes are 0 based */ /* Sizes are 0 based */
src_w--; src_w--;
src_h--; src_h--;
@ -273,7 +281,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
crtc_h--; crtc_h--;
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride); I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
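
For a sense of scale, the divisors the old code hard-coded match the linear and X-tiled cases (64 and 512 bytes respectively); a standalone sketch with a made-up 1920-pixel, 4-byte-per-pixel framebuffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 1920 * 4;	/* 7680-byte pitch, made up */

	/* PLANE_STRIDE is the byte pitch divided by the modifier's
	 * stride alignment (intel_fb_stride_alignment() in the driver). */
	printf("linear  PLANE_STRIDE = %u\n", pitch / 64);	/* 120 */
	printf("X-tiled PLANE_STRIDE = %u\n", pitch / 512);	/* 15 */
	return 0;
}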
@ -1248,6 +1256,12 @@ finish:
if (!intel_crtc->primary_enabled && !state->hides_primary) if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true; intel_crtc->atomic.post_enable_primary = true;
/* Update watermarks on tiling changes. */
if (!plane->state->fb || !state->base.fb ||
plane->state->fb->modifier[0] !=
state->base.fb->modifier[0])
intel_crtc->atomic.update_wm = true;
} }
return 0; return 0;
@ -1301,9 +1315,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane; struct intel_plane *intel_plane;
int ret = 0; int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */ /* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL; return -EINVAL;
@ -1332,9 +1343,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane; struct intel_plane *intel_plane;
int ret = 0; int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, get->plane_id); plane = drm_plane_find(dev, get->plane_id);

View File

@ -211,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
gen6_gt_check_fifodbg(dev_priv); gen6_gt_check_fifodbg(dev_priv);
} }
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{ {
int ret = 0; int ret = 0;
@ -218,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW. /* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */ * So, we need to read the FREE_ENTRIES every time */
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
dev_priv->uncore.fifo_count = dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500; int loop = 500;
u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; u32 fifo = fifo_free_entries(dev_priv);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10); udelay(10);
fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; fifo = fifo_free_entries(dev_priv);
} }
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret; ++ret;
@ -315,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_GEN6(dev) || IS_GEN7(dev)) if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count = dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) & fifo_free_entries(dev_priv);
GT_FIFO_FREE_ENTRIES_MASK;
} }
if (!restore) if (!restore)

View File

@ -931,6 +931,7 @@ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc); extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc); extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc); extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev); extern void drm_vblank_cleanup(struct drm_device *dev);

View File

@ -192,4 +192,19 @@
*/ */
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2) #define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
/*
* Intel Yf-tiling layout
*
* This is a tiled layout using 4KB tiles in a row-major layout.
* Within the tile, pixels are laid out in 16 256-byte units / sub-tiles, which
* are arranged in four groups (two wide, two high) with a column-major layout.
* Each group therefore consists of four 256-byte units, which are also laid
* out as 2x2 column-major.
* 256-byte units are made of four 64-byte blocks of pixels, producing either
* a square block or a 2:1 unit.
* 64-byte blocks of pixels contain four pixel rows of 16 bytes, where the
* width in pixels depends on the pixel depth.
*/
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
#endif /* DRM_FOURCC_H */ #endif /* DRM_FOURCC_H */
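
To sanity-check the numbers in the Yf description above: a 64-byte block is 16 bytes wide by 4 rows; in the square-block case a 256-byte unit is then 2x2 blocks (32 bytes by 8 rows), a group is 2x2 units (64 bytes by 16 rows), and the full tile is 2x2 groups, i.e. 128 bytes by 32 rows, which is exactly 4KB. At 4 bytes per pixel that works out to a 32x32-pixel tile.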