
Merge tag 'drm-next-2021-11-12' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Dave Airlie:
 "I missed a drm-misc-next pull for the main pull last week. It wasn't
  that major and isn't the bulk of this at all. This has a bunch of
  fixes all over, a lot for amdgpu and i915.

  bridge:
    - HPD improvements for lt9611uxc
   - eDP aux-bus support for ps8640
   - LVDS data-mapping selection support

  ttm:
   - remove huge page functionality (needs reworking)
   - fix a race condition during BO eviction

  panels:
   - add some new panels

  fbdev:
   - fix double-free
   - remove unused scrolling acceleration
   - CONFIG_FB dep improvements

  locking:
   - improve contended locking logging
   - naming collision fix

  dma-buf:
   - add dma_resv_for_each_fence iterator
   - fix fence refcounting bug
    - name locking fixes

  prime:
   - fix object references during mmap

  nouveau:
   - various code style changes
   - refcount fix
   - device removal fixes
   - protect client list with a mutex
   - fix CE0 address calculation

  i915:
   - DP rates related fixes
   - Revert disabling dual eDP that was causing state readout problems
   - put the cdclk vtables in const data
   - Fix DVO port type for older platforms
    - Fix blank screen by turning DP++ TMDS output buffers back on in encoder->shutdown()
   - CCS FBs related fixes
   - Fix recursive lock in GuC submission
   - Revert guc_id from i915_request tracepoint
   - Build fix around dmabuf

  amdgpu:
   - GPU reset fix
   - Aldebaran fix
   - Yellow Carp fixes
   - DCN2.1 DMCUB fix
   - IOMMU regression fix for Picasso
   - DSC display fixes
   - BPC display calculation fixes
   - Other misc display fixes
   - Don't allow partial copy from user for DC debugfs
   - SRIOV fixes
   - GFX9 CSB pin count fix
   - Various IP version check fixes
   - DP 2.0 fixes
   - Limit DCN1 MPO fix to DCN1

  amdkfd:
   - SVM fixes
    - Fix gfx version for Renoir
   - Reset fixes

  udl:
   - timeout fix

  imx:
   - circular locking fix

  virtio:
   - NULL ptr deref fix"

* tag 'drm-next-2021-11-12' of git://anongit.freedesktop.org/drm/drm: (126 commits)
  drm/ttm: Double check mem_type of BO while eviction
  drm/amdgpu: add missed support for UVD IP_VERSION(3, 0, 64)
  drm/amdgpu: drop jpeg IP initialization in SRIOV case
  drm/amd/display: reject both non-zero src_x and src_y only for DCN1x
  drm/amd/display: Add callbacks for DMUB HPD IRQ notifications
  drm/amd/display: Don't lock connection_mutex for DMUB HPD
  drm/amd/display: Add comment where CONFIG_DRM_AMD_DC_DCN macro ends
  drm/amdkfd: Fix retry fault drain race conditions
  drm/amdkfd: lower the VAs base offset to 8KB
  drm/amd/display: fix exit from amdgpu_dm_atomic_check() abruptly
  drm/amd/amdgpu: fix the kfd pre_reset sequence in sriov
  drm/amdgpu: fix uvd crash on Polaris12 during driver unloading
  drm/i915/adlp/fb: Prevent the mapping of redundant trailing padding NULL pages
  drm/i915/fb: Fix rounding error in subsampled plane size calculation
  drm/i915/hdmi: Turn DP++ TMDS output buffers back on in encoder->shutdown()
  drm/locking: fix __stack_depot_* name conflict
  drm/virtio: Fix NULL dereference error in virtio_gpu_poll
  drm/amdgpu: fix SI handling in amdgpu_device_asic_has_dc_support()
  drm/amdgpu: Fix dangling kfd_bo pointer for shared BOs
  drm/amd/amdkfd: Don't sent command to HWS on kfd reset
  ...
commit 304ac8032d
Author: Linus Torvalds
Date:   2021-11-12 12:11:07 -08:00

140 changed files with 1770 additions and 1501 deletions


@@ -49,11 +49,26 @@ properties:
   properties:
     port@0:
-      $ref: /schemas/graph.yaml#/properties/port
+      $ref: /schemas/graph.yaml#/$defs/port-base
       description: |
         For LVDS encoders, port 0 is the parallel input
         For LVDS decoders, port 0 is the LVDS input
+      properties:
+        endpoint:
+          $ref: /schemas/media/video-interfaces.yaml#
+          unevaluatedProperties: false
+
+          properties:
+            data-mapping:
+              enum:
+                - jeida-18
+                - jeida-24
+                - vesa-24
+              description: |
+                The color signals mapping order. See details in
+                Documentation/devicetree/bindings/display/panel/lvds.yaml
 
     port@1:
       $ref: /schemas/graph.yaml#/properties/port
       description: |
@@ -71,6 +86,22 @@ properties:
   power-supply: true
 
+if:
+  not:
+    properties:
+      compatible:
+        contains:
+          const: lvds-decoder
+then:
+  properties:
+    ports:
+      properties:
+        port@0:
+          properties:
+            endpoint:
+              properties:
+                data-mapping: false
+
 required:
   - compatible
   - ports


@@ -40,6 +40,9 @@ properties:
   vdd33-supply:
     description: Regulator for 3.3V digital core power.
 
+  aux-bus:
+    $ref: /schemas/display/dp-aux-bus.yaml#
+
   ports:
     $ref: /schemas/graph.yaml#/properties/ports
@@ -101,6 +104,20 @@ examples:
         };
       };
     };
+
+    aux-bus {
+      panel {
+        compatible = "boe,nv133fhm-n62";
+        power-supply = <&pp3300_dx_edp>;
+        backlight = <&backlight>;
+
+        port {
+          panel_in: endpoint {
+            remote-endpoint = <&ps8640_out>;
+          };
+        };
+      };
+    };
   };
 };


@@ -166,6 +166,8 @@ properties:
       - innolux,at070tn92
         # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
       - innolux,g070y2-l01
+        # Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel
+      - innolux,g070y2-t02
         # Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
       - innolux,g101ice-l01
         # Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
@@ -309,6 +311,8 @@ properties:
       - urt,umsh-8596md-11t
       - urt,umsh-8596md-19t
      - urt,umsh-8596md-20t
+        # Vivax TPC-9150 tablet 9.0" WSVGA TFT LCD panel
+      - vivax,tpc9150-panel
        # VXT 800x480 color TFT LCD panel
      - vxt,vl050-8048nt-c01
        # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
@@ -317,6 +321,7 @@ properties:
       - yes-optoelectronics,ytc700tlag-05-201c
 
   backlight: true
+  ddc-i2c-bus: true
   enable-gpios: true
   port: true
   power-supply: true


@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ls060t1sx01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 6.0" FullHD TFT LCD panel
+
+maintainers:
+  - Dmitry Baryskov <dmitry.baryshkov@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sharp,ls060t1sx01
+
+  reg: true
+  backlight: true
+  reset-gpios: true
+  port: true
+
+  avdd-supply:
+    description: handle of the regulator that provides the positive supply voltage
+  avee-supply:
+    description: handle of the regulator that provides the negative supply voltage
+  vddi-supply:
+    description: handle of the regulator that provides the I/O supply voltage
+  vddh-supply:
+    description: handle of the regulator that provides the analog supply voltage
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,ls060t1sx01";
+            reg = <0>;
+            avdd-supply = <&pm8941_l22>;
+            backlight = <&backlight>;
+            reset-gpios = <&pm8916_gpios 25 GPIO_ACTIVE_LOW>;
+        };
+    };
+
+...


@@ -1286,6 +1286,8 @@ patternProperties:
     description: Vitesse Semiconductor Corporation
   "^vivante,.*":
     description: Vivante Corporation
+  "^vivax,.*":
+    description: Vivax brand by M SAN Grupa d.o.o.
   "^vocore,.*":
     description: VoCore Studio
   "^voipac,.*":


@@ -314,16 +314,19 @@ Level: Advanced
 Garbage collect fbdev scrolling acceleration
 --------------------------------------------
 
-Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
-SCROLL_REDRAW. There's a ton of code this will allow us to remove:
+Scroll acceleration has been disabled in fbcon. Now it works as the old
+SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
+removed from fbcon_ops.
+
+Remaining tasks:
 
-- lots of code in fbcon.c
-- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
+- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
   directly instead of the function table (with a switch on p->rotate)
 - fb_copyarea is unused after this, and can be deleted from all drivers
+- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
+  well as cfb_copyarea
 
 Note that not all acceleration code can be deleted, since clearing and cursor
 support is still accelerated, which might be good candidates for further
 deletion projects.


@@ -67,12 +67,9 @@ static void dma_buf_release(struct dentry *dentry)
 	BUG_ON(dmabuf->vmapping_counter);
 
 	/*
-	 * Any fences that a dma-buf poll can wait on should be signaled
-	 * before releasing dma-buf. This is the responsibility of each
-	 * driver that uses the reservation objects.
-	 *
-	 * If you hit this BUG() it means someone dropped their ref to the
-	 * dma-buf while still having pending operation to the buffer.
+	 * If you hit this BUG() it could mean:
+	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
 	 */
 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
@@ -200,6 +197,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dcb->poll->lock, flags);
@@ -207,21 +205,18 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	dcb->active = 0;
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
 	dma_fence_put(fence);
+	/* Paired with get_file in dma_buf_poll */
+	fput(dmabuf->file);
 }
 
-static bool dma_buf_poll_shared(struct dma_resv *resv,
-				struct dma_buf_poll_cb_t *dcb)
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
+				struct dma_buf_poll_cb_t *dcb)
 {
-	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int i, r;
+	int r;
 
-	if (!fobj)
-		return false;
-
-	for (i = 0; i < fobj->shared_count; ++i) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						  dma_resv_held(resv));
+	dma_resv_for_each_fence(&cursor, resv, write, fence) {
 		dma_fence_get(fence);
 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 		if (!r)
@@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv *resv,
 	return false;
 }
 
-static bool dma_buf_poll_excl(struct dma_resv *resv,
-			      struct dma_buf_poll_cb_t *dcb)
-{
-	struct dma_fence *fence = dma_resv_excl_fence(resv);
-	int r;
-
-	if (!fence)
-		return false;
-
-	dma_fence_get(fence);
-	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
-	if (!r)
-		return true;
-
-	dma_fence_put(fence);
-	return false;
-}
-
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
@@ -282,8 +259,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & EPOLLOUT) {
-			if (!dma_buf_poll_shared(resv, dcb) &&
-			    !dma_buf_poll_excl(resv, dcb))
+			/* Paired with fput in dma_buf_poll_cb */
+			get_file(dmabuf->file);
+
+			if (!dma_buf_poll_add_cb(resv, true, dcb))
 				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			else
@@ -303,7 +282,10 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & EPOLLIN) {
-			if (!dma_buf_poll_excl(resv, dcb))
+			/* Paired with fput in dma_buf_poll_cb */
+			get_file(dmabuf->file);
+
+			if (!dma_buf_poll_add_cb(resv, false, dcb))
 				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			else
@@ -1356,10 +1338,9 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct dma_resv *robj;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int count = 0, attach_count, shared_count, i;
+	int count = 0, attach_count;
 	size_t size = 0;
 	int ret;
@@ -1378,6 +1359,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 		if (ret)
 			goto error_unlock;
 
+
+		spin_lock(&buf_obj->name_lock);
 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
 				buf_obj->size,
 				buf_obj->file->f_flags, buf_obj->file->f_mode,
@@ -1385,22 +1368,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->exp_name,
 				file_inode(buf_obj->file)->i_ino,
 				buf_obj->name ?: "");
+		spin_unlock(&buf_obj->name_lock);
 
-		robj = buf_obj->resv;
-		fence = dma_resv_excl_fence(robj);
-		if (fence)
-			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
-				   fence->ops->get_driver_name(fence),
-				   fence->ops->get_timeline_name(fence),
-				   dma_fence_is_signaled(fence) ? "" : "un");
-
-		fobj = rcu_dereference_protected(robj->fence,
-						 dma_resv_held(robj));
-		shared_count = fobj ? fobj->shared_count : 0;
-		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference_protected(fobj->shared[i],
-							  dma_resv_held(robj));
-			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
+		dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) {
+			seq_printf(s, "\t%s fence: %s %s %ssignalled\n",
+				   dma_resv_iter_is_exclusive(&cursor) ?
+						"Exclusive" : "Shared",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
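
For context on the iterator used in the rework above: a minimal usage sketch, not taken from this commit, with a hypothetical caller that already holds the lock of the reservation object 'resv' (the locked iterator asserts this internally):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Third argument: true walks the exclusive fence plus all shared
	 * fences, false only the exclusive one. */
	dma_resv_for_each_fence(&cursor, resv, true, fence) {
		/* 'fence' is valid while the dma_resv lock is held; take a
		 * reference, as dma_buf_poll() does above, to use it longer. */
		dma_fence_wait(fence, false);
	}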


@@ -333,10 +333,14 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
 {
 	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
 	cursor->index = -1;
-	if (cursor->all_fences)
+	cursor->shared_count = 0;
+	if (cursor->all_fences) {
 		cursor->fences = dma_resv_shared_list(cursor->obj);
-	else
+		if (cursor->fences)
+			cursor->shared_count = cursor->fences->shared_count;
+	} else {
 		cursor->fences = NULL;
+	}
 	cursor->is_restarted = true;
 }
@@ -363,7 +367,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
 			continue;
 
 		} else if (!cursor->fences ||
-			   cursor->index >= cursor->fences->shared_count) {
+			   cursor->index >= cursor->shared_count) {
 			cursor->fence = NULL;
 			break;
@@ -423,6 +427,57 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
 }
 EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
 
+/**
+ * dma_resv_iter_first - first fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the first fence in the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
+{
+	struct dma_fence *fence;
+
+	dma_resv_assert_held(cursor->obj);
+
+	cursor->index = 0;
+	if (cursor->all_fences)
+		cursor->fences = dma_resv_shared_list(cursor->obj);
+	else
+		cursor->fences = NULL;
+
+	fence = dma_resv_excl_fence(cursor->obj);
+	if (!fence)
+		fence = dma_resv_iter_next(cursor);
+
+	cursor->is_restarted = true;
+	return fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_first);
+
+/**
+ * dma_resv_iter_next - next fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the next fences from the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
+{
+	unsigned int idx;
+
+	dma_resv_assert_held(cursor->obj);
+
+	cursor->is_restarted = false;
+	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
+		return NULL;
+
+	idx = cursor->index++;
+	return rcu_dereference_protected(cursor->fences->shared[idx],
+					 dma_resv_held(cursor->obj));
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_next);
+
 /**
  * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
@@ -448,10 +503,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	dma_resv_list_free(list);
 	dma_fence_put(excl);
 
-	if (cursor.fences) {
-		unsigned int cnt = cursor.fences->shared_count;
-
-		list = dma_resv_list_alloc(cnt);
+	if (cursor.shared_count) {
+		list = dma_resv_list_alloc(cursor.shared_count);
 		if (!list) {
 			dma_resv_iter_end(&cursor);
 			return -ENOMEM;
@@ -522,7 +575,7 @@ int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
 			if (fence_excl)
 				dma_fence_put(*fence_excl);
 
-			count = cursor.fences ? cursor.fences->shared_count : 0;
+			count = cursor.shared_count;
 			count += fence_excl ? 0 : 1;
 
 			/* Eventually re-allocate the array */
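
As a usage note, the locked pair added above can also be driven without the dma_resv_for_each_fence() wrapper. A minimal sketch with a hypothetical caller, assuming 'obj' is a struct dma_resv * the caller has locked and that dma_resv_iter_begin() initializes the cursor the same way as for the unlocked variants:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, true /* all fences */);
	for (fence = dma_resv_iter_first(&cursor); fence;
	     fence = dma_resv_iter_next(&cursor)) {
		/* Inspect 'fence' here. The locked variants take no extra
		 * reference, so no dma_resv_iter_end() cleanup is needed;
		 * the rcu_dereference_protected() above is safe because
		 * dma_resv_assert_held() guarantees the lock is held. */
	}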


@@ -100,11 +100,25 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
 	  This has the potential to use a lot of memory and print some very
 	  large kernel messages. If in doubt, say "N".
 
+config DRM_DEBUG_MODESET_LOCK
+	bool "Enable backtrace history for lock contention"
+	depends on STACKTRACE_SUPPORT
+	depends on DEBUG_KERNEL
+	depends on EXPERT
+	select STACKDEPOT
+	default y if DEBUG_WW_MUTEX_SLOWPATH
+	help
+	  Enable debug tracing of failures to gracefully handle drm modeset lock
+	  contention. A history of each drm modeset lock path hitting -EDEADLK
+	  will be saved until gracefully handled, and the backtrace will be
+	  printed when attempting to lock a contended lock.
+
+	  If in doubt, say "N".
+
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
-	depends on DRM
-	depends on FB=y || FB=DRM
-	select DRM_KMS_HELPER
+	depends on DRM_KMS_HELPER
+	depends on FB=y || FB=DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT

@@ -297,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
 void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
 #else
 static inline
@@ -312,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 }
 
 static inline
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 }
 #endif


@@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
 
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u32 domain = bo->preferred_domains;
@@ -219,6 +219,8 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 	}
 
 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+
+	kfree(bo->kfd_bo);
 }
@@ -734,14 +736,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		}
 
 		/* Add BO to VM internal data structures */
+		ret = amdgpu_bo_reserve(bo[i], false);
+		if (ret) {
+			pr_debug("Unable to reserve BO during memory attach");
+			goto unwind;
+		}
 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		amdgpu_bo_unreserve(bo[i]);
 		if (unlikely(!attachment[i]->bo_va)) {
 			ret = -ENOMEM;
 			pr_err("Failed to add BO object to VM. ret == %d\n",
 			       ret);
 			goto unwind;
 		}
-
 		attachment[i]->va = va;
 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
 		attachment[i]->adev = adev;
@@ -757,7 +764,9 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		if (!attachment[i])
 			continue;
 		if (attachment[i]->bo_va) {
+			amdgpu_bo_reserve(bo[i], true);
 			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+			amdgpu_bo_unreserve(bo[i]);
 			list_del(&attachment[i]->list);
 		}
 		if (bo[i])
@@ -1568,12 +1577,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
-	ret = unreserve_bo_and_vms(&ctx, false, false);
-
 	/* Remove from VM internal data structures */
 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
 		kfd_mem_detach(entry);
 
+	ret = unreserve_bo_and_vms(&ctx, false, false);
+
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);
@@ -1600,9 +1609,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
 	if (mem->dmabuf)
 		dma_buf_put(mem->dmabuf);
-	drm_gem_object_put(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
-	kfree(mem);
+
+	/* If this releases the last reference, it will end up calling
+	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
+	 * this needs to be the last call here.
+	 */
+	drm_gem_object_put(&mem->bo->tbo.base);
 
 	return ret;
 }


@@ -2398,10 +2398,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (!adev->gmc.xgmi.pending_reset)
 		amdgpu_amdkfd_device_init(adev);
 
-	r = amdgpu_amdkfd_resume_iommu(adev);
-	if (r)
-		goto init_failed;
-
 	amdgpu_fru_get_product_info(adev);
 
 init_failed:
@@ -3171,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
-#if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+		return amdgpu_dc > 0;
+#else
+		return false;
 #endif
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
@@ -4287,8 +4293,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	amdgpu_amdkfd_pre_reset(adev);
-
 	/* Resume IP prior to SMC */
 	r = amdgpu_device_ip_reinit_early_sriov(adev);
 	if (r)
@@ -4850,6 +4854,9 @@ static void amdgpu_device_recheck_guilty_jobs(
 		/* clear job's guilty and depend the folowing step to decide the real one */
 		drm_sched_reset_karma(s_job);
+		/* for the real bad job, it will be resubmitted twice, adding a dma_fence_get
+		 * to make sure fence is balanced */
+		dma_fence_get(s_job->s_fence->parent);
 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
 
 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
@@ -4885,6 +4892,7 @@ static void amdgpu_device_recheck_guilty_jobs(
 
 		/* got the hw fence, signal finished fence */
 		atomic_dec(ring->sched.score);
+		dma_fence_put(s_job->s_fence->parent);
 		dma_fence_get(&s_job->s_fence->finished);
 		dma_fence_signal(&s_job->s_fence->finished);
 		dma_fence_put(&s_job->s_fence->finished);
@@ -5020,7 +5028,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-		if (!amdgpu_sriov_vf(tmp_adev))
-			amdgpu_amdkfd_pre_reset(tmp_adev);
+		amdgpu_amdkfd_pre_reset(tmp_adev);
 
 		/*


@@ -867,6 +867,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(2, 0, 2):
 	case IP_VERSION(2, 2, 0):
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+		if (!amdgpu_sriov_vf(adev))
+			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	case IP_VERSION(2, 0, 3):
@@ -881,6 +882,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(3, 0, 0):
 	case IP_VERSION(3, 0, 16):
+	case IP_VERSION(3, 0, 64):
 	case IP_VERSION(3, 1, 1):
 	case IP_VERSION(3, 0, 2):
 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);


@@ -61,7 +61,8 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 		}
 
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-					       TTM_BO_VM_NUM_PREFAULT, 1);
+					       TTM_BO_VM_NUM_PREFAULT);
+
 		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);


@@ -1423,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
 	struct atom_context *ctx = adev->mode_info.atom_context;
+	uint8_t smu_minor, smu_debug;
+	uint16_t smu_major;
 	int ret, i;
 
 	static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
@@ -1568,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
 	if (ret)
 		return ret;
-	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
-		   fw_info.feature, fw_info.ver);
+	smu_major = (fw_info.ver >> 16) & 0xffff;
+	smu_minor = (fw_info.ver >> 8) & 0xff;
+	smu_debug = (fw_info.ver >> 0) & 0xff;
+	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
+		   fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
 
 	/* SDMA */
 	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;

@@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	abo = ttm_to_amdgpu_bo(bo);
 
 	if (abo->kfd_bo)
-		amdgpu_amdkfd_unreserve_memory_limit(abo);
+		amdgpu_amdkfd_release_notify(abo);
 
 	/* We only remove the fence if the resv has individualized. */
 	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel


@@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		adev->vcn.indirect_sram = true;
 		break;
 	case IP_VERSION(3, 0, 0):
+	case IP_VERSION(3, 0, 64):
 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 			fw_name = FIRMWARE_SIENNA_CICHLID;
 		else


@@ -806,9 +806,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
 	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 xgmi23_pcs_err_status_reg_aldebaran[i]);
-	for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+	for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
-				 xgmi23_pcs_err_status_reg_aldebaran[i]);
+				 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
 	for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
 		pcs_clear_status(adev,
 				 walf_pcs_err_status_reg_aldebaran[i]);


@@ -8249,6 +8249,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
+
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	/* not for *_SOC15 */
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -8263,6 +8266,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -8316,11 +8321,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
 		switch (adev->ip_versions[GC_HWIP][0]) {
 		case IP_VERSION(10, 3, 1):
-			data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
-			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
-			break;
 		case IP_VERSION(10, 3, 3):
-			data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+			data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
 			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
 			break;
 		default:


@@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	data = RREG32(mmRLC_SPM_VMID);
 
 	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
 	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
 
 	WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)


@@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
 	else
@@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
 	else
 		WREG32(mmRLC_SPM_VMID, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {


@@ -2462,7 +2462,9 @@ static int gfx_v9_0_sw_fini(void *handle)
 	amdgpu_gfx_kiq_fini(adev);
 
 	gfx_v9_0_mec_fini(adev);
-	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+				&adev->gfx.rlc.clear_state_gpu_addr,
+				(void **)&adev->gfx.rlc.cs_ptr);
 	if (adev->flags & AMD_IS_APU) {
 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
 				&adev->gfx.rlc.cp_table_gpu_addr,
@@ -5102,6 +5104,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
 	u32 reg, data;
 
+	amdgpu_gfx_off_ctrl(adev, false);
+
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(reg);
@@ -5115,6 +5119,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+
+	amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,


@@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
 				    i * hub->ctx_distance, 0);
 
+	if (amdgpu_sriov_vf(adev))
+		/* Avoid write to GMC registers */
+		return;
+
 	/* Setup TLB control */
 	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
 	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);


@@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 		seg_size = REG_GET_FIELD(
 			RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE),
 			MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+		max_region =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION);
 	} else {
 		xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
 		seg_size = REG_GET_FIELD(
 			RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
 			MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
-	}
-
-	max_region =
-		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+		max_region =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+	}
 
 	switch (adev->asic_type) {
@@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 	if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
 		return -EINVAL;
 
-	adev->gmc.xgmi.physical_node_id =
-		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
-				PF_LFB_REGION);
+	if (adev->asic_type == CHIP_ALDEBARAN) {
+		adev->gmc.xgmi.physical_node_id =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE,
+					PF_LFB_REGION);
+	} else {
+		adev->gmc.xgmi.physical_node_id =
+			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+					PF_LFB_REGION);
+	}
 
 	if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
 		return -EINVAL;


@@ -182,6 +182,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
 {
 	switch (adev->ip_versions[UVD_HWIP][0]) {
 	case IP_VERSION(3, 0, 0):
+	case IP_VERSION(3, 0, 64):
 		if (amdgpu_sriov_vf(adev)) {
 			if (encode)
 				*codecs = &sriov_sc_video_codecs_encode;


@@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v6_0_stop(adev);
+
+	return 0;
+}
+
+static int uvd_v6_0_suspend(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
 	/*
 	 * Proper cleanups before halting the HW engine:
 	 * - cancel the delayed idle work
@@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle)
 			AMD_CG_STATE_GATE);
 	}
 
-	if (RREG32(mmUVD_STATUS) != 0)
-		uvd_v6_0_stop(adev);
-
-	return 0;
-}
-
-static int uvd_v6_0_suspend(void *handle)
-{
-	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
 	r = uvd_v6_0_hw_fini(adev);
 	if (r)
 		return r;


@@ -406,7 +406,7 @@ static const struct kfd_device_info aldebaran_device_info = {
 static const struct kfd_device_info renoir_device_info = {
 	.asic_family = CHIP_RENOIR,
 	.asic_name = "renoir",
-	.gfx_target_version = 90002,
+	.gfx_target_version = 90012,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
 	.doorbell_size  = 8,


@@ -1430,7 +1430,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->sched_running)
 		return 0;
 
-	if (dqm->is_hws_hang)
+	if (dqm->is_hws_hang || dqm->is_resetting)
 		return -EIO;
 	if (!dqm->active_runlist)
 		return retval;


@@ -308,7 +308,7 @@
  * 16MB are reserved for kernel use (CWSR trap handler and kernel IB
  * for now).
  */
-#define SVM_USER_BASE 0x1000000ull
+#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE)
 #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
 #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)


@@ -281,6 +281,19 @@ static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
 	return cpages;
 }
 
+static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+{
+	unsigned long upages = 0;
+	unsigned long i;
+
+	for (i = 0; i < migrate->npages; i++) {
+		if (migrate->src[i] & MIGRATE_PFN_VALID &&
+		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			upages++;
+	}
+	return upages;
+}
+
 static int
 svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
@@ -632,10 +645,11 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
 {
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
+	unsigned long upages = npages;
+	unsigned long cpages = 0;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate;
-	unsigned long cpages = 0;
 	dma_addr_t *scratch;
 	size_t size;
 	void *buf;
@@ -669,6 +683,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	if (!cpages) {
 		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
 			 prange->start, prange->last);
+		upages = svm_migrate_unsuccessful_pages(&migrate);
 		goto out_free;
 	}
 	if (cpages != npages)
@@ -681,8 +696,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			    scratch, npages);
 	migrate_vma_pages(&migrate);
 
-	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
-		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+	upages = svm_migrate_unsuccessful_pages(&migrate);
+	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+		 upages, cpages, migrate.npages);
 
 	svm_migrate_copy_done(adev, mfence);
 	migrate_vma_finalize(&migrate);
@@ -696,9 +712,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		if (pdd)
 			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
 
-		return cpages;
+		return upages;
 	}
-	return r;
+	return r ? r : upages;
 }
@@ -718,7 +734,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	unsigned long cpages = 0;
+	unsigned long upages = 0;
 	long r = 0;
 
 	if (!prange->actual_loc) {
@@ -754,12 +770,12 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
 			pr_debug("failed %ld to migrate\n", r);
 			break;
 		} else {
-			cpages += r;
+			upages += r;
 		}
 		addr = next;
 	}
 
-	if (cpages) {
+	if (!upages) {
 		svm_range_vram_node_free(prange);
 		prange->actual_loc = 0;
 	}
@@ -782,7 +798,7 @@ static int
 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 			 struct mm_struct *mm)
 {
-	int r;
+	int r, retries = 3;
 
 	/*
 	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
@@ -791,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 
 	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
 
-	r = svm_migrate_vram_to_ram(prange, mm);
-	if (r)
-		return r;
+	do {
+		r = svm_migrate_vram_to_ram(prange, mm);
+		if (r)
+			return r;
+	} while (prange->actual_loc && --retries);
+
+	if (prange->actual_loc)
+		return -EDEADLK;
 
 	return svm_migrate_ram_to_vram(prange, best_loc, mm);
 }
@@ -838,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 		pr_debug("failed find process at fault address 0x%lx\n", addr);
 		return VM_FAULT_SIGBUS;
 	}
+	if (READ_ONCE(p->svms.faulting_task) == current) {
+		pr_debug("skipping ram migration\n");
+		kfd_unref_process(p);
+		return 0;
+	}
 	addr >>= PAGE_SHIFT;
 	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);


@@ -766,8 +766,10 @@ struct svm_range_list {
 	struct list_head		deferred_range_list;
 	spinlock_t			deferred_list_lock;
 	atomic_t			evicted_ranges;
+	bool				drain_pagefaults;
 	struct delayed_work		restore_work;
 	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
+	struct task_struct		*faulting_task;
 };
 
 /* Process data */


@@ -1715,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p)
 
 		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
 							    &pdd->qpd);
-		if (r) {
+		/* evict return -EIO if HWS is hang or asic is resetting, in this case
+		 * we would like to set all the queues to be in evicted state to prevent
+		 * them been add back since they actually not be saved right now.
+		 */
+		if (r && r != -EIO) {
 			pr_err("Failed to evict process queues\n");
 			goto fail;
 		}


@ -1496,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end); next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT; npages = (next - addr) >> PAGE_SHIFT;
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
addr, npages, &hmm_range, addr, npages, &hmm_range,
readonly, true, owner); readonly, true, owner);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) { if (r) {
pr_debug("failed %d to get svm range pages\n", r); pr_debug("failed %d to get svm range pages\n", r);
goto unreserve_out; goto unreserve_out;
@ -2000,20 +2002,28 @@ static void svm_range_deferred_list_work(struct work_struct *work)
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op); prange->start, prange->last, prange->work_item.op);
/* Make sure no stale retry fault coming after range is freed */
if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
svm_range_drain_retry_fault(prange->svms);
mm = prange->work_item.mm; mm = prange->work_item.mm;
retry:
mmap_write_lock(mm); mmap_write_lock(mm);
mutex_lock(&svms->lock); mutex_lock(&svms->lock);
/* Remove from deferred_list must be inside mmap write lock, /* Checking for the need to drain retry faults must be in
* mmap write lock to serialize with munmap notifiers.
*
* Remove from deferred_list must be inside mmap write lock,
* otherwise, svm_range_list_lock_and_flush_work may hold mmap * otherwise, svm_range_list_lock_and_flush_work may hold mmap
* write lock, and continue because deferred_list is empty, then * write lock, and continue because deferred_list is empty, then
* deferred_list handle is blocked by mmap write lock. * deferred_list handle is blocked by mmap write lock.
*/ */
spin_lock(&svms->deferred_list_lock); spin_lock(&svms->deferred_list_lock);
if (unlikely(svms->drain_pagefaults)) {
svms->drain_pagefaults = false;
spin_unlock(&svms->deferred_list_lock);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}
list_del_init(&prange->deferred_list); list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock); spin_unlock(&svms->deferred_list_lock);
@ -2046,6 +2056,12 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op) struct mm_struct *mm, enum svm_work_list_ops op)
{ {
 	spin_lock(&svms->deferred_list_lock);
+
+	/* Make sure pending page faults are drained in the deferred worker
+	 * before the range is freed to avoid straggler interrupts on
+	 * unmapped memory causing "phantom faults".
+	 */
+	if (op == SVM_OP_UNMAP_RANGE)
+		svms->drain_pagefaults = true;
+
 	/* if prange is on the deferred list */
 	if (!list_empty(&prange->deferred_list)) {
 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2261,7 +2277,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
  * migration if actual loc is not best location, then update GPU page table
  * mapping to the best location.
  *
- * If vm fault gpu is range preferred loc, the best_loc is preferred loc.
+ * If the preferred loc is accessible by faulting GPU, use preferred loc.
  * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
  * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
  *    if range actual loc is cpu, best_loc is cpu
@@ -2278,7 +2294,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 				struct amdgpu_device *adev,
 				int32_t *gpuidx)
 {
-	struct amdgpu_device *bo_adev;
+	struct amdgpu_device *bo_adev, *preferred_adev;
 	struct kfd_process *p;
 	uint32_t gpuid;
 	int r;
@@ -2291,8 +2307,16 @@ svm_range_best_restore_location(struct svm_range *prange,
 		return -1;
 	}
 
-	if (prange->preferred_loc == gpuid)
+	if (prange->preferred_loc == gpuid ||
+	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
 		return prange->preferred_loc;
+	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
+		preferred_adev = svm_range_get_adev_by_id(prange,
+							prange->preferred_loc);
+		if (amdgpu_xgmi_same_hive(adev, preferred_adev))
+			return prange->preferred_loc;
+		/* fall through */
+	}
 
 	if (test_bit(*gpuidx, prange->bitmap_access))
 		return gpuid;
@@ -2313,7 +2337,8 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 static int
 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
-			       unsigned long *start, unsigned long *last)
+			       unsigned long *start, unsigned long *last,
+			       bool *is_heap_stack)
 {
 	struct vm_area_struct *vma;
 	struct interval_tree_node *node;
@@ -2324,6 +2349,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
 		return -EFAULT;
 	}
+
+	*is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
+			  vma->vm_end >= vma->vm_mm->start_brk) ||
+			 (vma->vm_start <= vma->vm_mm->start_stack &&
+			  vma->vm_end >= vma->vm_mm->start_stack);
+
 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
 			  (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
@@ -2353,9 +2384,9 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 	*start = start_limit;
 	*last = end_limit - 1;
 
-	pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
-		 vma->vm_start >> PAGE_SHIFT, *start,
-		 vma->vm_end >> PAGE_SHIFT, *last);
+	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
+		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
+		 *start, *last, *is_heap_stack);
 
 	return 0;
 }
@@ -2420,11 +2451,13 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 	struct svm_range *prange = NULL;
 	unsigned long start, last;
 	uint32_t gpuid, gpuidx;
+	bool is_heap_stack;
 	uint64_t bo_s = 0;
 	uint64_t bo_l = 0;
 	int r;
 
-	if (svm_range_get_range_boundaries(p, addr, &start, &last))
+	if (svm_range_get_range_boundaries(p, addr, &start, &last,
+					   &is_heap_stack))
 		return NULL;
 
 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
@@ -2451,6 +2484,9 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 		return NULL;
 	}
 
+	if (is_heap_stack)
+		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
+
 	svm_range_add_to_svms(prange);
 	svm_range_add_notifier_locked(mm, prange);
@@ -3076,6 +3112,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 		struct svm_range *prange =
 				list_first_entry(&svm_bo->range_list,
 						 struct svm_range, svm_bo_list);
+		int retries = 3;
+
 		list_del_init(&prange->svm_bo_list);
 		spin_unlock(&svm_bo->list_lock);
@@ -3083,7 +3121,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 			 prange->start, prange->last);
 
 		mutex_lock(&prange->migrate_mutex);
-		svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
+		do {
+			svm_migrate_vram_to_ram(prange,
+						svm_bo->eviction_fence->mm);
+		} while (prange->actual_loc && --retries);
+		WARN(prange->actual_loc, "Migration failed during eviction");
 
 		mutex_lock(&prange->lock);
 		prange->svm_bo = NULL;
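
The is_heap_stack test added above is a plain interval-overlap check against the mm's brk and stack markers. A standalone sketch of the same predicate, with a hypothetical helper name and plain integer types in place of the mm_struct fields:

#include <stdbool.h>

/* Illustration only, not from the patch: a VMA [vm_start, vm_end)
 * counts as heap if it overlaps [start_brk, brk], and as stack if it
 * contains the address recorded in start_stack.
 */
static bool vma_is_heap_or_stack(unsigned long vm_start, unsigned long vm_end,
                                 unsigned long start_brk, unsigned long brk,
                                 unsigned long start_stack)
{
    bool is_heap  = vm_start <= brk && vm_end >= start_brk;
    bool is_stack = vm_start <= start_stack && vm_end >= start_stack;

    return is_heap || is_stack;
}

Ranges classified this way get KFD_IOCTL_SVM_LOCATION_SYSMEM as the preferred location, which keeps CPU-heavy heap/stack pages out of VRAM migration churn.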


@@ -217,6 +217,7 @@ static const struct drm_format_info *
 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
 
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
 
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
@@ -619,7 +620,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 		amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
 }
-#endif
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
 
 /**
  * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
@@ -669,10 +670,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 		return;
 	}
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
 	link_index = notify->link_index;
 	link = adev->dm.dc->links[link_index];
 
 	drm_connector_list_iter_begin(dev, &iter);
@@ -685,10 +683,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
-	if (hpd_aconnector)
+	if (hpd_aconnector) {
+		if (notify->type == DMUB_NOTIFICATION_HPD)
 			handle_hpd_irq_helper(hpd_aconnector);
+		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+			handle_hpd_rx_irq(hpd_aconnector);
+	}
 }
 
 /**
@@ -764,6 +765,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
 				continue;
 			}
+			if (!dm->dmub_callback[notify.type]) {
+				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+				continue;
+			}
 			if (dm->dmub_thread_offload[notify.type] == true) {
 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
 				if (!dmub_hpd_wrk) {
@@ -813,7 +818,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	if (count > DMUB_TRACE_MAX_READ)
 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 static int dm_set_clockgating_state(void *handle,
 		  enum amd_clockgating_state state)
@@ -1410,7 +1415,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	switch (adev->ip_versions[DCE_HWIP][0]) {
 	case IP_VERSION(2, 1, 0):
 		init_data.flags.gpu_vm_support = true;
-		init_data.flags.disable_dmcu = true;
+		switch (adev->dm.dmcub_fw_version) {
+		case 0: /* development */
+		case 0x1: /* linux-firmware.git hash 6d9f399 */
+		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+			init_data.flags.disable_dmcu = false;
+			break;
+		default:
+			init_data.flags.disable_dmcu = true;
+		}
 		break;
 	case IP_VERSION(1, 0, 0):
 	case IP_VERSION(1, 0, 1):
@@ -1556,7 +1569,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 			goto error;
 		}
-#endif
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+			goto error;
+		}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 	}
 
 	if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -4565,7 +4582,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
 }
 
-static int fill_dc_scaling_info(const struct drm_plane_state *state,
+static int fill_dc_scaling_info(struct amdgpu_device *adev,
+				const struct drm_plane_state *state,
 				struct dc_scaling_info *scaling_info)
 {
 	int scale_w, scale_h, min_downscale, max_upscale;
@@ -4579,7 +4597,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	/*
 	 * For reasons we don't (yet) fully understand a non-zero
 	 * src_y coordinate into an NV12 buffer can cause a
-	 * system hang. To avoid hangs (and maybe be overly cautious)
+	 * system hang on DCN1x.
+	 * To avoid hangs (and maybe be overly cautious)
 	 * let's reject both non-zero src_x and src_y.
 	 *
 	 * We currently know of only one use-case to reproduce a
@@ -4587,10 +4606,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	 * is to gesture the YouTube Android app into full screen
 	 * on ChromeOS.
 	 */
-	if (state->fb &&
-	    state->fb->format->format == DRM_FORMAT_NV12 &&
-	    (scaling_info->src_rect.x != 0 ||
-	     scaling_info->src_rect.y != 0))
+	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
+	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
+	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
 		return -EINVAL;
 
 	scaling_info->src_rect.width = state->src_w >> 16;
@@ -5496,7 +5515,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	int ret;
 	bool force_disable_dcc = false;
 
-	ret = fill_dc_scaling_info(plane_state, &scaling_info);
+	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
 	if (ret)
 		return ret;
@@ -6070,7 +6089,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
 }
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 /**
  * DOC: FreeSync Video
@@ -7241,8 +7260,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 	struct drm_connector_state *new_con_state;
 	struct amdgpu_dm_connector *aconnector;
 	struct dm_connector_state *dm_conn_state;
-	int i, j, clock;
-	int vcpi, pbn_div, pbn = 0;
+	int i, j;
+	int vcpi, pbn_div, pbn, slot_num = 0;
 
 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -7270,17 +7289,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 		if (!stream)
 			continue;
 
-		if (stream->timing.flags.DSC != 1) {
-			drm_dp_mst_atomic_enable_dsc(state,
-						     aconnector->port,
-						     dm_conn_state->pbn,
-						     0,
-						     false);
-			continue;
-		}
-
 		pbn_div = dm_mst_get_pbn_divider(stream->link);
-		clock = stream->timing.pix_clk_100hz / 10;
 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
 		for (j = 0; j < dc_state->stream_count; j++) {
 			if (vars[j].aconnector == aconnector) {
@@ -7289,6 +7298,23 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 			}
 		}
 
+		if (j == dc_state->stream_count)
+			continue;
+
+		slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+		if (stream->timing.flags.DSC != 1) {
+			dm_conn_state->pbn = pbn;
+			dm_conn_state->vcpi_slots = slot_num;
+
+			drm_dp_mst_atomic_enable_dsc(state,
+						     aconnector->port,
+						     dm_conn_state->pbn,
+						     0,
+						     false);
+			continue;
+		}
+
 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
 						    aconnector->port,
 						    pbn, pbn_div,
@@ -7552,7 +7578,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
+	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
 	if (ret)
 		return ret;
@@ -9000,7 +9026,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
 		}
 
-		fill_dc_scaling_info(new_plane_state,
+		fill_dc_scaling_info(dm->adev, new_plane_state,
 				     &bundle->scaling_infos[planes_count]);
 
 		bundle->surface_updates[planes_count].scaling_info =
@@ -10787,7 +10813,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		ret = drm_atomic_add_affected_connectors(state, crtc);
 		if (ret)
-			return ret;
+			goto fail;
 
 		ret = drm_atomic_add_affected_planes(state, crtc);
 		if (ret)
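
The outbox IRQ change above guards a per-type handler table before dispatching, instead of calling through a possibly NULL entry. A minimal standalone sketch of that pattern, with hypothetical names rather than the driver's actual types:

#include <stdio.h>

#define NOTIFY_TYPE_MAX 4

typedef void (*notify_handler_t)(int payload);

static notify_handler_t handlers[NOTIFY_TYPE_MAX];

static void dispatch(int type, int payload)
{
    if (type < 0 || type >= NOTIFY_TYPE_MAX) {
        fprintf(stderr, "notify type %d invalid\n", type);
        return;
    }
    if (!handlers[type]) {
        /* skip rather than dereference a NULL handler */
        fprintf(stderr, "notification skipped, no handler: type=%d\n", type);
        return;
    }
    handlers[type](payload);
}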


@@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
 
 	wr_buf_ptr = wr_buf;
 
-	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
-
 	/* r is bytes not be copied */
-	if (r >= wr_buf_size) {
-		DRM_DEBUG_DRIVER("user data not be read\n");
-		return -EINVAL;
+	if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) {
+		DRM_DEBUG_DRIVER("user data could not be read successfully\n");
+		return -EFAULT;
 	}
 
 	/* check number of parameters. isspace could not differ space and \n */
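
For reference, copy_from_user() returns the number of bytes it could not copy, so any nonzero return is a fault. The old test, r >= wr_buf_size, only failed when nothing at all was copied; a partial copy slipped through as success. A sketch of the corrected idiom in a generic debugfs-style write handler (hypothetical function, not the driver's code):

/* Hypothetical kernel-style write handler illustrating the idiom. */
static ssize_t example_write(struct file *f, const char __user *ubuf,
                             size_t len, loff_t *ppos)
{
    char kbuf[64];

    if (len >= sizeof(kbuf))
        return -EINVAL;

    if (copy_from_user(kbuf, ubuf, len))    /* nonzero => partial copy */
        return -EFAULT;

    kbuf[len] = '\0';
    return len;
}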


@@ -534,13 +534,14 @@ static int kbps_to_peak_pbn(int kbps)
 
 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
 					       struct dsc_mst_fairness_vars *vars,
-					       int count)
+					       int count,
+					       int k)
 {
 	int i;
 
 	for (i = 0; i < count; i++) {
 		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
-		if (vars[i].dsc_enabled && dc_dsc_compute_config(
+		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
 					params[i].sink->ctx->dc->res_pool->dscs[0],
 					&params[i].sink->dsc_caps.dsc_dec_caps,
 					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
@@ -553,7 +554,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
 			if (params[i].bpp_overwrite)
 				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
 			else
-				params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+				params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;
 
 			if (params[i].num_slices_h)
 				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
@@ -586,7 +587,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
 			     struct dc_link *dc_link,
 			     struct dsc_mst_fairness_params *params,
 			     struct dsc_mst_fairness_vars *vars,
-			     int count)
+			     int count,
+			     int k)
 {
 	int i;
 	bool bpp_increased[MAX_PIPES];
@@ -601,8 +603,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
 	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
 
 	for (i = 0; i < count; i++) {
-		if (vars[i].dsc_enabled) {
-			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+		if (vars[i + k].dsc_enabled) {
+			initial_slack[i] =
+				kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
 			bpp_increased[i] = false;
 			remaining_to_increase += 1;
 		} else {
@@ -629,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
 		link_timeslots_used = 0;
 
 		for (i = 0; i < count; i++)
-			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+			link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
 
 		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
@@ -682,7 +685,8 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 			    struct dc_link *dc_link,
 			    struct dsc_mst_fairness_params *params,
 			    struct dsc_mst_fairness_vars *vars,
-			    int count)
+			    int count,
+			    int k)
 {
 	int i;
 	bool tried[MAX_PIPES];
@@ -692,8 +696,8 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 	int remaining_to_try = 0;
 
 	for (i = 0; i < count; i++) {
-		if (vars[i].dsc_enabled
-				&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
+		if (vars[i + k].dsc_enabled
+				&& vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
 				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
 			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
 			tried[i] = false;
@@ -748,9 +752,10 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 					     struct dc_state *dc_state,
 					     struct dc_link *dc_link,
-					     struct dsc_mst_fairness_vars *vars)
+					     struct dsc_mst_fairness_vars *vars,
+					     int *link_vars_start_index)
 {
-	int i;
+	int i, k;
 	struct dc_stream_state *stream;
 	struct dsc_mst_fairness_params params[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
@@ -768,11 +773,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		if (stream->link != dc_link)
 			continue;
 
+		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+		if (!aconnector)
+			continue;
+
+		if (!aconnector->port)
+			continue;
+
 		stream->timing.flags.DSC = 0;
 
 		params[count].timing = &stream->timing;
 		params[count].sink = stream->sink;
-		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 		params[count].aconnector = aconnector;
 		params[count].port = aconnector->port;
 		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
@@ -794,44 +805,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		count++;
 	}
 
+	if (count == 0) {
+		ASSERT(0);
+		return true;
+	}
+
+	/* k is start index of vars for current phy link used by mst hub */
+	k = *link_vars_start_index;
+	/* set vars start index for next mst hub phy link */
+	*link_vars_start_index += count;
+
 	/* Try no compression */
 	for (i = 0; i < count; i++) {
-		vars[i].aconnector = params[i].aconnector;
-		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
-		vars[i].dsc_enabled = false;
-		vars[i].bpp_x16 = 0;
+		vars[i + k].aconnector = params[i].aconnector;
+		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+		vars[i + k].dsc_enabled = false;
+		vars[i + k].bpp_x16 = 0;
 		if (drm_dp_atomic_find_vcpi_slots(state,
 						  params[i].port->mgr,
 						  params[i].port,
-						  vars[i].pbn,
+						  vars[i + k].pbn,
 						  dm_mst_get_pbn_divider(dc_link)) < 0)
 			return false;
 	}
 	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
-		set_dsc_configs_from_fairness_vars(params, vars, count);
+		set_dsc_configs_from_fairness_vars(params, vars, count, k);
 		return true;
 	}
 
 	/* Try max compression */
 	for (i = 0; i < count; i++) {
 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
-			vars[i].dsc_enabled = true;
-			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+			vars[i + k].dsc_enabled = true;
+			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
 			if (drm_dp_atomic_find_vcpi_slots(state,
 							  params[i].port->mgr,
 							  params[i].port,
-							  vars[i].pbn,
+							  vars[i + k].pbn,
 							  dm_mst_get_pbn_divider(dc_link)) < 0)
 				return false;
 		} else {
-			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
-			vars[i].dsc_enabled = false;
-			vars[i].bpp_x16 = 0;
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+			vars[i + k].dsc_enabled = false;
+			vars[i + k].bpp_x16 = 0;
 			if (drm_dp_atomic_find_vcpi_slots(state,
 							  params[i].port->mgr,
 							  params[i].port,
-							  vars[i].pbn,
+							  vars[i + k].pbn,
 							  dm_mst_get_pbn_divider(dc_link)) < 0)
 				return false;
 		}
@@ -840,15 +862,76 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		return false;
 
 	/* Optimize degree of compression */
-	increase_dsc_bpp(state, dc_link, params, vars, count);
+	increase_dsc_bpp(state, dc_link, params, vars, count, k);
 
-	try_disable_dsc(state, dc_link, params, vars, count);
+	try_disable_dsc(state, dc_link, params, vars, count, k);
 
-	set_dsc_configs_from_fairness_vars(params, vars, count);
+	set_dsc_configs_from_fairness_vars(params, vars, count, k);
 
 	return true;
 }
 
+static bool is_dsc_need_re_compute(
+	struct drm_atomic_state *state,
+	struct dc_state *dc_state,
+	struct dc_link *dc_link)
+{
+	int i;
+	bool is_dsc_need_re_compute = false;
+
+	/* only check phy used by mst branch */
+	if (dc_link->type != dc_connection_mst_branch)
+		return false;
+
+	/* check if there is mode change in new request */
+	for (i = 0; i < dc_state->stream_count; i++) {
+		struct amdgpu_dm_connector *aconnector;
+		struct dc_stream_state *stream;
+		struct drm_crtc_state *new_crtc_state;
+		struct drm_connector_state *new_conn_state;
+
+		stream = dc_state->streams[i];
+		if (!stream)
+			continue;
+
+		/* check if stream using the same link for mst */
+		if (stream->link != dc_link)
+			continue;
+
+		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
+		if (!aconnector)
+			continue;
+
+		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
+		if (!new_conn_state)
+			continue;
+
+		if (IS_ERR(new_conn_state))
+			continue;
+
+		if (!new_conn_state->crtc)
+			continue;
+
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+		if (!new_crtc_state)
+			continue;
+
+		if (IS_ERR(new_crtc_state))
+			continue;
+
+		if (new_crtc_state->enable && new_crtc_state->active) {
+			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
+				new_crtc_state->connectors_changed)
+				is_dsc_need_re_compute = true;
+		}
+	}
+
+	return is_dsc_need_re_compute;
+}
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 				       struct dc_state *dc_state,
 				       struct dsc_mst_fairness_vars *vars)
@@ -857,6 +940,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 	struct dc_stream_state *stream;
 	bool computed_streams[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
+	int link_vars_start_index = 0;
 
 	for (i = 0; i < dc_state->stream_count; i++)
 		computed_streams[i] = false;
@@ -881,8 +965,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
 			return false;
 
+		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
+			continue;
+
 		mutex_lock(&aconnector->mst_mgr.lock);
-		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
+		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
+			vars, &link_vars_start_index)) {
 			mutex_unlock(&aconnector->mst_mgr.lock);
 			return false;
 		}
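
The recurring vars[i] to vars[i + k] change above gives each MST phy link its own contiguous slice of the shared vars[] array: k is the link's base offset and is advanced by its stream count, so two hubs on different links no longer overwrite each other's fairness state. A standalone sketch of just the allocation arithmetic (hypothetical names):

#define MAX_PIPES 6

struct fairness_var { int pbn; };

static struct fairness_var vars[MAX_PIPES];

/* Reserve `count` consecutive slots for one link and return its base. */
static int reserve_link_slice(int *link_vars_start_index, int count)
{
    int k = *link_vars_start_index;  /* this link's first slot      */

    *link_vars_start_index += count; /* next link starts after ours */
    return k;
}

For example, a hub with 2 streams gets vars[0..1] (k = 0); a second hub with 3 streams then gets vars[2..4] (k = 2).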


@@ -1085,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 			struct dc_stream_state *old_stream =
 					dc->current_state->res_ctx.pipe_ctx[i].stream;
 			bool should_disable = true;
+			bool pipe_split_change =
+				context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
 
 			for (j = 0; j < context->stream_count; j++) {
 				if (old_stream == context->streams[j]) {
@@ -1092,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 					break;
 				}
 			}
+			if (!should_disable && pipe_split_change)
+				should_disable = true;
+
 			if (should_disable && old_stream) {
 				dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
 				disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1887,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
 	return false;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
 /* Perform updates here which need to be deferred until next vupdate
  *
  * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
@@ -1896,7 +1902,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
  */
 static void process_deferred_updates(struct dc *dc)
 {
-#ifdef CONFIG_DRM_AMD_DC_DCN
 	int i = 0;
 
 	if (dc->debug.enable_mem_low_power.bits.cm) {
@@ -1905,8 +1910,8 @@ static void process_deferred_updates(struct dc *dc)
 			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
 				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
 	}
-#endif
 }
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 void dc_post_update_surfaces_to_stream(struct dc *dc)
 {
@@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
 		}
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
 	process_deferred_updates(dc);
+#endif
 
 	dc->hwss.optimize_bandwidth(dc, context);
@@ -3603,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc)
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
 	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
-	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0)
+	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+	    !dc->debug.dpia_debug.bits.disable_dpia)
 		return true;
 #endif
 	/* dmub aux needs dmub notifications to be enabled */


@@ -4279,6 +4279,8 @@ void core_link_enable_stream(
 		 */
 		if (status != DC_FAIL_DP_LINK_TRAINING ||
 				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			if (false == stream->link->link_status.link_active)
+				disable_link(stream->link, pipe_ctx->stream->signal);
 			BREAK_TO_DEBUGGER();
 			return;
 		}
@@ -4768,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 					   timing->dsc_cfg.bits_per_pixel,
 					   timing->dsc_cfg.num_slices_h,
 					   timing->dsc_cfg.is_dp);
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:


@@ -5329,6 +5329,14 @@ bool dc_link_dp_set_test_pattern(
 			return false;
 
 		if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
+				core_link_write_dpcd(link,
+						DP_LINK_SQUARE_PATTERN,
+						p_custom_pattern,
+						1);
+
+#endif
 			/* tell receiver that we are sending qualification
 			 * pattern DP 1.2 or later - DP receiver's link quality
 			 * pattern is set using DPCD LINK_QUAL_LANEx_SET


@@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link(
 	return link_enc;
 }
 
+/* Clear all link encoder assignments. */
+static void clear_enc_assignments(struct dc_state *state)
+{
+	int i;
+	enum engine_id eng_id;
+	struct dc_stream_state *stream;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+		eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
+		stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
+		if (eng_id != ENGINE_ID_UNKNOWN)
+			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
+		if (stream)
+			stream->link_enc = NULL;
+	}
+}
+
 void link_enc_cfg_init(
 	struct dc *dc,
@@ -250,6 +267,8 @@ void link_enc_cfg_init(
 			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
 	}
 
+	clear_enc_assignments(state);
+
 	state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
 }
@@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign(
 	ASSERT(state->stream_count == stream_count);
 
+	if (stream_count == 0)
+		clear_enc_assignments(state);
+
 	/* Release DIG link encoder resources before running assignment algorithm. */
 	for (i = 0; i < stream_count; i++)
 		dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
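
clear_enc_assignments() above walks the fixed-size assignment table, invalidates every entry, returns any claimed DIG engine to the avail pool, and NULLs the stream's back-pointer so nothing dangles. A reduced sketch of that reset pattern over a parallel assignment/availability pair (hypothetical types, not the DC structures):

#define NUM_SLOTS 6
#define ENG_UNKNOWN (-1)

struct assignment {
    int valid;
    int eng_id;   /* index into avail[] when not ENG_UNKNOWN */
};

static struct assignment assignments[NUM_SLOTS];
static int avail[NUM_SLOTS];

static void clear_assignments(void)
{
    int i;

    for (i = 0; i < NUM_SLOTS; i++) {
        assignments[i].valid = 0;
        /* hand a claimed engine back to the availability pool */
        if (assignments[i].eng_id != ENG_UNKNOWN)
            avail[assignments[i].eng_id] = assignments[i].eng_id;
    }
}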


@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.159"
+#define DC_VER "3.2.160"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -675,6 +675,7 @@ struct dc_debug_options {
 #endif
 	union mem_low_power_enable_options enable_mem_low_power;
 	union root_clock_optimization_options root_clock_optimization;
+	bool hpo_optimization;
 	bool force_vblank_alignment;
 
 	/* Enable dmub aux for legacy ddc */


@@ -898,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info {
 #ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
 #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT	0x0A3
 #endif
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN			0x10F
+#endif
 #ifndef DP_DSC_CONFIGURATION
 #define DP_DSC_CONFIGURATION			0x161
 #endif


@@ -671,6 +671,7 @@ struct dce_hwseq_registers {
 	uint32_t MC_VM_FB_LOCATION_BASE;
 	uint32_t MC_VM_FB_LOCATION_TOP;
 	uint32_t MC_VM_FB_OFFSET;
+	uint32_t HPO_TOP_HW_CONTROL;
 };
 /* set field name */
 #define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1152,7 +1153,8 @@ struct dce_hwseq_registers {
 	type DOMAIN_PGFSM_PWR_STATUS;\
 	type HPO_HDMISTREAMCLK_G_GATE_DIS;\
 	type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\
-	type I2C_LIGHT_SLEEP_FORCE;
+	type I2C_LIGHT_SLEEP_FORCE;\
+	type HPO_IO_EN;
 
 struct dce_hwseq_shift {
 	HWSEQ_REG_FIELD_LIST(uint8_t)


@@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
 #endif
 
 	if (dc_is_dp_signal(pipe_ctx->stream->signal))
 		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx))
+		dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false);
+#endif
+
 }
 
 void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,


@@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
 		if (!s->blank_en)
 			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
-				"% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
 				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,


@@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 	 * BY this, it is logic clean to separate stream and link
 	 */
 	if (is_dp_128b_132b_signal(pipe_ctx)) {
+		if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
+			pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
+					pipe_ctx->stream->ctx->dc->hwseq, true);
 		setup_dp_hpo_stream(pipe_ctx, true);
 		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
 				pipe_ctx->stream_res.hpo_dp_stream_enc);


@@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
 
 }
 
-static void mpc3_mpc_init(struct mpc *mpc)
+static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
 {
 	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
 	int mpcc_id;
 
-	mpc1_mpc_init(mpc);
-
 	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
 		if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) {
 			REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3);
@@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
 	.read_mpcc_state = mpc1_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
-	.mpc_init = mpc3_mpc_init,
+	.mpc_init = mpc1_mpc_init,
 	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
 	.update_blending = mpc2_update_blending,
 	.cursor_lock = mpc1_cursor_lock,
@@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
 	.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
 	.get_mpc_out_mux = mpc1_get_mpc_out_mux,
 	.set_bg_color = mpc1_set_bg_color,
+	.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
 };
 
 void dcn30_mpc_construct(struct dcn30_mpc *mpc30,


@@ -2128,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
 		int pipe_cnt,
 		int vlevel)
 {
+	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
 	int i, pipe_idx;
-	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
-			dm_dram_clock_change_unsupported;
+	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
 
 	if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
 		dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2207,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
 	}
+
 	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;


@@ -66,6 +66,45 @@
 #define FN(reg_name, field_name) \
 	hws->shifts->field_name, hws->masks->field_name
 
+static void enable_memory_low_power(struct dc *dc)
+{
+	struct dce_hwseq *hws = dc->hwseq;
+	int i;
+
+	if (dc->debug.enable_mem_low_power.bits.dmcu) {
+		// Force ERAM to shutdown if DMCU is not enabled
+		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
+			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
+		}
+	}
+
+	// Set default OPTC memory power states
+	if (dc->debug.enable_mem_low_power.bits.optc) {
+		// Shutdown when unassigned and light sleep in VBLANK
+		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
+	}
+
+	if (dc->debug.enable_mem_low_power.bits.vga) {
+		// Power down VGA memory
+		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+	}
+
+	if (dc->debug.enable_mem_low_power.bits.mpc)
+		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
+
+	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+		// Power down VPGs
+		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+	}
+}
+
 void dcn31_init_hw(struct dc *dc)
 {
 	struct abm **abms = dc->res_pool->multiple_abms;
@@ -108,35 +147,7 @@ void dcn31_init_hw(struct dc *dc)
 	if (res_pool->dccg->funcs->dccg_init)
 		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
 
-	if (dc->debug.enable_mem_low_power.bits.dmcu) {
-		// Force ERAM to shutdown if DMCU is not enabled
-		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
-			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
-		}
-	}
-
-	// Set default OPTC memory power states
-	if (dc->debug.enable_mem_low_power.bits.optc) {
-		// Shutdown when unassigned and light sleep in VBLANK
-		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
-	}
-
-	if (dc->debug.enable_mem_low_power.bits.vga) {
-		// Power down VGA memory
-		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
-	}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
-		// Power down VPGs
-		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
-			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_DP2_0)
-		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
-			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
-	}
-#endif
+	enable_memory_low_power(dc);
 
 	if (dc->ctx->dc_bios->fw_info_valid) {
 		res_pool->ref_clocks.xtalin_clock_inKhz =
@@ -264,6 +275,9 @@ void dcn31_init_hw(struct dc *dc)
 	if (dc->debug.enable_mem_low_power.bits.i2c)
 		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
 
+	if (hws->funcs.setup_hpo_hw_control)
+		hws->funcs.setup_hpo_hw_control(hws, false);
+
 	if (!dc->debug.disable_clock_gate) {
 		/* enable all DCN clock gating */
 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
@@ -597,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap(
 		/* New dc_state in the process of being applied to hardware. */
 		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT;
 }
+
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
+{
+	if (hws->ctx->dc->debug.hpo_optimization)
+		REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
+}


@@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap(
 bool dcn31_is_abm_supported(struct dc *dc,
 		struct dc_state *context, struct dc_stream_state *stream);
 void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
 
 #endif /* __DC_HWSS_DCN31_H__ */


@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = {
 	.dccg_init = dcn20_dccg_init,
 	.set_blend_lut = dcn30_set_blend_lut,
 	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+	.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
 };
 
 void dcn31_hw_sequencer_construct(struct dc *dc)


@@ -860,7 +860,8 @@ static const struct dccg_mask dccg_mask = {
 	SR(D6VGA_CONTROL), \
 	SR(DC_IP_REQUEST_CNTL), \
 	SR(AZALIA_AUDIO_DTO), \
-	SR(AZALIA_CONTROLLER_CLOCK_GATING)
+	SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+	SR(HPO_TOP_HW_CONTROL)
 
 static const struct dce_hwseq_registers hwseq_reg = {
 		HWSEQ_DCN31_REG_LIST()
@@ -898,7 +899,8 @@ static const struct dce_hwseq_registers hwseq_reg = {
 	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
 	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
 	HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
-	HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh)
+	HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+	HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
 
 static const struct dce_hwseq_shift hwseq_shift = {
 		HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)


@@ -3576,16 +3576,9 @@ static double TruncToValidBPP(
 			MinDSCBPP = 8;
 			MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
 		} else {
-			if (Output == dm_hdmi) {
-				NonDSCBPP0 = 24;
-				NonDSCBPP1 = 24;
-				NonDSCBPP2 = 24;
-			}
-			else {
-				NonDSCBPP0 = 16;
-				NonDSCBPP1 = 20;
-				NonDSCBPP2 = 24;
-			}
+			NonDSCBPP0 = 16;
+			NonDSCBPP1 = 20;
+			NonDSCBPP2 = 24;
 
 			if (Format == dm_n422) {
 				MinDSCBPP = 7;


@@ -3892,15 +3892,11 @@ static double TruncToValidBPP(
 			MinDSCBPP = 8;
 			MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
 		} else {
-			if (Output == dm_hdmi) {
-				NonDSCBPP0 = 24;
-				NonDSCBPP1 = 24;
-				NonDSCBPP2 = 24;
-			} else {
-				NonDSCBPP0 = 16;
-				NonDSCBPP1 = 20;
-				NonDSCBPP2 = 24;
-			}
+			NonDSCBPP0 = 16;
+			NonDSCBPP1 = 20;
+			NonDSCBPP2 = 24;
 
 			if (Format == dm_n422) {
 				MinDSCBPP = 7;
 				MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;


@@ -367,6 +367,7 @@ struct mpc_funcs {
 	void (*set_bg_color)(struct mpc *mpc,
 			struct tg_color *bg_color,
 			int mpcc_id);
+	void (*set_mpc_mem_lp_mode)(struct mpc *mpc);
 };
 
 #endif


@@ -143,6 +143,7 @@ struct hwseq_private_funcs {
 			const struct dc_plane_state *plane_state);
 	void (*PLAT_58856_wa)(struct dc_state *context,
 			struct pipe_ctx *pipe_ctx);
+	void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
 };
 
 struct dce_hwseq {


@@ -238,6 +238,7 @@ struct dmub_srv_hw_params {
 	bool load_inst_const;
 	bool skip_panel_power_sequence;
 	bool disable_z10;
+	bool power_optimization;
 	bool dpia_supported;
 	bool disable_dpia;
 };


@@ -46,10 +46,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5
+#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 90
+#define DMUB_FW_VERSION_REVISION 91
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0


@@ -340,6 +340,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
 	boot_options.bits.z10_disable = params->disable_z10;
 	boot_options.bits.dpia_supported = params->dpia_supported;
 	boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
+	boot_options.bits.power_optimization = params->power_optimization;
 
 	boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;


@@ -2094,6 +2094,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
 		if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
+		if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
+		    amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+			*states = ATTR_STATE_UNSUPPORTED;
 	}
 
 	switch (asic_type) {


@@ -51,7 +51,7 @@
 #define PPSMC_MSG_PowerUpVcn                    0x07  ///< Power up VCN; VCN is power gated by default
 #define PPSMC_MSG_SetHardMinVcn                 0x08  ///< For wireless display
 #define PPSMC_MSG_SetSoftMinGfxclk              0x09  ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_ActiveProcessNotify           0x0A  ///< Set active work load type
+#define PPSMC_MSG_ActiveProcessNotify           0x0A  ///< Deprecated (Not to be used)
 #define PPSMC_MSG_ForcePowerDownGfx             0x0B  ///< Force power down GFX, i.e. enter GFXOFF
 #define PPSMC_MSG_PrepareMp1ForUnload           0x0C  ///< Prepare PMFW for GFX driver unload
 #define PPSMC_MSG_SetDriverDramAddrHigh         0x0D  ///< Set high 32 bits of DRAM address for Driver table transfer
@@ -63,7 +63,7 @@
 #define PPSMC_MSG_SetHardMinSocclkByFreq        0x13  ///< Set hard min for SOC CLK
 #define PPSMC_MSG_SetSoftMinFclk                0x14  ///< Set hard min for FCLK
 #define PPSMC_MSG_SetSoftMinVcn                 0x15  ///< Set soft min for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_SPARE0                        0x16  ///< Spared
+#define PPSMC_MSG_SPARE                         0x16  ///< Spare
 #define PPSMC_MSG_GetGfxclkFrequency            0x17  ///< Get GFX clock frequency
 #define PPSMC_MSG_GetFclkFrequency              0x18  ///< Get FCLK frequency
 #define PPSMC_MSG_AllowGfxOff                   0x19  ///< Inform PMFW of allowing GFXOFF entry


@@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
+	int ret;
 
-	if (!hwmgr || !hwmgr->pm_en || !buf)
+	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
+		return -EOPNOTSUPP;
+	if (!buf)
 		return -EINVAL;
 
-	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
-		pr_info_ratelimited("%s was not implemented.\n", __func__);
-		return snprintf(buf, PAGE_SIZE, "\n");
-	}
-
-	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
+	mutex_lock(&hwmgr->smu_lock);
+	ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
+	mutex_unlock(&hwmgr->smu_lock);
+	return ret;
 }
 
 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = -EINVAL;
+	int ret = -EOPNOTSUPP;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
 		return ret;
 
-	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
-		pr_info_ratelimited("%s was not implemented.\n", __func__);
-		return ret;
-	}
-
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
 		pr_debug("power profile setting is for manual dpm mode only.\n");
-		return ret;
+		return -EINVAL;
 	}
 
 	mutex_lock(&hwmgr->smu_lock);
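
The reworked checks above separate "not supported" from "bad request": a missing backend hook now yields -EOPNOTSUPP, which lets the amdgpu_pm.c hunk earlier probe support and hide the sysfs attribute, while a NULL buffer or wrong dpm level stays -EINVAL. A standalone sketch of that convention (hypothetical names):

#include <errno.h>
#include <stddef.h>

struct backend { int (*get_mode)(char *buf); };

/* -EOPNOTSUPP when the capability is absent, -EINVAL for a bad argument.
 * Callers can then probe for support by passing buf == NULL: supported
 * backends answer -EINVAL, unsupported ones -EOPNOTSUPP.
 */
static int get_mode(struct backend *be, char *buf)
{
    if (!be || !be->get_mode)
        return -EOPNOTSUPP;
    if (!buf)
        return -EINVAL;
    return be->get_mode(buf);
}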


@@ -1024,6 +1024,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t min_freq, max_freq = 0;
 	uint32_t ret = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1065,7 +1067,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (ret)
 			return ret;
 
-		size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
 		(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
 		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -1081,7 +1083,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		if (ret)
 			return ret;
 
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 		size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 			min_freq, max_freq);
 	}
@@ -1456,6 +1458,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
 			title[1], title[2], title[3], title[4], title[5]);


@@ -4914,6 +4914,8 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int size = 0;
 	uint32_t i, now, clock, pcie_speed;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4963,7 +4965,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
 					i, odn_sclk_table->entries[i].clock/100,
@@ -4972,7 +4974,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
 					i, odn_mclk_table->entries[i].clock/100,
@@ -4981,7 +4983,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5518,6 +5520,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
 			title[0], title[1], title[2], title[3],
 			title[4], title[5], title[6], title[7]);


@@ -1550,6 +1550,8 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t i, now;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,


@@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev,
 			struct amdgpu_irq_src *source,
 			struct amdgpu_iv_entry *entry);
 
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void phm_get_sysfs_buf(char **buf, int *offset)
+{
+	if (!*buf || !offset)
+		return;
+
+	*offset = offset_in_page(*buf);
+	*buf -= *offset;
+}
+
 int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
 
 void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
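Background on the helper above, since every hwmgr hunk in this series leans on it: sysfs_emit_at() refuses to write (and WARNs) unless the buffer it receives is the page-aligned start of the sysfs page, while several amdgpu print paths are handed a pointer that already sits partway into that page. The helper rewinds to the page boundary and seeds the running size with the original offset. A rough sketch of the resulting calling pattern; example_show() and the literal values are hypothetical:

	static int example_show(struct pp_hwmgr *hwmgr, char *buf)
	{
		int size = 0;

		/* rewind buf to its page boundary; size now holds the offset */
		phm_get_sysfs_buf(&buf, &size);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 500);

		/* callers treat this as the byte count from the page start */
		return size;
	}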


@@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -4637,6 +4639,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int i, now, size = 0, count = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		if (data->registry_data.sclk_dpm_key_disabled)
@@ -4717,7 +4721,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4727,7 +4731,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
 				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
@@ -4737,7 +4741,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
@@ -5112,6 +5116,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
 			title[1], title[2], title[3], title[4], title[5]);


@@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -2244,6 +2246,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int i, now, size = 0;
 	struct pp_clock_levels_with_latency clocks;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		PP_ASSERT_WITH_CODE(


@@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 	int ret = 0;
 	int size = 0;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
 	PP_ASSERT_WITH_CODE(!ret,
 		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
@@ -3364,6 +3366,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int ret = 0;
 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	switch (type) {
 	case PP_SCLK:
 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3479,7 +3483,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	case OD_SCLK:
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
 			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
 				od_table->GfxclkFmin);
 			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -3489,7 +3493,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	case OD_MCLK:
 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
 			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
 				od_table->UclkFmax);
 		}
@@ -3503,7 +3507,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-			size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE");
+			size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
 			size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
 				od_table->GfxclkFreq1,
 				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
@@ -3518,7 +3522,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_RANGE:
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
@@ -4003,6 +4007,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	if (!buf)
 		return -EINVAL;
 
+	phm_get_sysfs_buf(&buf, &size);
+
 	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
 			title[0], title[1], title[2], title[3], title[4], title[5],
 			title[6], title[7], title[8], title[9], title[10]);


@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 			dev_err(adev->dev, "Failed to disable smu features.\n");
 	}
 
-	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) &&
+	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
 	    adev->gfx.rlc.funcs->stop)
 		adev->gfx.rlc.funcs->stop(adev);
 
@@ -2534,12 +2534,14 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+	    !smu->ppt_funcs->get_power_profile_mode)
 		return -EOPNOTSUPP;
+	if (!buf)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_power_profile_mode)
 	ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
 
 	mutex_unlock(&smu->mutex);
 
@@ -2554,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle,
 	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
+	    !smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
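One convention worth noting across the powerplay and SMU hunks above: an ASIC that simply lacks a callback now fails up front with -EOPNOTSUPP instead of logging "%s was not implemented" or emitting a blank line, and -EINVAL is kept for genuinely bad input. The yellow carp changes below rely on this when they drop the profile-mode callbacks entirely. A hedged sketch of what a caller sees (smu_handle is hypothetical):

	int ret = smu_get_power_profile_mode(smu_handle, buf);

	/* no ->get_power_profile_mode (e.g. yellow carp after this series):
	 * ret == -EOPNOTSUPP, so the sysfs read fails with errno 95 instead
	 * of returning an empty table */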


@@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
 	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
 	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
-	MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
 	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
 	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
 	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
@@ -136,14 +135,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP_VALID(DPMCLOCKS),
 };
 
-static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
-};
-
 static int yellow_carp_init_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu,
 	return 0;
 }
 
-static int yellow_carp_get_power_profile_mode(struct smu_context *smu,
-					      char *buf)
-{
-	static const char *profile_name[] = {
-					"BOOTUP_DEFAULT",
-					"3D_FULL_SCREEN",
-					"POWER_SAVING",
-					"VIDEO",
-					"VR",
-					"COMPUTE",
-					"CUSTOM"};
-	uint32_t i, size = 0;
-	int16_t workload_type = 0;
-
-	if (!buf)
-		return -EINVAL;
-
-	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
-		/*
-		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
-		 * Not all profile modes are supported on yellow carp.
-		 */
-		workload_type = smu_cmn_to_asic_specific_index(smu,
-							       CMN2ASIC_MAPPING_WORKLOAD,
-							       i);
-		if (workload_type < 0)
-			continue;
-
-		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
-				      i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
-	}
-
-	return size;
-}
-
-static int yellow_carp_set_power_profile_mode(struct smu_context *smu,
-					      long *input, uint32_t size)
-{
-	int workload_type, ret;
-	uint32_t profile_mode = input[size];
-
-	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
-		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
-		return -EINVAL;
-	}
-
-	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
-	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
-		return 0;
-
-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-	workload_type = smu_cmn_to_asic_specific_index(smu,
-						       CMN2ASIC_MAPPING_WORKLOAD,
-						       profile_mode);
-	if (workload_type < 0) {
-		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n",
-			profile_mode);
-		return -EINVAL;
-	}
-
-	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-					      1 << workload_type,
-					      NULL);
-	if (ret) {
-		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
-			     workload_type);
-		return ret;
-	}
-
-	smu->power_profile_mode = profile_mode;
-
-	return 0;
-}
-
 static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
 					   void **table)
 {
@@ -1238,8 +1154,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
 	.read_sensor = yellow_carp_read_sensor,
 	.is_dpm_running = yellow_carp_is_dpm_running,
 	.set_watermarks_table = yellow_carp_set_watermarks_table,
-	.get_power_profile_mode = yellow_carp_get_power_profile_mode,
-	.set_power_profile_mode = yellow_carp_set_power_profile_mode,
 	.get_gpu_metrics = yellow_carp_get_gpu_metrics,
 	.get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
@@ -1261,6 +1175,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
 	smu->message_map = yellow_carp_message_map;
 	smu->feature_map = yellow_carp_feature_mask_map;
 	smu->table_map = yellow_carp_table_map;
-	smu->workload_map = yellow_carp_workload_map;
 	smu->is_apu = true;
 }


@@ -167,9 +167,10 @@ static void lt9611uxc_hpd_work(struct work_struct *work)
 	struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
 	bool connected;
 
-	if (lt9611uxc->connector.dev)
-		drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
-	else {
+	if (lt9611uxc->connector.dev) {
+		if (lt9611uxc->connector.dev->mode_config.funcs)
+			drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+	} else {
 
 		mutex_lock(&lt9611uxc->ocm_lock);
 		connected = lt9611uxc->hdmi_connected;
@@ -339,6 +340,8 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc
 		return -ENODEV;
 	}
 
+	lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
 	drm_connector_helper_add(&lt9611uxc->connector,
 				 &lt9611uxc_bridge_connector_helper_funcs);
 	ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,


@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_panel.h>
 
@@ -22,6 +23,7 @@ struct lvds_codec {
 	struct regulator *vcc;
 	struct gpio_desc *powerdown_gpio;
 	u32 connector_type;
+	unsigned int bus_format;
 };
 
 static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
@@ -74,12 +76,50 @@ static const struct drm_bridge_funcs funcs = {
 	.disable = lvds_codec_disable,
 };
 
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *
+lvds_codec_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+				     struct drm_bridge_state *bridge_state,
+				     struct drm_crtc_state *crtc_state,
+				     struct drm_connector_state *conn_state,
+				     u32 output_fmt,
+				     unsigned int *num_input_fmts)
+{
+	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+	u32 *input_fmts;
+
+	*num_input_fmts = 0;
+
+	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+			     GFP_KERNEL);
+	if (!input_fmts)
+		return NULL;
+
+	input_fmts[0] = lvds_codec->bus_format;
+	*num_input_fmts = MAX_INPUT_SEL_FORMATS;
+
+	return input_fmts;
+}
+
+static const struct drm_bridge_funcs funcs_decoder = {
+	.attach = lvds_codec_attach,
+	.enable = lvds_codec_enable,
+	.disable = lvds_codec_disable,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_get_input_bus_fmts = lvds_codec_atomic_get_input_bus_fmts,
+};
+
 static int lvds_codec_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *panel_node;
+	struct device_node *bus_node;
 	struct drm_panel *panel;
 	struct lvds_codec *lvds_codec;
+	const char *mapping;
+	int ret;
 
 	lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
 	if (!lvds_codec)
@@ -119,13 +159,47 @@ static int lvds_codec_probe(struct platform_device *pdev)
 	if (IS_ERR(lvds_codec->panel_bridge))
 		return PTR_ERR(lvds_codec->panel_bridge);
 
+	lvds_codec->bridge.funcs = &funcs;
+
+	/*
+	 * Decoder input LVDS format is a property of the decoder chip or even
+	 * its strapping. Handle data-mapping the same way lvds-panel does. In
+	 * case data-mapping is not present, do nothing, since there are still
+	 * legacy bindings which do not specify this property.
+	 */
+	if (lvds_codec->connector_type != DRM_MODE_CONNECTOR_LVDS) {
+		bus_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+		if (!bus_node) {
+			dev_dbg(dev, "bus DT node not found\n");
+			return -ENXIO;
+		}
+
+		ret = of_property_read_string(bus_node, "data-mapping",
+					      &mapping);
+		of_node_put(bus_node);
+		if (ret < 0) {
+			dev_warn(dev, "missing 'data-mapping' DT property\n");
+		} else {
+			if (!strcmp(mapping, "jeida-18")) {
+				lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
+			} else if (!strcmp(mapping, "jeida-24")) {
+				lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+			} else if (!strcmp(mapping, "vesa-24")) {
+				lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+			} else {
+				dev_err(dev, "invalid 'data-mapping' DT property\n");
+				return -EINVAL;
+			}
+			lvds_codec->bridge.funcs = &funcs_decoder;
+		}
+	}
+
 	/*
 	 * The panel_bridge bridge is attached to the panel's of_node,
 	 * but we need a bridge attached to our of_node for our user
 	 * to look up.
 	 */
 	lvds_codec->bridge.of_node = dev->of_node;
-	lvds_codec->bridge.funcs = &funcs;
 	drm_bridge_add(&lvds_codec->bridge);
 
 	platform_set_drvdata(pdev, lvds_codec);


@@ -939,6 +939,40 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
 	drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
 }
 
+static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+						 struct drm_bridge_state *bridge_state,
+						 struct drm_crtc_state *crtc_state,
+						 struct drm_connector_state *conn_state,
+						 u32 output_fmt,
+						 unsigned int *num_input_fmts)
+{
+	u32 *input_fmts, input_fmt;
+
+	*num_input_fmts = 0;
+
+	switch (output_fmt) {
+	/* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+	case MEDIA_BUS_FMT_FIXED:
+		input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+	case MEDIA_BUS_FMT_RGB666_1X18:
+	case MEDIA_BUS_FMT_RGB565_1X16:
+		input_fmt = output_fmt;
+		break;
+	default:
+		return NULL;
+	}
+
+	input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+	if (!input_fmts)
+		return NULL;
+	input_fmts[0] = input_fmt;
+	*num_input_fmts = 1;
+
+	return input_fmts;
+}
+
 static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
 	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -946,6 +980,7 @@ static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
 	.atomic_check = nwl_dsi_bridge_atomic_check,
 	.atomic_enable = nwl_dsi_bridge_atomic_enable,
 	.atomic_disable = nwl_dsi_bridge_atomic_disable,
+	.atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts,
 	.mode_set = nwl_dsi_bridge_mode_set,
 	.mode_valid = nwl_dsi_bridge_mode_valid,
 	.attach = nwl_dsi_bridge_attach,
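The lvds-codec and nwl-dsi hunks above hook into the same negotiation machinery: during atomic check the core walks the bridge chain from the connector backwards, asking each bridge via .atomic_get_input_bus_fmts which input formats could produce the output format chosen downstream; the callback returns a kcalloc()'d array that the core frees, or NULL to veto the combination. The chosen format then lands in the bridge state, roughly as in this hedged sketch (example_bridge_atomic_check is hypothetical):

	static int example_bridge_atomic_check(struct drm_bridge *bridge,
					       struct drm_bridge_state *bridge_state,
					       struct drm_crtc_state *crtc_state,
					       struct drm_connector_state *conn_state)
	{
		/* picked from the array returned by .atomic_get_input_bus_fmts */
		u32 fmt = bridge_state->input_bus_cfg.format;

		return fmt == MEDIA_BUS_FMT_RGB888_1X24 ? 0 : -EINVAL;
	}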


@@ -288,6 +288,19 @@ static int sn65dsi83_attach(struct drm_bridge *bridge,
 	return ret;
 }
 
+static void sn65dsi83_detach(struct drm_bridge *bridge)
+{
+	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+	if (!ctx->dsi)
+		return;
+
+	mipi_dsi_detach(ctx->dsi);
+	mipi_dsi_device_unregister(ctx->dsi);
+	drm_bridge_remove(&ctx->bridge);
+	ctx->dsi = NULL;
+}
+
 static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
 					struct drm_bridge_state *old_bridge_state)
 {
@@ -583,6 +596,7 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
 static const struct drm_bridge_funcs sn65dsi83_funcs = {
 	.attach = sn65dsi83_attach,
+	.detach = sn65dsi83_detach,
 	.atomic_pre_enable = sn65dsi83_atomic_pre_enable,
 	.atomic_enable = sn65dsi83_atomic_enable,
 	.atomic_disable = sn65dsi83_atomic_disable,
@@ -697,9 +711,6 @@ static int sn65dsi83_remove(struct i2c_client *client)
 {
 	struct sn65dsi83 *ctx = i2c_get_clientdata(client);
 
-	mipi_dsi_detach(ctx->dsi);
-	mipi_dsi_device_unregister(ctx->dsi);
-	drm_bridge_remove(&ctx->bridge);
 	of_node_put(ctx->host_node);
 
 	return 0;


@@ -625,6 +625,8 @@ int drm_connector_register_all(struct drm_device *dev)
  *
  * In contrast to the other drm_get_*_name functions this one here returns a
  * const pointer and hence is threadsafe.
+ *
+ * Returns: connector status string
  */
 const char *drm_get_connector_status_name(enum drm_connector_status status)
 {
@@ -707,7 +709,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
  * drm_connector_list_iter_next - return next connector
  * @iter: connector_list iterator
  *
- * Returns the next connector for @iter, or NULL when the list walk has
+ * Returns: the next connector for @iter, or NULL when the list walk has
  * completed.
  */
 struct drm_connector *
@@ -780,6 +782,8 @@ static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
  *
  * Note you could abuse this and return something out of bounds, but that
  * would be a caller error. No unscrubbed user data should make it here.
+ *
+ * Returns: string describing an enumerated subpixel property
  */
 const char *drm_get_subpixel_order_name(enum subpixel_order order)
 {
@@ -809,6 +813,9 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
  * Store the supported bus formats in display info structure.
  * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
  * a full list of available formats.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_display_info_set_bus_formats(struct drm_display_info *info,
 				     const u32 *formats,
@@ -1326,6 +1333,8 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
  * @dev: DRM device
  *
  * Called by a driver the first time a DVI-I connector is made.
+ *
+ * Returns: %0
  */
 int drm_mode_create_dvi_i_properties(struct drm_device *dev)
 {
@@ -1397,6 +1406,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
  *	Game:
  *		Content type is game
  *
+ * The meaning of each content type is defined in CTA-861-G table 15.
+ *
  * Drivers can set up this property by calling
  * drm_connector_attach_content_type_property(). Decoding to
  * infoframe values is done through drm_hdmi_avi_infoframe_content_type().
@@ -1407,6 +1418,8 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
  * @connector: connector to attach content type property on.
  *
  * Called by a driver the first time a HDMI connector is made.
+ *
+ * Returns: %0
  */
 int drm_connector_attach_content_type_property(struct drm_connector *connector)
 {
@@ -1487,6 +1500,9 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
 * creates the TV margin properties for a given device. No need to call this
 * function for an SDTV connector, it's already called from
 * drm_mode_create_tv_properties().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_mode_create_tv_margin_properties(struct drm_device *dev)
 {
@@ -1527,6 +1543,9 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
 * the TV specific connector properties for a given device. Caller is
 * responsible for allocating a list of format names and passing them to
 * this routine.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_mode_create_tv_properties(struct drm_device *dev,
 				  unsigned int num_modes,
@@ -1622,6 +1641,8 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
 * Atomic drivers should use drm_connector_attach_scaling_mode_property()
 * instead to correctly assign &drm_connector_state.scaling_mode
 * in the atomic state.
+ *
+ * Returns: %0
  */
 int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 {
@@ -1939,6 +1960,9 @@ EXPORT_SYMBOL(drm_mode_create_content_type_property);
 * @dev: DRM device
 *
 * Create the suggested x/y offset property for connectors.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
  */
 int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
 {
@@ -2312,8 +2336,8 @@ int drm_connector_set_panel_orientation(
 EXPORT_SYMBOL(drm_connector_set_panel_orientation);
 
 /**
- * drm_connector_set_panel_orientation_with_quirk -
- *	set the connector's panel_orientation after checking for quirks
+ * drm_connector_set_panel_orientation_with_quirk - set the
+ *	connector's panel_orientation after checking for quirks
  * @connector: connector for which to init the panel-orientation property.
  * @panel_orientation: drm_panel_orientation value to set
  * @width: width in pixels of the panel, used for panel quirk detection
@@ -2597,7 +2621,7 @@ struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
 /**
  * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
- * @connector: connector to report the event on
+ * @connector_fwnode: fwnode_handle to report the event on
  *
  * On some hardware a hotplug event notification may come from outside the display
  * driver / device. An example of this is some USB Type-C setups where the hardware


@@ -1340,31 +1340,15 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 				     struct drm_gem_object *obj,
 				     bool write)
 {
-	int ret;
-	struct dma_fence **fences;
-	unsigned int i, fence_count;
-
-	if (!write) {
-		struct dma_fence *fence =
-			dma_resv_get_excl_unlocked(obj->resv);
-
-		return drm_gem_fence_array_add(fence_array, fence);
-	}
-
-	ret = dma_resv_get_fences(obj->resv, NULL,
-				  &fence_count, &fences);
-	if (ret || !fence_count)
-		return ret;
-
-	for (i = 0; i < fence_count; i++) {
-		ret = drm_gem_fence_array_add(fence_array, fences[i]);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	int ret = 0;
+
+	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+		ret = drm_gem_fence_array_add(fence_array, fence);
 		if (ret)
 			break;
 	}
-
-	for (; i < fence_count; i++)
-		dma_fence_put(fences[i]);
-	kfree(fences);
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
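This is a first user of the dma_resv_for_each_fence() iterator added in this pull: it hides the shared/exclusive-fence bookkeeping and the per-fence reference handling that the open-coded version above had to get right. A minimal hedged sketch of a consumer; count_fences() is hypothetical and, like the non-_unlocked variant used here, assumes the reservation lock is held across the walk:

	static unsigned int count_fences(struct dma_resv *resv, bool all_fences)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		unsigned int n = 0;

		dma_resv_for_each_fence(&cursor, resv, all_fences, fence)
			n++;	/* fence references are managed by the iterator */

		return n;
	}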


@@ -25,6 +25,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
 #include <drm/drm_modeset_lock.h>
+#include <drm/drm_print.h>
 
 /**
  * DOC: kms locking
@@ -77,6 +78,45 @@
 
 static DEFINE_WW_CLASS(crtc_ww_class);
 
+#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
+static noinline depot_stack_handle_t __drm_stack_depot_save(void)
+{
+	unsigned long entries[8];
+	unsigned int n;
+
+	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
+{
+	struct drm_printer p = drm_debug_printer("drm_modeset_lock");
+	unsigned long *entries;
+	unsigned int nr_entries;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+	if (!buf)
+		return;
+
+	nr_entries = stack_depot_fetch(stack_depot, &entries);
+	stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);
+
+	drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);
+
+	kfree(buf);
+}
+#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
+static depot_stack_handle_t __drm_stack_depot_save(void)
+{
+	return 0;
+}
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
+{
+}
+#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
+
 /**
  * drm_modeset_lock_all - take all modeset locks
  * @dev: DRM device
@@ -225,7 +265,9 @@ EXPORT_SYMBOL(drm_modeset_acquire_fini);
  */
 void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
 {
-	WARN_ON(ctx->contended);
+	if (WARN_ON(ctx->contended))
+		__drm_stack_depot_print(ctx->stack_depot);
+
 	while (!list_empty(&ctx->locked)) {
 		struct drm_modeset_lock *lock;
@@ -243,7 +285,8 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 {
 	int ret;
 
-	WARN_ON(ctx->contended);
+	if (WARN_ON(ctx->contended))
+		__drm_stack_depot_print(ctx->stack_depot);
 
 	if (ctx->trylock_only) {
 		lockdep_assert_held(&ctx->ww_ctx);
@@ -274,6 +317,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 		ret = 0;
 	} else if (ret == -EDEADLK) {
 		ctx->contended = lock;
+		ctx->stack_depot = __drm_stack_depot_save();
 	}
 
 	return ret;
@@ -296,6 +340,7 @@ int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
 	struct drm_modeset_lock *contended = ctx->contended;
 
 	ctx->contended = NULL;
+	ctx->stack_depot = 0;
 
 	if (WARN_ON(!contended))
 		return 0;
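The saved stack only fires when a driver forgets the ww-mutex deadlock dance, so the expected pattern is worth restating. A minimal sketch, assuming a single lock and the usual -EDEADLK convention (crtc is hypothetical):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* clears ctx->contended and, now, the saved stack */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}
	/* ... perform the update ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);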


@@ -123,7 +123,6 @@ static int drm_plane_helper_check_update(struct drm_plane *plane,
 		.crtc_w = drm_rect_width(dst),
 		.crtc_h = drm_rect_height(dst),
 		.rotation = rotation,
-		.visible = *visible,
 	};
 	struct drm_crtc_state crtc_state = {
 		.crtc = crtc,


@@ -722,11 +722,13 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	if (obj->funcs && obj->funcs->mmap) {
 		vma->vm_ops = obj->funcs->vm_ops;
 
-		ret = obj->funcs->mmap(obj, vma);
-		if (ret)
-			return ret;
-		vma->vm_private_data = obj;
 		drm_gem_object_get(obj);
+		ret = obj->funcs->mmap(obj, vma);
+		if (ret) {
+			drm_gem_object_put(obj);
+			return ret;
+		}
+		vma->vm_private_data = obj;
 		return 0;
 	}
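The reordering above is easy to misread, so here is the new flow again with comments added (same code, annotated):

	drm_gem_object_get(obj);		/* take the vma's reference first */
	ret = obj->funcs->mmap(obj, vma);	/* driver may rely on it existing */
	if (ret) {
		drm_gem_object_put(obj);	/* error path undoes the early get */
		return ret;
	}
	vma->vm_private_data = obj;		/* mapping now owns that reference */

Previously the reference was taken only after a successful ->mmap(), leaving a window in which the partially constructed mapping held no reference on the object.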


@@ -584,6 +584,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
 	else
 		intel_encoder->enable = g4x_enable_hdmi;
 	}
+	intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
 	intel_encoder->power_domain = intel_port_to_power_domain(port);


@@ -1707,6 +1707,39 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
 	child->aux_channel = 0;
 }
 
+static u8 dvo_port_type(u8 dvo_port)
+{
+	switch (dvo_port) {
+	case DVO_PORT_HDMIA:
+	case DVO_PORT_HDMIB:
+	case DVO_PORT_HDMIC:
+	case DVO_PORT_HDMID:
+	case DVO_PORT_HDMIE:
+	case DVO_PORT_HDMIF:
+	case DVO_PORT_HDMIG:
+	case DVO_PORT_HDMIH:
+	case DVO_PORT_HDMII:
+		return DVO_PORT_HDMIA;
+	case DVO_PORT_DPA:
+	case DVO_PORT_DPB:
+	case DVO_PORT_DPC:
+	case DVO_PORT_DPD:
+	case DVO_PORT_DPE:
+	case DVO_PORT_DPF:
+	case DVO_PORT_DPG:
+	case DVO_PORT_DPH:
+	case DVO_PORT_DPI:
+		return DVO_PORT_DPA;
+	case DVO_PORT_MIPIA:
+	case DVO_PORT_MIPIB:
+	case DVO_PORT_MIPIC:
+	case DVO_PORT_MIPID:
+		return DVO_PORT_MIPIA;
+	default:
+		return dvo_port;
+	}
+}
+
 static enum port __dvo_port_to_port(int n_ports, int n_dvo,
 				    const int port_mapping[][3], u8 dvo_port)
 {
@@ -1930,50 +1963,6 @@ static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata,
 	}
 }
 
-static enum port get_edp_port(struct drm_i915_private *i915)
-{
-	const struct intel_bios_encoder_data *devdata;
-	enum port port;
-
-	for_each_port(port) {
-		devdata = i915->vbt.ports[port];
-
-		if (devdata && intel_bios_encoder_supports_edp(devdata))
-			return port;
-	}
-
-	return PORT_NONE;
-}
-
-/*
- * FIXME: The power sequencer and backlight code currently do not support more
- * than one set registers, at least not on anything other than VLV/CHV. It will
- * clobber the registers. As a temporary workaround, gracefully prevent more
- * than one eDP from being registered.
- */
-static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata,
-			      enum port port)
-{
-	struct drm_i915_private *i915 = devdata->i915;
-	struct child_device_config *child = &devdata->child;
-	enum port p;
-
-	/* CHV might not clobber PPS registers. */
-	if (IS_CHERRYVIEW(i915))
-		return;
-
-	p = get_edp_port(i915);
-	if (p == PORT_NONE)
-		return;
-
-	drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, "
-		    "disabling port %c eDP\n", port_name(p), port_name(port),
-		    port_name(port));
-
-	child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
-	child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR;
-}
-
 static bool is_port_valid(struct drm_i915_private *i915, enum port port)
 {
 	/*
@@ -2031,9 +2020,6 @@ static void parse_ddi_port(struct drm_i915_private *i915,
 		    supports_typec_usb, supports_tbt,
 		    devdata->dsc != NULL);
 
-	if (is_edp)
-		sanitize_dual_edp(devdata, port);
-
 	if (is_dvi)
 		sanitize_ddc_pin(devdata, port);
@@ -2670,7 +2656,24 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
 	return false;
 }
 
-static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child)
+{
+	if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+	    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+		return false;
+
+	if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA)
+		return true;
+
+	/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
+	if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA &&
+	    child->aux_channel != 0)
+		return true;
+
+	return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
 				      enum port port)
 {
 	static const struct {
@@ -2686,32 +2689,23 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
 		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
 		[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
 	};
+	const struct intel_bios_encoder_data *devdata;
+
+	if (HAS_DDI(i915)) {
+		const struct intel_bios_encoder_data *devdata;
+
+		devdata = intel_bios_encoder_data_lookup(i915, port);
+
+		return devdata && child_dev_is_dp_dual_mode(&devdata->child);
+	}
 
 	if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
 		return false;
 
-	if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
-	    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
-		return false;
-
-	if (child->dvo_port == port_mapping[port].dp)
-		return true;
-
-	/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
-	if (child->dvo_port == port_mapping[port].hdmi &&
-	    child->aux_channel != 0)
-		return true;
-
-	return false;
-}
-
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
-				     enum port port)
-{
-	const struct intel_bios_encoder_data *devdata;
-
 	list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
-		if (child_dev_is_dp_dual_mode(&devdata->child, port))
+		if ((devdata->child.dvo_port == port_mapping[port].dp ||
+		     devdata->child.dvo_port == port_mapping[port].hdmi) &&
+		    child_dev_is_dp_dual_mode(&devdata->child))
 			return true;
 	}


@@ -2885,7 +2885,7 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
 	return freq;
 }
 
-static struct intel_cdclk_funcs tgl_cdclk_funcs = {
+static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
 	.get_cdclk = bxt_get_cdclk,
 	.set_cdclk = bxt_set_cdclk,
 	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2893,7 +2893,7 @@ static struct intel_cdclk_funcs tgl_cdclk_funcs = {
 	.calc_voltage_level = tgl_calc_voltage_level,
 };
 
-static struct intel_cdclk_funcs ehl_cdclk_funcs = {
+static const struct intel_cdclk_funcs ehl_cdclk_funcs = {
 	.get_cdclk = bxt_get_cdclk,
 	.set_cdclk = bxt_set_cdclk,
 	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2901,7 +2901,7 @@ static struct intel_cdclk_funcs ehl_cdclk_funcs = {
 	.calc_voltage_level = ehl_calc_voltage_level,
 };
 
-static struct intel_cdclk_funcs icl_cdclk_funcs = {
+static const struct intel_cdclk_funcs icl_cdclk_funcs = {
 	.get_cdclk = bxt_get_cdclk,
 	.set_cdclk = bxt_set_cdclk,
 	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2909,7 +2909,7 @@ static struct intel_cdclk_funcs icl_cdclk_funcs = {
 	.calc_voltage_level = icl_calc_voltage_level,
 };
 
-static struct intel_cdclk_funcs bxt_cdclk_funcs = {
+static const struct intel_cdclk_funcs bxt_cdclk_funcs = {
 	.get_cdclk = bxt_get_cdclk,
 	.set_cdclk = bxt_set_cdclk,
 	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
@@ -2917,54 +2917,54 @@ static struct intel_cdclk_funcs bxt_cdclk_funcs = {
 	.calc_voltage_level = bxt_calc_voltage_level,
 };
 
-static struct intel_cdclk_funcs skl_cdclk_funcs = {
+static const struct intel_cdclk_funcs skl_cdclk_funcs = {
 	.get_cdclk = skl_get_cdclk,
 	.set_cdclk = skl_set_cdclk,
 	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = skl_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs bdw_cdclk_funcs = {
+static const struct intel_cdclk_funcs bdw_cdclk_funcs = {
 	.get_cdclk = bdw_get_cdclk,
 	.set_cdclk = bdw_set_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = bdw_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs chv_cdclk_funcs = {
+static const struct intel_cdclk_funcs chv_cdclk_funcs = {
 	.get_cdclk = vlv_get_cdclk,
 	.set_cdclk = chv_set_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs vlv_cdclk_funcs = {
+static const struct intel_cdclk_funcs vlv_cdclk_funcs = {
 	.get_cdclk = vlv_get_cdclk,
 	.set_cdclk = vlv_set_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs hsw_cdclk_funcs = {
+static const struct intel_cdclk_funcs hsw_cdclk_funcs = {
 	.get_cdclk = hsw_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
 /* SNB, IVB, 965G, 945G */
-static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
+static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
 	.get_cdclk = fixed_400mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs ilk_cdclk_funcs = {
+static const struct intel_cdclk_funcs ilk_cdclk_funcs = {
 	.get_cdclk = fixed_450mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs gm45_cdclk_funcs = {
+static const struct intel_cdclk_funcs gm45_cdclk_funcs = {
 	.get_cdclk = gm45_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -2972,7 +2972,7 @@ static struct intel_cdclk_funcs gm45_cdclk_funcs = {
 
 /* G45 uses G33 */
 
-static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i965gm_cdclk_funcs = {
 	.get_cdclk = i965gm_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -2980,19 +2980,19 @@ static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
 
 /* i965G uses fixed 400 */
 
-static struct intel_cdclk_funcs pnv_cdclk_funcs = {
+static const struct intel_cdclk_funcs pnv_cdclk_funcs = {
 	.get_cdclk = pnv_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs g33_cdclk_funcs = {
+static const struct intel_cdclk_funcs g33_cdclk_funcs = {
 	.get_cdclk = g33_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i945gm_cdclk_funcs = {
 	.get_cdclk = i945gm_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
@@ -3000,37 +3000,37 @@ static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
 
 /* i945G uses fixed 400 */
 
-static struct intel_cdclk_funcs i915gm_cdclk_funcs = {
+static const struct intel_cdclk_funcs i915gm_cdclk_funcs = {
 	.get_cdclk = i915gm_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i915g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i915g_cdclk_funcs = {
 	.get_cdclk = fixed_333mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i865g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i865g_cdclk_funcs = {
 	.get_cdclk = fixed_266mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i85x_cdclk_funcs = {
+static const struct intel_cdclk_funcs i85x_cdclk_funcs = {
 	.get_cdclk = i85x_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i845g_cdclk_funcs = {
+static const struct intel_cdclk_funcs i845g_cdclk_funcs = {
 	.get_cdclk = fixed_200mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
 };
 
-static struct intel_cdclk_funcs i830_cdclk_funcs = {
+static const struct intel_cdclk_funcs i830_cdclk_funcs = {
 	.get_cdclk = fixed_133mhz_get_cdclk,
 	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
 	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,


@@ -4361,6 +4361,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
 
 	intel_dp_encoder_shutdown(encoder);
+	intel_hdmi_encoder_shutdown(encoder);
 
 	if (!intel_phy_is_tc(i915, phy))
 		return;


@@ -848,9 +848,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
 	int i;
 
 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+		unsigned int plane_size;
+
+		plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+		if (plane_size == 0)
+			continue;
+
 		if (rem_info->plane_alignment)
 			size = ALIGN(size, rem_info->plane_alignment);
 
-		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+		size += plane_size;
 	}
 
 	return size;


@@ -120,6 +120,12 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
 	return crtc_state->port_clock >= 1000000;
 }
 
+static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
+{
+	intel_dp->sink_rates[0] = 162000;
+	intel_dp->num_sink_rates = 1;
+}
+
 /* update sink rates from dpcd */
 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 {
@@ -281,7 +287,7 @@ intel_dp_max_data_rate(int max_link_rate, int max_lanes)
 	 */
 	int max_link_rate_kbps = max_link_rate * 10;
 
-	max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000);
+	max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
 	max_link_rate = max_link_rate_kbps / 8;
 	}
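The mul_u32_u32() swap above is a 32-bit overflow fix that matters for DP 2.0 link rates. Rough numbers, assuming a UHBR20 link where max_link_rate is 2000000 in these units:

	max_link_rate_kbps = 2000000 * 10;	/* = 20000000, still fits in 32 bits */
	/* 20000000 * 9671 = 193420000000, ~38 bits: wraps as a 32-bit int */
	mul_u32_u32(20000000, 9671);		/* widens to u64 before the divide */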
@@ -1858,6 +1864,12 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 	intel_dp->lane_count = lane_count;
 }
 
+static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
+{
+	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
+	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+}
+
 /* Enable backlight PWM and backlight PP control. */
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
 			    const struct drm_connector_state *conn_state)
@@ -2017,8 +2029,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
 		intel_dp_get_dpcd(intel_dp);
 
-	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
-	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+	intel_dp_reset_max_link_params(intel_dp);
 }
 
 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -2556,6 +2567,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	 */
 	intel_psr_init_dpcd(intel_dp);
 
+	/* Clear the default sink rates */
+	intel_dp->num_sink_rates = 0;
+
 	/* Read the eDP 1.4+ supported link rates. */
 	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -2591,6 +2605,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
 	intel_dp_set_sink_rates(intel_dp);
 	intel_dp_set_common_rates(intel_dp);
+	intel_dp_reset_max_link_params(intel_dp);
 
 	/* Read the eDP DSC DPCD registers */
 	if (DISPLAY_VER(dev_priv) >= 10)
@ -4332,12 +4347,7 @@ intel_dp_detect(struct drm_connector *connector,
* supports link training fallback params. * supports link training fallback params.
*/ */
if (intel_dp->reset_link_params || intel_dp->is_mst) { if (intel_dp->reset_link_params || intel_dp->is_mst) {
/* Initial max link lane count */ intel_dp_reset_max_link_params(intel_dp);
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
/* Initial max link rate */
intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->reset_link_params = false; intel_dp->reset_link_params = false;
} }
@ -5003,6 +5013,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
} }
intel_dp_set_source_rates(intel_dp); intel_dp_set_source_rates(intel_dp);
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_reset_max_link_params(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

===== next file =====

@@ -378,8 +378,8 @@ static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane,
 	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
 	intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
 
-	*w = main_width / main_hsub / hsub;
-	*h = main_height / main_vsub / vsub;
+	*w = DIV_ROUND_UP(main_width, main_hsub * hsub);
+	*h = DIV_ROUND_UP(main_height, main_vsub * vsub);
 }
 
 static u32 intel_adjust_tile_offset(int *x, int *y,
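Note: dividing twice truncates twice, so a main-surface dimension that is not a multiple of the combined subsampling factor loses its partial block and the subsampled plane comes out undersized; DIV_ROUND_UP() over the combined factor keeps it. A standalone arithmetic illustration (made-up dimensions, not the kernel code):

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))      /* same definition as the kernel macro */

  int main(void)
  {
          unsigned int main_width = 101, main_hsub = 2, hsub = 2;

          /* old: truncates at each step, dropping the partial block at the edge */
          unsigned int w_old = main_width / main_hsub / hsub;               /* 25 */
          /* new: one round-up division over the combined factor */
          unsigned int w_new = DIV_ROUND_UP(main_width, main_hsub * hsub);  /* 26 */

          printf("old=%u new=%u\n", w_old, w_new);
          return 0;
  }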

===== next file =====

@@ -1246,12 +1246,13 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 {
 	struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
-	struct i2c_adapter *adapter =
-		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+	struct i2c_adapter *adapter;
 
 	if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
 		return;
 
+	adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
 	drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
 		    enable ? "Enabling" : "Disabling");
@@ -2258,6 +2259,17 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+	/*
+	 * Give a hand to buggy BIOSen which forget to turn
+	 * the TMDS output buffers back on after a reboot.
+	 */
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+}
+
 static void
 intel_hdmi_unset_edid(struct drm_connector *connector)
 {

===== next file =====

@@ -28,6 +28,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config,
 			      struct drm_connector_state *conn_state);
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder);
 bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       struct drm_connector *connector,
 				       bool high_tmds_clock_ratio,

===== next file =====

@@ -9,6 +9,8 @@
 #include <linux/dma-resv.h>
 #include <linux/module.h>
 
+#include <asm/smp.h>
+
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"

===== next file =====

@@ -1396,6 +1396,9 @@ remap_pages(struct drm_i915_gem_object *obj,
 {
 	unsigned int row;
 
+	if (!width || !height)
+		return sg;
+
 	if (alignment_pad) {
 		st->nents++;

===== next file =====

@@ -2373,6 +2373,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 	unsigned long flags;
 	bool disabled;
 
+	lockdep_assert_held(&guc->submission_state.lock);
 	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
 	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
@@ -2388,7 +2389,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 	}
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 	if (unlikely(disabled)) {
-		release_guc_id(guc, ce);
+		__release_guc_id(guc, ce);
 		__guc_context_destroy(ce);
 		return;
 	}
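Note: the new lockdep_assert_held() documents that guc_lrc_desc_unpin() now runs with submission_state.lock already held, which is why the unlocked __release_guc_id() variant is used here; calling the locking release_guc_id() from this context recursed on the lock. A generic sketch of the locked/unlocked naming convention, with hypothetical names (my_guc, release_id), not the i915 functions:

  #include <linux/idr.h>
  #include <linux/spinlock.h>

  struct my_guc {
          spinlock_t lock;
          struct ida ids;
  };

  /* Caller must hold guc->lock; lockdep warns at runtime if it does not. */
  static void __release_id(struct my_guc *guc, int id)
  {
          lockdep_assert_held(&guc->lock);
          ida_free(&guc->ids, id);
  }

  /* Locking variant for callers that do not already hold guc->lock. */
  static void release_id(struct my_guc *guc, int id)
  {
          spin_lock(&guc->lock);
          __release_id(guc, id);
          spin_unlock(&guc->lock);
  }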

===== next file =====

@@ -1537,38 +1537,14 @@ i915_request_await_object(struct i915_request *to,
 			  struct drm_i915_gem_object *obj,
 			  bool write)
 {
-	struct dma_fence *excl;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int ret = 0;
 
-	if (write) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-
-		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
-					  &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			ret = i915_request_await_dma_fence(to, shared[i]);
-			if (ret)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
-
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(obj->base.resv);
-	}
-
-	if (excl) {
-		if (ret == 0)
-			ret = i915_request_await_dma_fence(to, excl);
-
-		dma_fence_put(excl);
-	}
+	dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+		ret = i915_request_await_dma_fence(to, fence);
+		if (ret)
+			break;
+	}
 
 	return ret;
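Note: this replaces the open-coded walk over shared and exclusive fences with the dma_resv_for_each_fence() iterator added in the dma-buf changes above; the iterator takes and drops fence references itself, which eliminates the manual dma_fence_put() bookkeeping. A minimal sketch of the pattern, assuming the caller holds the dma_resv lock as the iterator requires:

  #include <linux/dma-resv.h>

  /* Count every fence attached to a locked reservation object;
   * 'all_fences' selects whether shared fences are included too.
   */
  static unsigned int count_fences(struct dma_resv *resv, bool all_fences)
  {
          struct dma_resv_iter cursor;
          struct dma_fence *fence;
          unsigned int n = 0;

          dma_resv_for_each_fence(&cursor, resv, all_fences, fence)
                  n++;    /* fence references are managed by the iterator */

          return n;
  }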

===== next file =====

@@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	bool plane_disabling = false;
 	int i;
-	bool fence_cookie = dma_fence_begin_signalling();
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 
 	drm_atomic_helper_commit_hw_done(state);
-	dma_fence_end_signalling(fence_cookie);
 }
 
 static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {

===== next file =====

@@ -88,7 +88,7 @@ static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb,
 		ctrl |= CTRL_BUS_WIDTH_24;
 		break;
 	default:
-		dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+		dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
 		break;
 	}
@@ -362,6 +362,12 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
 			drm_atomic_get_new_bridge_state(state,
 							mxsfb->bridge);
 		bus_format = bridge_state->input_bus_cfg.format;
+		if (bus_format == MEDIA_BUS_FMT_FIXED) {
+			dev_warn_once(drm->dev,
+				      "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
+				      "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n");
+			bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+		}
 	}
 
 	/* If there is no bridge, use bus format from connector */
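Note: the warning above asks bridge drivers to implement the atomic_get_input_bus_fmts hook from struct drm_bridge_funcs so the CRTC driver can stop guessing the bus format. A sketch of such a callback, with a hypothetical driver name and RGB888 standing in for the bridge's real input format (the returned array is kmalloc'd and freed by the caller, per the drm_bridge contract):

  #include <drm/drm_bridge.h>
  #include <linux/media-bus-format.h>
  #include <linux/slab.h>

  static u32 *my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
                                           struct drm_bridge_state *bridge_state,
                                           struct drm_crtc_state *crtc_state,
                                           struct drm_connector_state *conn_state,
                                           u32 output_fmt,
                                           unsigned int *num_input_fmts)
  {
          u32 *fmts = kmalloc(sizeof(*fmts), GFP_KERNEL);  /* freed by the caller */

          if (!fmts) {
                  *num_input_fmts = 0;
                  return NULL;
          }

          fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;  /* report the format the bridge really accepts */
          *num_input_fmts = 1;
          return fmts;
  }

The hook is wired up through the .atomic_get_input_bus_fmts member of the driver's drm_bridge_funcs.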

===== next file =====

@@ -1249,7 +1249,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
 {
 	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
 
 	if (ttm_tt_is_populated(ttm))
@@ -1262,7 +1261,6 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
 	}
 
 	drm = nouveau_bdev(bdev);
-	dev = drm->dev->dev;
 
 	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
 }
@@ -1272,7 +1270,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
 			  struct ttm_tt *ttm)
 {
 	struct nouveau_drm *drm;
-	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
 
 	if (slave)
@@ -1281,7 +1278,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
 	nouveau_ttm_tt_unbind(bdev, ttm);
 
 	drm = nouveau_bdev(bdev);
-	dev = drm->dev->dev;
 
 	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
 }

===== next file =====

@@ -562,6 +562,7 @@ nouveau_drm_device_init(struct drm_device *dev)
 	nvkm_dbgopt(nouveau_debug, "DRM");
 
 	INIT_LIST_HEAD(&drm->clients);
+	mutex_init(&drm->clients_lock);
 	spin_lock_init(&drm->tile.lock);
 
 	/* workaround an odd issue on nvc1 by disabling the device's
@@ -632,6 +633,7 @@ nouveau_drm_device_init(struct drm_device *dev)
 static void
 nouveau_drm_device_fini(struct drm_device *dev)
 {
+	struct nouveau_cli *cli, *temp_cli;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (nouveau_pmops_runtime()) {
@@ -656,9 +658,28 @@ nouveau_drm_device_fini(struct drm_device *dev)
 	nouveau_ttm_fini(drm);
 	nouveau_vga_fini(drm);
 
+	/*
+	 * There may be existing clients from as-yet unclosed files. For now,
+	 * clean them up here rather than deferring until the file is closed,
+	 * but this likely not correct if we want to support hot-unplugging
+	 * properly.
+	 */
+	mutex_lock(&drm->clients_lock);
+	list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
+		list_del(&cli->head);
+		mutex_lock(&cli->mutex);
+		if (cli->abi16)
+			nouveau_abi16_fini(cli->abi16);
+		mutex_unlock(&cli->mutex);
+		nouveau_cli_fini(cli);
+		kfree(cli);
+	}
+	mutex_unlock(&drm->clients_lock);
+
 	nouveau_cli_fini(&drm->client);
 	nouveau_cli_fini(&drm->master);
 	nvif_parent_dtor(&drm->parent);
+	mutex_destroy(&drm->clients_lock);
 	kfree(drm);
 }
 
@@ -796,7 +817,7 @@ nouveau_drm_device_remove(struct drm_device *dev)
 	struct nvkm_client *client;
 	struct nvkm_device *device;
 
-	drm_dev_unregister(dev);
+	drm_dev_unplug(dev);
 
 	client = nvxx_client(&drm->client.base);
 	device = nvkm_device_find(client->device);
@@ -1090,9 +1111,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 	fpriv->driver_priv = cli;
 
-	mutex_lock(&drm->client.mutex);
+	mutex_lock(&drm->clients_lock);
 	list_add(&cli->head, &drm->clients);
-	mutex_unlock(&drm->client.mutex);
+	mutex_unlock(&drm->clients_lock);
 
 done:
 	if (ret && cli) {
@@ -1110,6 +1131,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
 {
 	struct nouveau_cli *cli = nouveau_cli(fpriv);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int dev_index;
+
+	/*
+	 * The device is gone, and as it currently stands all clients are
+	 * cleaned up in the removal codepath. In the future this may change
+	 * so that we can support hot-unplugging, but for now we immediately
+	 * return to avoid a double-free situation.
+	 */
+	if (!drm_dev_enter(dev, &dev_index))
+		return;
 
 	pm_runtime_get_sync(dev->dev);
 
@@ -1118,14 +1149,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
 		nouveau_abi16_fini(cli->abi16);
 	mutex_unlock(&cli->mutex);
 
-	mutex_lock(&drm->client.mutex);
+	mutex_lock(&drm->clients_lock);
 	list_del(&cli->head);
-	mutex_unlock(&drm->client.mutex);
+	mutex_unlock(&drm->clients_lock);
 
 	nouveau_cli_fini(cli);
 	kfree(cli);
 
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
+	drm_dev_exit(dev_index);
 }
 
 static const struct drm_ioctl_desc
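Note: drm_dev_unplug() (replacing drm_dev_unregister() above) marks the device as gone, so subsequent drm_dev_enter() calls fail and the postclose path bails out instead of double-freeing clients that device_fini already tore down. A minimal sketch of the guard pattern, with a hypothetical callback name, not the nouveau code:

  #include <drm/drm_drv.h>

  static void my_file_teardown(struct drm_device *dev)
  {
          int idx;

          if (!drm_dev_enter(dev, &idx))
                  return; /* device was unplugged; its state is already gone */

          /* ... safe to touch device state here ... */

          drm_dev_exit(idx);
  }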

===== next file =====

@@ -139,6 +139,11 @@ struct nouveau_drm {
 	struct list_head clients;
 
+	/**
+	 * @clients_lock: Protects access to the @clients list of &struct nouveau_cli.
+	 */
+	struct mutex clients_lock;
+
 	u8 old_pm_cap;
 
 	struct {

===== next file =====

@@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
 	nouveau_bo_del_io_reserve_lru(bo);
 	prot = vm_get_page_prot(vma->vm_flags);
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 	nouveau_bo_add_io_reserve_lru(bo);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
@@ -337,7 +337,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
-	uint32_t pref_domains = 0;;
+	uint32_t pref_domains = 0;
 
 	if (!domains)
 		return -EINVAL;

===== next file =====

@@ -162,10 +162,14 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
 	 */
 
 	mm = get_task_mm(current);
+	if (!mm) {
+		return -EINVAL;
+	}
 	mmap_read_lock(mm);
 
 	if (!cli->svm.svmm) {
 		mmap_read_unlock(mm);
+		mmput(mm);
 		return -EINVAL;
 	}
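Note: get_task_mm() can return NULL (the task may have no address space or be exiting) and, on success, takes a reference that must be dropped with mmput() on every exit path; the hunk above adds both the NULL check and the previously missing mmput() on the error path. A minimal sketch of the pairing, with a hypothetical helper:

  #include <linux/mmap_lock.h>
  #include <linux/sched/mm.h>

  static int with_current_mm(int (*do_something)(struct mm_struct *mm))
  {
          struct mm_struct *mm = get_task_mm(current);
          int ret;

          if (!mm)
                  return -EINVAL; /* no address space, e.g. an exiting task */

          mmap_read_lock(mm);
          ret = do_something(mm);
          mmap_read_unlock(mm);

          mmput(mm);      /* drop the reference on every path */
          return ret;
  }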

Some files were not shown because too many files have changed in this diff.