commit d153e8c156
Merge tag 'drm-intel-next-fixes-2021-02-25' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

A fix for the color format check from Ville, plus the re-enabling of
-Wuninitialized from Nathan, and the GVT fixes, including fixes for ww
locking and the cmd parser, and a general cleanup of dev_priv->gt.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEbSBwaO7dZQkcLOKj+mJfZA7rE8oFAmA3t44ACgkQ+mJfZA7r
E8pa3Af+L/rcQQ8G/fQiPiOXQQKoyPhzjxGnocd/Ff31H1b/faPOiKHnYtnnuLMS
sHMLaVy2Nq9M0EoCEO0XqHn4ahJLtz86xqh71TntekT9io52D64r0HS95XEvdTFQ
Ex0Guz7ef3MKCyUfG6SY/IB2DEzmCKniahXC6UWl1yVGKAeEQ49bWFcgPqUe0F+a
4PztjYmK7k6NFCHpjaCQJxX5rK3UjyBQy28E1rCcWjD5zidkh/vzTLnlnGqh9J9l
yK2Lz3rtojUSXT7ML6rracwmdFyf+r/WrSMPBtIA3R9gye6bGjf9oR3DgLr4Lhdn
zyw40uyq2471Idc2oTlFhxaVnqX1Vw==
=yAkb
-----END PGP SIGNATURE-----

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YDe3pBPV5Kx3hpk6@intel.com
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
 subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
-subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
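A note on the warning re-enabled above: with the cc-disable-warning line dropped, -Wuninitialized (notably clang's) fires again across i915. A minimal, hypothetical illustration of the bug class it reports (not code from the i915 tree):

	/* Hypothetical example: what clang's -Wuninitialized catches. */
	static int read_before_write(void)
	{
		int val;	/* declared but never assigned */

		return val;	/* warning: variable 'val' is uninitialized
				 * when used here [-Wuninitialized] */
	}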
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -109,7 +109,6 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
 	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
 	crtc_state->master_transcoder = INVALID_TRANSCODER;
 	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
-	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
 	crtc_state->scaler_state.scaler_id = -1;
 	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
 }
@@ -10211,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len,
 }
 
 static const char * const output_format_str[] = {
-	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
@@ -10220,7 +10219,7 @@ static const char * const output_format_str[] = {
 static const char *output_formats(enum intel_output_format format)
 {
 	if (format >= ARRAY_SIZE(output_format_str))
-		format = INTEL_OUTPUT_FORMAT_INVALID;
+		return "invalid";
 	return output_format_str[format];
 }
 
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -830,7 +830,6 @@ struct intel_crtc_wm_state {
 };
 
 enum intel_output_format {
-	INTEL_OUTPUT_FORMAT_INVALID,
 	INTEL_OUTPUT_FORMAT_RGB,
 	INTEL_OUTPUT_FORMAT_YCBCR420,
 	INTEL_OUTPUT_FORMAT_YCBCR444,
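Taken together, the three hunks above drop the INTEL_OUTPUT_FORMAT_INVALID sentinel: no crtc state ever stores an "invalid" format anymore, and out-of-range values are handled at the string lookup instead. A self-contained mock-up of the resulting lookup behaviour, written as userspace C for illustration rather than the kernel code itself:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	enum intel_output_format {	/* mirrors the reworked enum above */
		INTEL_OUTPUT_FORMAT_RGB,
		INTEL_OUTPUT_FORMAT_YCBCR420,
		INTEL_OUTPUT_FORMAT_YCBCR444,
	};

	static const char * const output_format_str[] = {
		[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
		[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
		[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
	};

	static const char *output_formats(enum intel_output_format format)
	{
		if (format >= ARRAY_SIZE(output_format_str))
			return "invalid";	/* bounds check replaces the sentinel */
		return output_format_str[format];
	}

	int main(void)
	{
		printf("%s\n", output_formats(INTEL_OUTPUT_FORMAT_YCBCR420));	/* YCBCR4:2:0 */
		printf("%s\n", output_formats(17));				/* invalid */
		return 0;
	}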
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -41,6 +41,7 @@
 #include "gt/intel_lrc.h"
 #include "gt/intel_ring.h"
 #include "gt/intel_gt_requests.h"
+#include "gt/shmem_utils.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
@@ -3094,71 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  */
 void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 {
+	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
-	struct i915_request *rq;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_request *requests[I915_NUM_ENGINES] = {};
-	bool is_ctx_pinned[I915_NUM_ENGINES] = {};
-	int ret = 0;
 
 	if (gvt->is_reg_whitelist_updated)
 		return;
 
-	for_each_engine(engine, &dev_priv->gt, id) {
-		ret = intel_context_pin(s->shadow[id]);
-		if (ret) {
-			gvt_vgpu_err("fail to pin shadow ctx\n");
-			goto out;
-		}
-		is_ctx_pinned[id] = true;
-
-		rq = i915_request_create(s->shadow[id]);
-		if (IS_ERR(rq)) {
-			gvt_vgpu_err("fail to alloc default request\n");
-			ret = -EIO;
-			goto out;
-		}
-		requests[id] = i915_request_get(rq);
-		i915_request_add(rq);
-	}
-
-	if (intel_gt_wait_for_idle(&dev_priv->gt,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-		ret = -EIO;
-		goto out;
-	}
-
 	/* scan init ctx to update cmd accessible list */
-	for_each_engine(engine, &dev_priv->gt, id) {
-		int size = engine->context_size - PAGE_SIZE;
-		void *vaddr;
+	for_each_engine(engine, gvt->gt, id) {
 		struct parser_exec_state s;
-		struct drm_i915_gem_object *obj;
-		struct i915_request *rq;
+		void *vaddr;
+		int ret;
 
-		rq = requests[id];
-		GEM_BUG_ON(!i915_request_completed(rq));
-		GEM_BUG_ON(!intel_context_is_pinned(rq->context));
-		obj = rq->context->state->obj;
+		if (!engine->default_state)
+			continue;
 
-		if (!obj) {
-			ret = -EIO;
-			goto out;
-		}
-
-		i915_gem_object_set_cache_coherency(obj,
-						    I915_CACHE_LLC);
-
-		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		vaddr = shmem_pin_map(engine->default_state);
 		if (IS_ERR(vaddr)) {
-			gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
-				id, PTR_ERR(vaddr));
-			ret = PTR_ERR(vaddr);
-			goto out;
+			gvt_err("failed to map %s->default state, err:%zd\n",
+				engine->name, PTR_ERR(vaddr));
+			return;
 		}
 
 		s.buf_type = RING_BUFFER_CTX;
@@ -3166,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 		s.vgpu = vgpu;
 		s.engine = engine;
 		s.ring_start = 0;
-		s.ring_size = size;
+		s.ring_size = engine->context_size - start;
 		s.ring_head = 0;
-		s.ring_tail = size;
+		s.ring_tail = s.ring_size;
 		s.rb_va = vaddr + start;
 		s.workload = NULL;
 		s.is_ctx_wa = false;
@@ -3176,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 
 		/* skipping the first RING_CTX_SIZE(0x50) dwords */
 		ret = ip_gma_set(&s, RING_CTX_SIZE);
-		if (ret) {
-			i915_gem_object_unpin_map(obj);
-			goto out;
+		if (ret == 0) {
+			ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
+			if (ret)
+				gvt_err("Scan init ctx error\n");
 		}
 
-		ret = command_scan(&s, 0, size, 0, size);
+		shmem_unpin_map(engine->default_state, vaddr);
 		if (ret)
-			gvt_err("Scan init ctx error\n");
-
-		i915_gem_object_unpin_map(obj);
+			return;
 	}
 
-out:
-	if (!ret)
-		gvt->is_reg_whitelist_updated = true;
-
-	for (id = 0; id < I915_NUM_ENGINES ; id++) {
-		if (requests[id])
-			i915_request_put(requests[id]);
-
-		if (is_ctx_pinned[id])
-			intel_context_unpin(s->shadow[id]);
-	}
+	gvt->is_reg_whitelist_updated = true;
 }
 
 int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
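The rewrite above is the cmd parser fix: instead of pinning every shadow context, submitting a request per engine and waiting for the GT to idle just to obtain a context image, the whitelist scan now parses each engine's default state, which gt/shmem_utils.h exposes as a shmem file (hence the new include). The per-engine shape of the new flow, sketched with a hypothetical helper name and the parser setup elided:

	/* Sketch only: the pin -> parse -> unpin shape used above.
	 * shmem_pin_map()/shmem_unpin_map() come from "gt/shmem_utils.h";
	 * scan_default_state() is an illustrative name, not a real symbol.
	 */
	static void scan_default_state(struct intel_engine_cs *engine)
	{
		void *vaddr;

		if (!engine->default_state)	/* engine has no default image */
			return;

		vaddr = shmem_pin_map(engine->default_state);
		if (IS_ERR(vaddr))
			return;

		/* ... command_scan() the context image at vaddr ... */

		shmem_unpin_map(engine->default_state, vaddr);
	}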
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
 static void clean_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
-	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 static void reset_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine);
 }
 
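Both execlist hunks above are instances of the dev_priv->gt cleanup called out in the merge message: GVT already holds the intel_gt it drives via vgpu->gvt->gt, so fetching drm_i915_private only to take &dev_priv->gt again was a pointless round trip. The same substitution repeats in scheduler.c below. In sketch form:

	/* Before: detour through i915 and back to the (same) GT. */
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine);

	/* After: use the intel_gt pointer GVT already owns. */
	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine);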
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	if (!wa_ctx->indirect_ctx.obj)
 		return;
 
+	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 
 	wa_ctx->indirect_ctx.obj = NULL;
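The ww-locking fixes in this file come in two forms. Where a single object is involved, as in release_shadow_wa_ctx() above, the object lock is taken directly with a NULL acquire context; presumably only one dma-resv lock is held at a time there, so no deadlock backoff is needed. A sketch of this single-object form:

	/* Single-object form: NULL ww context, no -EDEADLK/backoff dance. */
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_map(obj);	/* unpin/unmap under the object lock */
	i915_gem_object_unlock(obj);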
@@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = workload->vgpu->gvt;
 	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
 	struct intel_vgpu_shadow_bb *bb;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
 	list_for_each_entry(bb, &workload->shadow_bb, list) {
@@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		 * directly
 		 */
 		if (!bb->ppgtt) {
-			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
-							   NULL, 0, 0, 0);
+			i915_gem_ww_ctx_init(&ww, false);
+retry:
+			i915_gem_object_lock(bb->obj, &ww);
+
+			bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
+							      NULL, 0, 0, 0);
 			if (IS_ERR(bb->vma)) {
 				ret = PTR_ERR(bb->vma);
+				if (ret == -EDEADLK) {
+					ret = i915_gem_ww_ctx_backoff(&ww);
+					if (!ret)
+						goto retry;
+				}
 				goto err;
 			}
 
@@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 						      0);
 			if (ret)
 				goto err;
-		}
 
-		/* No one is going to touch shadow bb from now on. */
-		i915_gem_object_flush_map(bb->obj);
+			/* No one is going to touch shadow bb from now on. */
+			i915_gem_object_flush_map(bb->obj);
+			i915_gem_object_unlock(bb->obj);
+		}
 	}
 	return 0;
 err:
+	i915_gem_ww_ctx_fini(&ww);
 	release_shadow_batch_buffer(workload);
 	return ret;
 }
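prepare_shadow_batch_buffer() above and prepare_shadow_wa_ctx() below use the full form: an i915_gem_ww_ctx acquire context with backoff and retry, since pinning may need further locks and can return -EDEADLK when the ww class detects a lock-order conflict. The canonical idiom, condensed into a sketch (the work done under the lock is elided):

	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, false);	/* false: non-interruptible waits */
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret) {
		/* ... i915_gem_object_ggtt_pin_ww(), map, etc. ... */
	}
	if (ret == -EDEADLK) {
		/* Wounded by another acquire context: drop locks, retry. */
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* also unlocks anything still held */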
@@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	unsigned char *per_ctx_va =
 		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
 		wa_ctx->indirect_ctx.size;
+	struct i915_gem_ww_ctx ww;
+	int ret;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
 
-	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
-				       0, CACHELINE_BYTES, 0);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
+
+	vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
+					  0, CACHELINE_BYTES, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		if (ret == -EDEADLK) {
+			ret = i915_gem_ww_ctx_backoff(&ww);
+			if (!ret)
+				goto retry;
+		}
+		return ret;
+	}
+
+	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
 	 * up to the core to fix up the stray pin_count upon
@@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
+			i915_gem_object_lock(bb->obj, NULL);
 			if (bb->va && !IS_ERR(bb->va))
 				i915_gem_object_unpin_map(bb->obj);
 
 			if (bb->vma && !IS_ERR(bb->vma))
 				i915_vma_unpin(bb->vma);
 
+			i915_gem_object_unlock(bb->obj);
 			i915_gem_object_put(bb->obj);
 		}
 		list_del(&bb->list);
@@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
 					 intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
 	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
 		list_for_each_entry_safe(pos, n,
 			&s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);