drm/virtio: Conditionally allocate virtio_gpu_fence
commit 70d1ace56db6c79d39dbe9c0d5244452b67e2fde upstream.

We don't want to create a fence for every command submission. It's only
necessary when userspace provides a waitable token for submission. This
could be:

1) bo_handles, to be used with VIRTGPU_WAIT
2) out_fence_fd, to be used with dma_fence apis
3) a ring_idx provided with VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK
   + DRM event API
4) syncobjs in the future

There is also a use case for just submitting a command to the host and
expecting no response. For example, gfxstream has GFXSTREAM_CONTEXT_PING,
which just wakes up the host-side worker threads. There's also
CROSS_DOMAIN_CMD_SEND, which just sends data to the Wayland server.
Skipping the fence in those cases avoids having to signal the
automatically created virtio_gpu_fence.

In addition, VIRTGPU_EXECBUF_RING_IDX is now checked when creating a DRM
event object. VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is already defined in
terms of per-context rings. It was theoretically possible to create a DRM
event on the global timeline (ring_idx == 0) if the context enabled DRM
event polling. However, that wouldn't work, and userspace (Sommelier)
doesn't rely on it. Explicitly disallow it for clarity.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # edited coding style
Link: https://patchwork.freedesktop.org/patch/msgid/20230707213124.494-1-gurchetansingh@chromium.org
Signed-off-by: Alyssa Ross <hi@alyssa.is>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
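For illustration (not part of the patch), a minimal userspace sketch of the
fire-and-forget path the message describes: with no bo_handles, no out-fence
fd, and no ring_idx flag, the patched kernel skips the fence allocation
entirely. The device node path and the empty command payload are placeholder
assumptions; a real buffer would carry context-specific commands such as
gfxstream's GFXSTREAM_CONTEXT_PING.

/*
 * Hedged sketch: fire-and-forget submission.  With flags == 0 and
 * num_bo_handles == 0 there is no waitable token, so the patched
 * kernel leaves out_fence NULL and never has to signal it.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <libdrm/virtgpu_drm.h>

int main(void)
{
	/* Render-node path varies per system; assumed here for illustration. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	uint32_t cmd[4] = { 0 };	/* placeholder for a context-specific command */

	struct drm_virtgpu_execbuffer exbuf;
	memset(&exbuf, 0, sizeof(exbuf));
	exbuf.flags = 0;		/* no FENCE_FD_OUT, no RING_IDX */
	exbuf.size = sizeof(cmd);
	exbuf.command = (uint64_t)(uintptr_t)cmd;
	exbuf.num_bo_handles = 0;	/* nothing to pass to VIRTGPU_WAIT */

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf))
		perror("DRM_IOCTL_VIRTGPU_EXECBUFFER");

	close(fd);
	return 0;
}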
@@ -43,13 +43,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
 					 struct virtio_gpu_fence *fence,
 					 uint32_t ring_idx)
 {
-	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence_event *e = NULL;
 	int ret;
 
-	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
-		return 0;
-
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
@@ -121,6 +117,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence *out_fence;
+	bool drm_fence_event;
 	int ret;
 	uint32_t *bo_handles = NULL;
 	void __user *user_bo_handles = NULL;
@@ -216,15 +213,24 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_memdup;
 	}
 
-	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
-	if(!out_fence) {
-		ret = -ENOMEM;
-		goto out_unresv;
-	}
+	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+		drm_fence_event = true;
+	else
+		drm_fence_event = false;
 
-	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
-	if (ret)
-		goto out_unresv;
+	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+	    exbuf->num_bo_handles ||
+	    drm_fence_event)
+		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+	else
+		out_fence = NULL;
+
+	if (drm_fence_event) {
+		ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+		if (ret)
+			goto out_unresv;
+	}
 
 	if (out_fence_fd >= 0) {
 		sync_file = sync_file_create(&out_fence->f);
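For contrast, a hedged userspace sketch of a submission that does hand over a
waitable token: VIRTGPU_EXECBUF_FENCE_FD_OUT requests a sync_file fd, so the
rewritten ioctl above still allocates out_fence. The helper name and command
payload are hypothetical; polling the returned fd is standard sync_file usage.

/*
 * Hedged sketch: submission with an out-fence fd (token type 2 in the
 * commit message).  The kernel allocates the fence because the caller
 * asked for something to wait on.
 */
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <libdrm/virtgpu_drm.h>

/* fd: an open virtio-gpu render node; cmd/size: a context-specific buffer. */
static int submit_and_wait(int fd, void *cmd, uint32_t size)
{
	struct drm_virtgpu_execbuffer exbuf;

	memset(&exbuf, 0, sizeof(exbuf));
	exbuf.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT;
	exbuf.size = size;
	exbuf.command = (uint64_t)(uintptr_t)cmd;

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf))
		return -1;

	/* exbuf.fence_fd now holds a sync_file; poll it until it signals. */
	struct pollfd pfd = { .fd = exbuf.fence_fd, .events = POLLIN };
	int ret = poll(&pfd, 1, -1);

	close(exbuf.fence_fd);
	return ret > 0 ? 0 : -1;
}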
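Finally, a sketch of the third token type from the commit message, under the
assumption that the context was created with DRM event polling enabled; the
capset id, ring count, and payload here are hypothetical placeholders. Note
that after this patch the event is only queued when VIRTGPU_EXECBUF_RING_IDX
is set and the ring is covered by VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK.

/*
 * Hedged sketch: per-context ring with DRM event polling.  The context
 * opts in via POLL_RINGS_MASK at init time, submits with RING_IDX set,
 * then waits for VIRTGPU_EVENT_FENCE_SIGNALED on the DRM fd.
 */
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <libdrm/virtgpu_drm.h>	/* pulls in struct drm_event via drm.h */

/* fd: a freshly opened virtio-gpu render node (no context created yet). */
static int submit_on_polled_ring(int fd, void *cmd, uint32_t size)
{
	/* Hypothetical context: capset 1, two rings, events enabled on ring 1. */
	struct drm_virtgpu_context_set_param params[] = {
		{ VIRTGPU_CONTEXT_PARAM_CAPSET_ID, 1 },
		{ VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
		{ VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, 1 << 1 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 3,
		.ctx_set_params = (uint64_t)(uintptr_t)params,
	};
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init))
		return -1;

	struct drm_virtgpu_execbuffer exbuf;
	memset(&exbuf, 0, sizeof(exbuf));
	exbuf.flags = VIRTGPU_EXECBUF_RING_IDX;	/* required for a DRM event */
	exbuf.ring_idx = 1;			/* must be in the poll mask */
	exbuf.size = size;
	exbuf.command = (uint64_t)(uintptr_t)cmd;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf))
		return -1;

	/* Block until the fence event arrives on the DRM fd, then read it. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	union { struct drm_event ev; char buf[128]; } u;
	if (read(fd, &u, sizeof(u)) < (ssize_t)sizeof(u.ev))
		return -1;
	return u.ev.type == VIRTGPU_EVENT_FENCE_SIGNALED ? 0 : -1;
}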