Merge 08994edbb8 ("Merge tag 'drm-misc-intel-oob-hotplug-v1' of git://git.kernel.org/pub/scm/linux/kernel/git/hansg/linux into drm-misc-next") into android-mainline

Steps on the way to 5.16-rc1

Resolves conflicts in:
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
	drivers/gpu/drm/drm_panel_orientation_quirks.c
	drivers/gpu/drm/i915/gem/i915_gem_ttm.c

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iaae62240e81569925b6bb6c11f08c07abd2ecd52
Greg Kroah-Hartman 2021-11-11 14:24:01 +01:00
commit 6904105a3f
39 changed files with 662 additions and 604 deletions

View File

@ -0,0 +1,98 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,s6d27a1.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Samsung S6D27A1 display panel

description: The S6D27A1 is a 480x800 DPI display panel from Samsung Mobile
  Displays (SMD). The panel must obey the rules for a SPI slave device
  as specified in spi/spi-controller.yaml

maintainers:
  - Markuss Broks <markuss.broks@gmail.com>

allOf:
  - $ref: panel-common.yaml#

properties:
  compatible:
    const: samsung,s6d27a1

  reg: true

  interrupts:
    description: provides an optional ESD (electrostatic discharge)
      interrupt that signals abnormalities in the display hardware.
      This can also be raised for other reasons like erroneous
      configuration.
    maxItems: 1

  reset-gpios: true

  vci-supply:
    description: regulator that supplies the VCI analog voltage
      usually around 3.0 V

  vccio-supply:
    description: regulator that supplies the VCCIO voltage usually
      around 1.8 V

  backlight: true

  spi-cpha: true

  spi-cpol: true

  spi-max-frequency:
    maximum: 1200000

  port: true

required:
  - compatible
  - reg
  - vci-supply
  - vccio-supply
  - spi-cpha
  - spi-cpol
  - port

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/gpio/gpio.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    spi {
        compatible = "spi-gpio";
        sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
        miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
        mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
        cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
        num-chipselects = <1>;
        #address-cells = <1>;
        #size-cells = <0>;

        panel@0 {
            compatible = "samsung,s6d27a1";
            spi-max-frequency = <1200000>;
            spi-cpha;
            spi-cpol;
            reg = <0>;
            vci-supply = <&lcd_3v0_reg>;
            vccio-supply = <&lcd_1v8_reg>;
            reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
            interrupt-parent = <&gpio>;
            interrupts = <5 IRQ_TYPE_EDGE_RISING>;

            port {
                panel_in: endpoint {
                    remote-endpoint = <&display_out>;
                };
            };
        };
    };

...

View File

@ -6046,6 +6046,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
DRM DRIVER FOR SAMSUNG S6D27A1 PANELS
M: Markuss Broks <markuss.broks@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
F: drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>

View File

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o seqno-fence.o
dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o

View File

@ -82,6 +82,7 @@ static void dma_buf_release(struct dentry *dentry)
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
WARN_ON(!list_empty(&dmabuf->attachments));
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);

View File

@ -1,71 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* seqno-fence, using a dma-buf to synchronize fencing
*
* Copyright (C) 2012 Texas Instruments
* Copyright (C) 2012-2014 Canonical Ltd
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/seqno-fence.h>
static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
dma_buf_put(f->sync_buf);
if (f->ops->release)
f->ops->release(fence);
else
dma_fence_free(&f->base);
}
static signed long seqno_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
.signaled = seqno_signaled,
.wait = seqno_wait,
.release = seqno_release,
};
EXPORT_SYMBOL(seqno_fence_ops);

View File

@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
config DRM_GEM_SHMEM_HELPER
bool
depends on DRM
depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
depends on DRM
depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
help
Choose this option to get a virtual graphics memory manager,
as used by Mesa's software renderer for enhanced performance.
@ -279,7 +280,7 @@ config DRM_VGEM
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
depends on DRM && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32

View File

@ -1066,8 +1066,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
amdgpu_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@ -1148,6 +1146,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
amdgpu_ttm_backend_unbind(bdev, ttm);
if (gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);

View File

@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
return !malidp_hw_format_is_afbc_only(format);
}
if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}

View File

@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
ret = sp_tx_aux_rd(ctx, 0xf1);
if (ret) {
sp_tx_rst_aux(ctx);
ret = sp_tx_rst_aux(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
} else {
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
return 0;
return ret;
}
static int segments_edid_read(struct anx7625_data *ctx,
@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
return 0;
return ret;
}
static int sp_tx_edid_read(struct anx7625_data *ctx,
@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
segments_edid_read(ctx, count / 2,
pblock_buf, offset);
ret = segments_edid_read(ctx, count / 2,
pblock_buf, offset);
if (ret < 0)
return ret;
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
segments_edid_read(ctx, count / 2,
pblock_buf, offset);
ret = segments_edid_read(ctx, count / 2,
pblock_buf, offset);
if (ret < 0)
return ret;
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
}
/* Reset aux channel */
sp_tx_rst_aux(ctx);
ret = sp_tx_rst_aux(ctx);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
return ret;
}
return (blocks_num + 1);
}

View File

@ -10,6 +10,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@ -162,6 +166,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
return PTR_ERR(pages);
}
/*
* TODO: Allocating WC pages which are correctly flushed is only
* supported on x86. Ideal solution would be a GFP_WC flag, which also
* ttm_pool.c could use.
*/
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif
shmem->pages = pages;
return 0;
@ -203,6 +217,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->pages_use_count > 0)
return;
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
@ -542,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
} else {
page = shmem->pages[page_offset];
ret = vmf_insert_page(vma, vmf->address, page);
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
mutex_unlock(&shmem->pages_lock);
@ -612,7 +631,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return ret;
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
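Taken together, these shmem-helper hunks move the mmap path from refcounted page insertion (VM_MIXEDMAP with vmf_insert_page()) to raw PFN insertion (VM_PFNMAP with vmf_insert_pfn()), with x86-only write-combine attribute fixup around getting/putting the page array. A minimal sketch of the resulting fault pattern, assuming a pre-pinned page array in vm_private_data in place of the helper's real locking and bounds checks:

#include <linux/mm.h>

/* Hypothetical fault handler following the VM_PFNMAP pattern above:
 * the backing page is inserted by PFN, so core MM takes no reference
 * on it and the object's lifetime stays fully driver-managed. */
static vm_fault_t sketch_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page **pages = vma->vm_private_data;	/* assumed pre-pinned */
	pgoff_t off = vmf->pgoff;	/* assumes the fake offset was stripped at mmap time */

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(pages[off]));
}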

View File

@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}

View File

@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
* @flags: ioctl permission flags.
* @file_priv: Pointer to struct drm_file identifying the caller.
*
* Checks whether the caller is allowed to run an ioctl with the
* indicated permissions.
*
* Returns:
* Zero if allowed, -EACCES otherwise.
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \

View File

@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
/*
* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable
* console.
*/
if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
!IS_ENABLED(CONFIG_EXPERT))
request_module_nowait("fbcon");
return drm_dp_aux_dev_init();
}

View File

@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.width = 1280,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@ -140,6 +146,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Chuwi Hi10 Pro (CWI529) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@ -205,6 +217,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
}, { /* KD Kurio Smart C15200 2-in-1 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution checks makes the quirk
@ -223,10 +242,15 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330 */
}, { /* Lenovo Ideapad D330-10IGM (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
@ -237,6 +261,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
},
.driver_data = (void *)&lcd1280x1920_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),

View File

@ -2,7 +2,7 @@
config DRM_GUD
tristate "GUD USB Display"
depends on DRM && USB
depends on DRM && USB && MMU
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER

View File

@ -214,7 +214,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(i915_tt);
}

View File

@ -1278,6 +1278,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
if (slave)
return;
nouveau_ttm_tt_unbind(bdev, ttm);
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
@ -1291,8 +1293,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
ttm_agp_unbind(ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}

View File

@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
nouveau_sgdma_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}

View File

@ -393,6 +393,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
depends on DRM_MIPI_DSI
select VIDEOMODE_HELPERS
config DRM_PANEL_SAMSUNG_S6D27A1
tristate "Samsung S6D27A1 DPI panel driver"
depends on OF && SPI && GPIOLIB
select DRM_MIPI_DBI
help
Say Y here if you want to enable support for the Samsung
S6D27A1 DPI 480x800 panel.
This panel can be found in the Samsung Galaxy Ace 2
GT-I8160 mobile phone.
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF

View File

@ -39,6 +39,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o

View File

@ -0,0 +1,320 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
* Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
*/
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
#define S6D27A1_DISPCTL 0xF2 /* Display Control */
#define S6D27A1_MANPWR 0xF3 /* Manual Control */
#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
#define S6D27A1_SRCCTL 0xF6 /* Source Control */
#define S6D27A1_PANELCTL 0xF7 /* Panel Control */
static const u8 s6d27a1_dbi_read_commands[] = {
S6D27A1_READID1,
S6D27A1_READID2,
S6D27A1_READID3,
0, /* sentinel */
};
struct s6d27a1 {
struct device *dev;
struct mipi_dbi dbi;
struct drm_panel panel;
struct gpio_desc *reset;
struct regulator_bulk_data regulators[2];
};
static const struct drm_display_mode s6d27a1_480_800_mode = {
/*
* The vendor driver states that the S6D27A1 panel
* has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
*/
.clock = 24960,
.hdisplay = 480,
.hsync_start = 480 + 63,
.hsync_end = 480 + 63 + 2,
.htotal = 480 + 63 + 2 + 63,
.vdisplay = 800,
.vsync_start = 800 + 11,
.vsync_end = 800 + 11 + 2,
.vtotal = 800 + 11 + 2 + 10,
.width_mm = 50,
.height_mm = 84,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
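/*
 * Cross-check of the mode above: htotal = 480 + 63 + 2 + 63 = 608,
 * vtotal = 800 + 11 + 2 + 10 = 823, and .clock is given in kHz, so
 * the refresh rate comes out to 24960000 / (608 * 823) ~= 49.9 Hz.
 */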
static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
{
return container_of(panel, struct s6d27a1, panel);
}
static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
{
struct mipi_dbi *dbi = &ctx->dbi;
u8 id1, id2, id3;
int ret;
ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 1\n");
return;
}
ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 2\n");
return;
}
ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 3\n");
return;
}
dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
}
static int s6d27a1_power_on(struct s6d27a1 *ctx)
{
struct mipi_dbi *dbi = &ctx->dbi;
int ret;
/* Power up */
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
ctx->regulators);
if (ret) {
dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
msleep(20);
/* Assert reset >=1 ms */
gpiod_set_value_cansleep(ctx->reset, 1);
usleep_range(1000, 5000);
/* De-assert reset */
gpiod_set_value_cansleep(ctx->reset, 0);
/* Wait >= 10 ms */
msleep(20);
/*
* Exit sleep mode and initialize display - some hammering is
* necessary.
*/
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
/* Magic to unlock level 2 control of the display */
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
/* Configure resolution to 480RGBx800 */
mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
0x44, 0x05, 0x88, 0x4B, 0x50);
mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
0x01, 0x09, 0x0D, 0x0A, 0x0E,
0x0B, 0x0F, 0x0C, 0x10, 0x01,
0x11, 0x12, 0x13, 0x14, 0x05,
0x06, 0x07, 0x08, 0x01, 0x09,
0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
0x0C, 0x10, 0x01, 0x11, 0x12,
0x13, 0x14);
/* lock the level 2 control */
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
s6d27a1_read_mtp_id(ctx);
return 0;
}
static int s6d27a1_power_off(struct s6d27a1 *ctx)
{
/* Go into RESET and disable regulators */
gpiod_set_value_cansleep(ctx->reset, 1);
return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
ctx->regulators);
}
static int s6d27a1_unprepare(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
msleep(120);
return s6d27a1_power_off(to_s6d27a1(panel));
}
static int s6d27a1_disable(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(25);
return 0;
}
static int s6d27a1_prepare(struct drm_panel *panel)
{
return s6d27a1_power_on(to_s6d27a1(panel));
}
static int s6d27a1_enable(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static int s6d27a1_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct drm_display_mode *mode;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
if (!mode) {
dev_err(ctx->dev, "failed to add mode\n");
return -ENOMEM;
}
connector->display_info.bpc = 8;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
connector->display_info.bus_flags =
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
return 1;
}
static const struct drm_panel_funcs s6d27a1_drm_funcs = {
.disable = s6d27a1_disable,
.unprepare = s6d27a1_unprepare,
.prepare = s6d27a1_prepare,
.enable = s6d27a1_enable,
.get_modes = s6d27a1_get_modes,
};
static int s6d27a1_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct s6d27a1 *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
/*
* VCI is the analog voltage supply
* VCCIO is the digital I/O voltage supply
*/
ctx->regulators[0].supply = "vci";
ctx->regulators[1].supply = "vccio";
ret = devm_regulator_bulk_get(dev,
ARRAY_SIZE(ctx->regulators),
ctx->regulators);
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset)) {
ret = PTR_ERR(ctx->reset);
return dev_err_probe(dev, ret, "no RESET GPIO\n");
}
ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
spi_set_drvdata(spi, ctx);
drm_panel_add(&ctx->panel);
return 0;
}
static int s6d27a1_remove(struct spi_device *spi)
{
struct s6d27a1 *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
return 0;
}
static const struct of_device_id s6d27a1_match[] = {
{ .compatible = "samsung,s6d27a1", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, s6d27a1_match);
static struct spi_driver s6d27a1_driver = {
.probe = s6d27a1_probe,
.remove = s6d27a1_remove,
.driver = {
.name = "s6d27a1-panel",
.of_match_table = s6d27a1_match,
},
};
module_spi_driver(s6d27a1_driver);
MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
MODULE_LICENSE("GPL v2");

View File

@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(ttm);
}

View File

@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
dma_unmap_page(&pdev->dev, entry->busaddr[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;

View File

@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
{
struct radeon_ttm_tt *gtt = (void *)ttm;
radeon_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@ -574,6 +571,8 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
radeon_ttm_tt_unbind(bdev, ttm);
if (gtt && gtt->userptr) {
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct radeon_device *rdev = radeon_get_rdev(bdev);
if (rdev->flags & RADEON_IS_AGP) {
ttm_agp_unbind(ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}

View File

@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else

View File

@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
return true;
/* check for the sector layout bit */
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;

View File

@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB
depends on DRM && USB && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
depends on DRM
depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help

View File

@ -1224,6 +1224,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
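This is the core-side half of the ttm_tt_destroy_common() removal seen across the driver hunks: TTM itself now unpopulates before destroying, and ttm_tt_fini() (see the ttm_tt.c hunk below) takes over the swap-storage cleanup. Under those assumptions, a driver destroy callback reduces to roughly the following sketch; all sketch_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>

struct sketch_tt {
	struct ttm_tt ttm;
	/* driver-private state would live here */
};

static void sketch_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	/* driver-specific GART/AGP unbind would go here */
}

/* No ttm_tt_destroy_common() call anymore: the core has already
 * unpopulated the tt, and ttm_tt_fini() drops any swap storage. */
static void sketch_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct sketch_tt *tt = container_of(ttm, struct sketch_tt, ttm);

	sketch_backend_unbind(bdev, ttm);
	ttm_tt_fini(&tt->ttm);
	kfree(tt);
}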

View File

@ -122,17 +122,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_unpopulate(bdev, ttm);
if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@ -167,6 +156,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
if (ttm->pages)
kvfree(ttm->pages);
else

View File

@ -4,6 +4,7 @@ config DRM_UDL
depends on DRM
depends on USB
depends on USB_ARCH_HAS_HCD
depends on MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help

View File

@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
@ -50,87 +51,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
static const struct drm_gem_object_funcs vgem_gem_object_funcs;
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
kvfree(vgem_obj->pages);
mutex_destroy(&vgem_obj->pages_lock);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
drm_gem_object_release(obj);
kfree(vgem_obj);
}
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
if (page_offset >= num_pages)
return VM_FAULT_SIGBUS;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
vmf->page = obj->pages[page_offset];
ret = 0;
}
mutex_unlock(&obj->pages_lock);
if (ret) {
struct page *page;
page = shmem_read_mapping_page(
file_inode(obj->base.filp)->i_mapping,
page_offset);
if (!IS_ERR(page)) {
vmf->page = page;
ret = 0;
} else switch (PTR_ERR(page)) {
case -ENOSPC:
case -ENOMEM:
ret = VM_FAULT_OOM;
break;
case -EBUSY:
ret = VM_FAULT_RETRY;
break;
case -EFAULT:
case -EINVAL:
ret = VM_FAULT_SIGBUS;
break;
default:
WARN_ON(PTR_ERR(page));
ret = VM_FAULT_SIGBUS;
break;
}
}
return ret;
}
static const struct vm_operations_struct vgem_gem_vm_ops = {
.fault = vgem_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
kfree(vfile);
}
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
int ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
obj->base.funcs = &vgem_gem_object_funcs;
ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
if (ret) {
kfree(obj);
return ERR_PTR(ret);
}
mutex_init(&obj->pages_lock);
return obj;
}
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
drm_gem_object_release(&obj->base);
kfree(obj);
}
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
struct drm_file *file,
unsigned int *handle,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
int ret;
obj = __vgem_gem_create(dev, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
if (ret) {
drm_gem_object_put(&obj->base);
return ERR_PTR(ret);
}
return &obj->base;
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct drm_gem_object *gem_object;
u64 pitch, size;
pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch;
if (size == 0)
return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size);
if (IS_ERR(gem_object))
return PTR_ERR(gem_object);
args->size = gem_object->size;
args->pitch = pitch;
drm_gem_object_put(gem_object);
DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
unsigned long flags = vma->vm_flags;
int ret;
struct drm_gem_shmem_object *obj;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
/* Keep the WC mmapping set by drm_gem_mmap() but our pages
* are ordinary and not special.
/*
* vgem doesn't have any begin/end cpu access ioctls, therefore must use
* coherent memory or dma-buf sharing just won't work.
*/
vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
obj->map_wc = true;
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = vgem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.release = drm_release,
};
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
mutex_lock(&bo->pages_lock);
if (bo->pages_pin_count++ == 0) {
struct page **pages;
pages = drm_gem_get_pages(&bo->base);
if (IS_ERR(pages)) {
bo->pages_pin_count--;
mutex_unlock(&bo->pages_lock);
return pages;
}
bo->pages = pages;
}
mutex_unlock(&bo->pages_lock);
return bo->pages;
}
static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
mutex_lock(&bo->pages_lock);
if (--bo->pages_pin_count == 0) {
drm_gem_put_pages(&bo->base, bo->pages, true, true);
bo->pages = NULL;
}
mutex_unlock(&bo->pages_lock);
}
static int vgem_prime_pin(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
/* Flush the object from the CPU cache so that importers can rely
* on coherent indirect access via the exported dma-address.
*/
drm_clflush_pages(pages, n_pages);
return 0;
}
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
vgem_unpin_pages(bo);
}
static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
struct drm_vgem_gem_object *obj;
int npages;
obj = __vgem_gem_create(dev, attach->dmabuf->size);
if (IS_ERR(obj))
return ERR_CAST(obj);
npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
obj->table = sg;
obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!obj->pages) {
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
}
obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
void *vaddr;
pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
vunmap(map->vaddr);
vgem_unpin_pages(bo);
}
static int vgem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
int ret;
if (obj->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!obj->filp)
return -ENODEV;
ret = call_mmap(obj->filp, vma);
if (ret)
return ret;
vma_set_file(vma, obj->filp);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
return 0;
}
static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
.free = vgem_gem_free_object,
.pin = vgem_prime_pin,
.unpin = vgem_prime_unpin,
.get_sg_table = vgem_prime_get_sg_table,
.vmap = vgem_prime_vmap,
.vunmap = vgem_prime_vunmap,
.vm_ops = &vgem_gem_vm_ops,
};
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
.dumb_create = vgem_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = vgem_prime_import,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
.gem_prime_mmap = vgem_prime_mmap,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,

View File

@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
struct device *dev,
enum dma_data_direction dir);
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
struct sg_table *sgt,
enum dma_data_direction dir);
#endif

View File

@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
return 0;
}
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
if (virtio_gpu_is_vram(bo))
return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
return drm_gem_map_dma_buf(attach, dir);
}
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
if (virtio_gpu_is_vram(bo)) {
virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
return;
}
drm_gem_unmap_dma_buf(attach, sgt, dir);
}
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
.cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.map_dma_buf = virtgpu_gem_map_dma_buf,
.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,

View File

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
#include <linux/dma-mapping.h>
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return ret;
}
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
struct device *dev,
enum dma_data_direction dir)
{
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
struct sg_table *sgt;
dma_addr_t addr;
int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
// Virtio devices can access the dma-buf via its UUID. Return a stub
// sg_table so the dma-buf API still works.
if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
ret = -EIO;
goto out;
}
return sgt;
}
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (ret)
goto out;
addr = dma_map_resource(dev, vram->vram_node.start,
vram->vram_node.size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
ret = dma_mapping_error(dev, addr);
if (ret)
goto out;
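	/* VRAM objects carry no struct pages, so only the DMA side of this
	 * single entry is meaningful; hence the NULL page below. */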
sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
sg_dma_address(sgt->sgl) = addr;
sg_dma_len(sgt->sgl) = vram->vram_node.size;
return sgt;
out:
sg_free_table(sgt);
kfree(sgt);
return ERR_PTR(ret);
}
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
if (sgt->nents) {
dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
sg_dma_len(sgt->sgl), dir,
DMA_ATTR_SKIP_CPU_SYNC);
}
sg_free_table(sgt);
kfree(sgt);
}
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,

View File

@ -522,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
vmw_ttm_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ttm_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
ttm_tt_fini(ttm);
if (vmw_be->mob)
vmw_mob_destroy(vmw_be->mob);
@ -574,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
dma_ttm);
unsigned int i;
vmw_ttm_unbind(bdev, ttm);
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
vmw_tt->mob = NULL;

View File

@ -167,7 +167,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT

View File

@ -135,13 +135,6 @@ void ttm_tt_fini(struct ttm_tt *ttm);
*/
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_destroy_common:
*
* Called from driver to destroy common path.
*/
void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
*

View File

@ -1,109 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* seqno-fence, using a dma-buf to synchronize fencing
*
* Copyright (C) 2012 Texas Instruments
* Copyright (C) 2012 Canonical Ltd
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
*/
#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H
#include <linux/dma-fence.h>
#include <linux/dma-buf.h>
enum seqno_fence_condition {
SEQNO_FENCE_WAIT_GEQUAL,
SEQNO_FENCE_WAIT_NONZERO
};
struct seqno_fence {
struct dma_fence base;
const struct dma_fence_ops *ops;
struct dma_buf *sync_buf;
uint32_t seqno_ofs;
enum seqno_fence_condition condition;
};
extern const struct dma_fence_ops seqno_fence_ops;
/**
* to_seqno_fence - cast a fence to a seqno_fence
* @fence: fence to cast to a seqno_fence
*
* Returns NULL if the fence is not a seqno_fence,
* or the seqno_fence otherwise.
*/
static inline struct seqno_fence *
to_seqno_fence(struct dma_fence *fence)
{
if (fence->ops != &seqno_fence_ops)
return NULL;
return container_of(fence, struct seqno_fence, base);
}
/**
* seqno_fence_init - initialize a seqno fence
* @fence: seqno_fence to initialize
* @lock: pointer to spinlock to use for fence
* @sync_buf: buffer containing the memory location to signal on
* @context: the execution context this fence is a part of
* @seqno_ofs: the offset within @sync_buf
* @seqno: the sequence # to signal on
* @cond: fence wait condition
* @ops: the fence_ops for operations on this seqno fence
*
* This function initializes a struct seqno_fence with passed parameters,
* and takes a reference on sync_buf which is released on fence destruction.
*
* A seqno_fence is a dma_fence which can complete in software when
* enable_signaling is called, but it also completes when
* (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
*
* The seqno_fence will take a refcount on the sync_buf until it's
* destroyed, but actual lifetime of sync_buf may be longer if one of the
* callers take a reference to it.
*
* Certain hardware have instructions to insert this type of wait condition
* in the command stream, so no intervention from software would be needed.
* This type of fence can be destroyed before completed, however a reference
* on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
* dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
* device's vm can be expensive.
*
* It is recommended for creators of seqno_fence to call dma_fence_signal()
* before destruction. This will prevent possible issues from wraparound at
* time of issue vs time of check, since users can check dma_fence_is_signaled()
* before submitting instructions for the hardware to wait on the fence.
* However, when ops.enable_signaling is not called, it doesn't have to be
* done as soon as possible, just before there's any real danger of seqno
* wraparound.
*/
static inline void
seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
struct dma_buf *sync_buf, uint32_t context,
uint32_t seqno_ofs, uint32_t seqno,
enum seqno_fence_condition cond,
const struct dma_fence_ops *ops)
{
BUG_ON(!fence || !sync_buf || !ops);
BUG_ON(!ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
/*
* ops is used in dma_fence_init for get_driver_name, so needs to be
* initialized first
*/
fence->ops = ops;
dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
get_dma_buf(sync_buf);
fence->sync_buf = sync_buf;
fence->seqno_ofs = seqno_ofs;
fence->condition = cond;
}
#endif /* __LINUX_SEQNO_FENCE_H */
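Although the header goes away, the completion rule its kernel-doc describes, (s32)((sync_buf)[seqno_ofs] - seqno) >= 0, is the standard wraparound-safe sequence comparison and is worth spelling out. A standalone illustration in plain C (hypothetical helper name, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* True once the counter has reached or passed `seqno`, even across
 * 32-bit wraparound: the unsigned subtraction wraps modulo 2^32, and
 * reinterpreting the result as signed keeps any difference within
 * +/- 2^31 correctly ordered. */
static bool seqno_passed(uint32_t counter, uint32_t seqno)
{
	return (int32_t)(counter - seqno) >= 0;
}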

View File

@ -373,6 +373,12 @@ extern "C" {
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#define fourcc_mod_get_vendor(modifier) \
(((modifier) >> 56) & 0xff)
#define fourcc_mod_is_vendor(modifier, vendor) \
(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
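The new fourcc_mod_is_vendor() macro is what the malidp and tegra hunks above switch to. A minimal usage sketch (hypothetical wrapper; note the vendor argument is pasted as a token, not passed as a value):

#include <linux/types.h>
#include <drm/drm_fourcc.h>

/* Equivalent to the open-coded (modifier >> 56) == DRM_FORMAT_MOD_VENDOR_ARM
 * comparisons replaced earlier in this series. */
static bool sketch_is_arm_modifier(u64 modifier)
{
	return fourcc_mod_is_vendor(modifier, ARM);
}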