Snap for 11870762 from b43f6c6e6e to android12-5.10-keystone-qcom-release

Change-Id: If129c652fe35ce5b75223f39ac1f2629adfd5cbd
Signed-off-by: Coastguard Worker <android-build-coastguard-worker@google.com>
commit 9759829da1
Author: Android Build Coastguard Worker <android-build-coastguard-worker@google.com>
Date:   2024-05-23 10:00:24 +00:00
391 changed files with 5422 additions and 3419 deletions


@@ -14,9 +14,11 @@ allOf:
 properties:
   compatible:
-    enum:
-      - fsl,imx23-ocotp
-      - fsl,imx28-ocotp
+    items:
+      - enum:
+          - fsl,imx23-ocotp
+          - fsl,imx28-ocotp
+      - const: fsl,ocotp

   "#address-cells":
     const: 1
@@ -40,7 +42,7 @@ additionalProperties: false
 examples:
   - |
     ocotp: efuse@8002c000 {
-        compatible = "fsl,imx28-ocotp";
+        compatible = "fsl,imx28-ocotp", "fsl,ocotp";
         #address-cells = <1>;
         #size-cells = <1>;
         reg = <0x8002c000 0x2000>;


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 205
+SUBLEVEL = 209
 EXTRAVERSION =
 NAME = Dare mighty things

File diff suppressed because it is too large


@@ -1086,6 +1086,7 @@
 devfreq_suspend_device
 devm_clk_register
 devm_devfreq_add_device
+devm_pm_runtime_enable
 devm_regulator_get_exclusive
 dev_pm_opp_find_freq_floor
 dev_pm_opp_get_freq
@@ -1177,7 +1178,6 @@
 drm_dp_link_rate_to_bw_code
 drm_dp_link_train_channel_eq_delay
 drm_dp_link_train_clock_recovery_delay
-drm_edid_block_valid
 drm_encoder_cleanup
 drm_encoder_init
 drm_flip_work_cleanup
@@ -1812,6 +1812,7 @@
 # preserved by --additions-only
 drm_connector_init_with_ddc
+drm_edid_block_valid
 gpiod_direction_input
 idr_alloc_u32
 of_clk_get_by_name


@@ -2607,6 +2607,7 @@
 __traceiter_android_vh_show_suspend_epoch_val
 __traceiter_android_vh_subpage_dma_contig_alloc
 __traceiter_android_vh_timer_calc_index
+__traceiter_android_vh_try_fixup_sea
 __traceiter_android_vh_ufs_check_int_errors
 __traceiter_android_vh_ufs_clock_scaling
 __traceiter_android_vh_ufs_compl_command
@@ -2735,6 +2736,7 @@
 __tracepoint_android_vh_show_suspend_epoch_val
 __tracepoint_android_vh_subpage_dma_contig_alloc
 __tracepoint_android_vh_timer_calc_index
+__tracepoint_android_vh_try_fixup_sea
 __tracepoint_android_vh_ufs_check_int_errors
 __tracepoint_android_vh_ufs_clock_scaling
 __tracepoint_android_vh_ufs_compl_command


@@ -61,7 +61,7 @@ struct rt_sigframe {
 	unsigned int sigret_magic;
 };

-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
 {
 	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT
@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
 #else
 	v2abi.r58 = v2abi.r59 = 0;
 #endif
-	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+	err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
 #endif
 	return err;
 }

-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
 {
 	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT


@@ -347,6 +347,7 @@ usb: target-module@47400000 {
 			<SYSC_IDLE_NO>,
 			<SYSC_IDLE_SMART>,
 			<SYSC_IDLE_SMART_WKUP>;
+	ti,sysc-delay-us = <2>;
 	clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
 	clock-names = "fck";
 	#address-cells = <1>;


@@ -760,7 +760,7 @@ pwrkey@1c {
 		xoadc: xoadc@197 {
 			compatible = "qcom,pm8921-adc";
-			reg = <197>;
+			reg = <0x197>;
 			interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
 			#address-cells = <2>;
 			#size-cells = <0>;


@@ -3,6 +3,7 @@
 menuconfig ARCH_DAVINCI
 	bool "TI DaVinci"
 	depends on ARCH_MULTI_V5
+	select CPU_ARM926T
 	select DAVINCI_TIMER
 	select ZONE_DMA
 	select PM_GENERIC_DOMAINS if PM


@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
 	soc_dev_attr->machine = soc_name;
 	soc_dev_attr->family = omap_get_family();
+	if (!soc_dev_attr->family) {
+		kfree(soc_dev_attr);
+		return;
+	}
 	soc_dev_attr->revision = soc_rev;
 	soc_dev_attr->custom_attr_group = omap_soc_groups[0];

 	soc_dev = soc_device_register(soc_dev_attr);
 	if (IS_ERR(soc_dev)) {
+		kfree(soc_dev_attr->family);
 		kfree(soc_dev_attr);
 		return;
 	}


@@ -804,16 +804,16 @@ static int __init sunxi_mc_smp_init(void)
 	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
 		ret = of_property_match_string(node, "enable-method",
 					       sunxi_mc_smp_data[i].enable_method);
-		if (!ret)
+		if (ret >= 0)
 			break;
 	}

-	is_a83t = sunxi_mc_smp_data[i].is_a83t;
-
 	of_node_put(node);
-	if (ret)
+	if (ret < 0)
 		return -ENODEV;

+	is_a83t = sunxi_mc_smp_data[i].is_a83t;
+
 	if (!sunxi_mc_smp_cpu_table_init())
 		return -EINVAL;
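
Note: of_property_match_string() returns the index of the matched string (>= 0) on success and a negative errno on failure, so the old `if (!ret)` test only ever recognised a match at index 0. A minimal sketch of the corrected pattern (the table and field names are illustrative, not from this driver):

	for (i = 0; i < ARRAY_SIZE(table); i++) {
		ret = of_property_match_string(node, "enable-method",
					       table[i].method);
		if (ret >= 0)	/* ret is the matched index; 0 is valid */
			break;
	}
	of_node_put(node);
	if (ret < 0)		/* no entry matched */
		return -ENODEV;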


@@ -129,7 +129,7 @@ rtc@6f {
 		compatible = "microchip,mcp7940x";
 		reg = <0x6f>;
 		interrupt-parent = <&gpiosb>;
-		interrupts = <5 0>; /* GPIO2_5 */
+		interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
 	};
 };


@@ -38,8 +38,8 @@ leds {
 		user4 {
 			label = "green:user4";
 			gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
-			linux,default-trigger = "panic-indicator";
 			default-state = "off";
+			panic-indicator;
 		};

 		wlan {


@@ -55,8 +55,8 @@ leds {
 		user4 {
 			label = "green:user4";
 			gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
-			linux,default-trigger = "panic-indicator";
 			default-state = "off";
+			panic-indicator;
 		};

 		wlan {


@@ -865,7 +865,7 @@ dss: dss@4a00000 {
 	assigned-clocks = <&k3_clks 67 2>;
 	assigned-clock-parents = <&k3_clks 67 5>;
-	interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+	interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
 	status = "disabled";


@@ -478,6 +478,7 @@ CONFIG_USB_HIDDEV=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y


@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 	unsigned long flags;

 	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
 	irq = __vgic_its_check_cache(dist, db, devid, eventid);
+	if (irq)
+		vgic_get_irq_kref(irq);
+
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

 	return irq;
@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);
+	vgic_put_irq(kvm, irq);

 	return 0;
 }


@@ -356,19 +356,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (test_bit(i, &val)) {
-			/*
-			 * pending_latch is set irrespective of irq type
-			 * (level or edge) to avoid dependency that VM should
-			 * restore irq config before pending info.
-			 */
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-		} else {
+
+		/*
+		 * pending_latch is set irrespective of irq type
+		 * (level or edge) to avoid dependency that VM should
+		 * restore irq config before pending info.
+		 */
+		irq->pending_latch = test_bit(i, &val);
+
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			irq_set_irqchip_state(irq->host_irq,
+					      IRQCHIP_STATE_PENDING,
+					      irq->pending_latch);
 			irq->pending_latch = false;
-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}

+		if (irq->pending_latch)
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		else
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
 		vgic_put_irq(vcpu->kvm, irq);
 	}


@@ -728,6 +728,11 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
 {
 	const struct fault_info *inf;
 	unsigned long siaddr;
+	bool can_fixup = false;
+
+	trace_android_vh_try_fixup_sea(far, esr, regs, &can_fixup);
+	if (can_fixup && fixup_exception(regs))
+		return 0;

 	inf = esr_to_fault_info(esr);
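
Note: a vendor module opts in to this fixup path by attaching a probe to the new hook. A hedged sketch, assuming the register_trace_android_vh_try_fixup_sea() helper that the vendor-hook DECLARE_HOOK machinery conventionally generates; oem_sea_is_recoverable() is hypothetical:

	/* The leading void *data argument is part of the standard
	 * vendor-hook probe signature.
	 */
	static void oem_try_fixup_sea(void *data, unsigned long far,
				      unsigned int esr, struct pt_regs *regs,
				      bool *can_fixup)
	{
		/* Claim only aborts the platform knows it can contain. */
		*can_fixup = oem_sea_is_recoverable(far, esr);
	}

	/* in module init */
	register_trace_android_vh_try_fixup_sea(oem_try_fixup_sea, NULL);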


@@ -468,7 +468,6 @@ config MACH_LOONGSON2EF
 config MACH_LOONGSON64
 	bool "Loongson 64-bit family of machines"
-	select ARCH_DMA_DEFAULT_COHERENT
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
@@ -1380,7 +1379,6 @@ config CPU_LOONGSON64
 	select CPU_SUPPORTS_MSA
 	select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
 	select CPU_MIPSR2_IRQ_VI
-	select DMA_NONCOHERENT
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_ASID_BITS_VARIABLE


@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
 	i2c_register_board_info(0, db1200_i2c_devs,
 				ARRAY_SIZE(db1200_i2c_devs));
 	spi_register_board_info(db1200_spi_devs,
-				ARRAY_SIZE(db1200_i2c_devs));
+				ARRAY_SIZE(db1200_spi_devs));

 	/* SWITCHES:	S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
 	 *		S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)


@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
 	i2c_register_board_info(0, db1550_i2c_devs,
 				ARRAY_SIZE(db1550_i2c_devs));
 	spi_register_board_info(db1550_spi_devs,
-				ARRAY_SIZE(db1550_i2c_devs));
+				ARRAY_SIZE(db1550_spi_devs));

 	c = clk_get(NULL, "psc0_intclk");
 	if (!IS_ERR(c)) {


@@ -5,7 +5,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>

-#define dmi_early_remap(x, l)	ioremap_cache(x, l)
+#define dmi_early_remap(x, l)	ioremap(x, l)
 #define dmi_early_unmap(x, l)	iounmap(x)
 #define dmi_remap(x, l)		ioremap_cache(x, l)
 #define dmi_unmap(x)		iounmap(x)


@@ -117,8 +117,7 @@ struct irq_source_routing_table {
 	u64 pci_io_start_addr;
 	u64 pci_io_end_addr;
 	u64 pci_config_addr;
-	u16 dma_mask_bits;
-	u16 dma_noncoherent;
+	u32 dma_mask_bits;
 } __packed;

 struct interface_info {


@@ -322,11 +322,11 @@ static void __init bootmem_init(void)
 		panic("Incorrect memory mapping !!!");

 	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 #ifdef CONFIG_HIGHMEM
-		highstart_pfn = PFN_DOWN(HIGHMEM_START);
+		highstart_pfn = max_low_pfn;
 		highend_pfn = max_pfn;
 #else
-		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 		max_pfn = max_low_pfn;
 #endif
 	}


@@ -13,8 +13,6 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  */
-#include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/pci_ids.h>
 #include <asm/bootinfo.h>
@@ -133,14 +131,8 @@ void __init prom_init_env(void)
 	loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
 	loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
 	if (loongson_sysconf.dma_mask_bits < 32 ||
-	    loongson_sysconf.dma_mask_bits > 64) {
+	    loongson_sysconf.dma_mask_bits > 64)
 		loongson_sysconf.dma_mask_bits = 32;
-		dma_default_coherent = true;
-	} else {
-		dma_default_coherent = !eirq_source->dma_noncoherent;
-	}
-	pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");

 	loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
 	loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;


@@ -38,14 +38,6 @@ extern char start_virt_trampolines[];
 extern char end_virt_trampolines[];
 #endif

-static inline int in_kernel_text(unsigned long addr)
-{
-	if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
-		return 1;
-
-	return 0;
-}
-
 static inline unsigned long kernel_toc_addr(void)
 {
 	/* Defined by the linker, see vmlinux.lds.S */


@@ -58,10 +58,10 @@ _GLOBAL(ppc_save_regs)
 	lbz	r0,PACAIRQSOFTMASK(r13)
 	PPC_STL	r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
 #endif
-	/* go up one stack frame for SP */
-	PPC_LL	r4,0(r1)
-	PPC_STL	r4,1*SZL(r3)
+	/* store current SP */
+	PPC_STL	r1,1*SZL(r3)
 	/* get caller's LR */
+	PPC_LL	r4,0(r1)
 	PPC_LL	r0,LRSAVE(r4)
 	PPC_STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
 	mflr	r0


@@ -38,7 +38,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
 # so it is only needed for modules, and only for older linkers which
 # do not support --save-restore-funcs
 ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
-extra-$(CONFIG_PPC64)	+= crtsavres.o
+always-$(CONFIG_PPC64)	+= crtsavres.o
 endif

 obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \


@@ -292,6 +292,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 	attr_group->attrs = attrs;
 	do {
 		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
+		if (!ev_val_str)
+			continue;
 		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
 		if (!dev_str)
 			continue;
@@ -299,6 +301,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 		attrs[j++] = dev_str;
 		if (pmu->events[i].scale) {
 			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
+			if (!ev_scale_str)
+				continue;
 			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
 			if (!dev_str)
 				continue;
@@ -308,6 +312,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 		if (pmu->events[i].unit) {
 			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
+			if (!ev_unit_str)
+				continue;
 			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
 			if (!dev_str)
 				continue;


@@ -177,6 +177,7 @@ config ISS4xx
 config CURRITUCK
 	bool "IBM Currituck (476fpe) Support"
 	depends on PPC_47x
+	select I2C
 	select SWIOTLB
 	select 476FPE
 	select FORCE_PCI


@@ -278,6 +278,8 @@ int __init opal_event_init(void)
 		else
 			name = kasprintf(GFP_KERNEL, "opal");

+		if (!name)
+			continue;
 		/* Install interrupt handler */
 		rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
 				 name, NULL);


@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
 		j = 0;

 		pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
+		if (!pcaps[i].pg.name) {
+			kfree(pcaps[i].pattrs);
+			kfree(pcaps[i].pg.attrs);
+			goto out_pcaps_pattrs;
+		}
+
 		if (has_min) {
 			powercap_add_attr(min, "powercap-min",
 					  &pcaps[i].pattrs[j]);


@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
 	ent->chip = chip;
 	snprintf(ent->name, 16, "%08x", chip);
 	ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
+	if (!ent->path.data) {
+		kfree(ent);
+		return -ENOMEM;
+	}
+
 	ent->path.size = strlen((char *)ent->path.data);

 	dir = debugfs_create_dir(ent->name, root);


@@ -481,7 +481,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
 	int lmb_found;
 	int rc;

-	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);

 	lmb_found = 0;
 	for_each_drmem_lmb(lmb) {
@@ -495,14 +495,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
 		}
 	}

-	if (!lmb_found)
+	if (!lmb_found) {
+		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
 		rc = -EINVAL;
-
-	if (rc)
-		pr_info("Failed to hot-remove memory at %llx\n",
-			lmb->base_addr);
-	else
-		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
+	} else if (rc) {
+		pr_debug("Failed to hot-remove memory at %llx\n",
+			 lmb->base_addr);
+	} else {
+		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
+	}

 	return rc;
 }
@@ -719,8 +720,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
 		if (!drmem_lmb_reserved(lmb))
 			continue;

-		pr_info("Memory at %llx (drc index %x) was hot-added\n",
-			lmb->base_addr, lmb->drc_index);
+		pr_debug("Memory at %llx (drc index %x) was hot-added\n",
+			 lmb->base_addr, lmb->drc_index);
 		drmem_remove_lmb_reservation(lmb);
 	}
 	rc = 0;


@@ -76,7 +76,7 @@ static inline int test_fp_ctl(u32 fpc)
 #define KERNEL_VXR_HIGH	(KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)

 #define KERNEL_VXR	(KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR	(KERNEL_FPC|KERNEL_VXR_V0V7)
+#define KERNEL_FPR	(KERNEL_FPC|KERNEL_VXR_LOW)

 struct kernel_fpu;


@@ -11,6 +11,8 @@
 /* I/O size constraints */
 #define ZPCI_MAX_READ_SIZE	8
 #define ZPCI_MAX_WRITE_SIZE	128
+#define ZPCI_BOUNDARY_SIZE	(1 << 12)
+#define ZPCI_BOUNDARY_MASK	(ZPCI_BOUNDARY_SIZE - 1)

 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT	48
@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
 int zpci_write_block(volatile void __iomem *dst, const void *src,
 		     unsigned long len);

-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
 {
-	int count = len > max ? max : len, size = 1;
+	int offset = dst & ZPCI_BOUNDARY_MASK;
+	int size;

-	while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
-		dst = dst >> 1;
-		src = src >> 1;
-		size = size << 1;
-	}
-	return size;
+	size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+	if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+		return size;
+
+	if (size >= 8)
+		return 8;
+
+	return rounddown_pow_of_two(size);
 }

 static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
 	int size, rc = 0;

 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) src,
-					       (u64) dst, n,
-					       ZPCI_MAX_READ_SIZE);
+		size = zpci_get_max_io_size((u64 __force) src,
+					    (u64) dst, n,
+					    ZPCI_MAX_READ_SIZE);
 		rc = zpci_read_single(dst, src, size);
 		if (rc)
 			break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 		return -EINVAL;

 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) dst,
-					       (u64) src, n,
-					       ZPCI_MAX_WRITE_SIZE);
+		size = zpci_get_max_io_size((u64 __force) dst,
+					    (u64) src, n,
+					    ZPCI_MAX_WRITE_SIZE);
 		if (size > 8) /* main path */
 			rc = zpci_write_block(dst, src, size);
 		else
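
Note: a worked example of the new helper under the constants above (ZPCI_BOUNDARY_SIZE = 4 KiB, ZPCI_MAX_WRITE_SIZE = 128); the addresses are illustrative:

	/* dst = 0x...0ffa (not 8-byte aligned), len = 20, max = 128 */
	offset = dst & ZPCI_BOUNDARY_MASK;	/* = 0xffa */
	size = min3(20, 0x1000 - 0xffa, 128);	/* = min3(20, 6, 128) = 6 */
	/*
	 * dst is not 8-byte aligned and size < 8, so the helper returns
	 * rounddown_pow_of_two(6) = 4: a single 4-byte access that cannot
	 * cross the 4 KiB boundary. The caller's loop then advances and
	 * calls the helper again until n reaches 0.
	 */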


@@ -100,9 +100,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
 	old_fs = enable_sacf_uaccess();
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) dst,
-					       (u64 __force) src, n,
-					       ZPCI_MAX_WRITE_SIZE);
+		size = zpci_get_max_io_size((u64 __force) dst,
+					    (u64 __force) src, n,
+					    ZPCI_MAX_WRITE_SIZE);
 		if (size > 8) /* main path */
 			rc = __pcistb_mio_inuser(dst, src, size, &status);
 		else
@@ -252,9 +252,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
 	old_fs = enable_sacf_uaccess();
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) src,
-					       (u64 __force) dst, n,
-					       ZPCI_MAX_READ_SIZE);
+		size = zpci_get_max_io_size((u64 __force) src,
+					    (u64 __force) dst, n,
+					    ZPCI_MAX_READ_SIZE);
 		rc = __pcilg_mio_inuser(dst, src, size, &status);
 		if (rc)
 			break;


@@ -429,6 +429,7 @@ CONFIG_HID_WIIMOTE=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y


@@ -1080,8 +1080,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
 	} else {
 		local_irq_save(flags);
 		memcpy(addr, opcode, len);
-		local_irq_restore(flags);
 		sync_core();
+		local_irq_restore(flags);

 		/*
 		 * Could also do a CLFLUSH here to speed up CPU recovery; but


@@ -569,7 +569,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

-	int3_emulate_call(regs, regs_get_register(regs, offs));
+	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
+	int3_emulate_jmp(regs, regs_get_register(regs, offs));
 }
 NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);


@@ -24,8 +24,8 @@
 static int kvmclock __initdata = 1;
 static int kvmclock_vsyscall __initdata = 1;
-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static int msr_kvm_system_time __ro_after_init;
+static int msr_kvm_wall_clock __ro_after_init;
 static u64 kvm_sched_clock_offset __ro_after_init;

 static int __init parse_no_kvmclock(char *arg)
@@ -196,7 +196,8 @@ static void kvm_setup_secondary_clock(void)
 void kvmclock_disable(void)
 {
-	native_write_msr(msr_kvm_system_time, 0, 0);
+	if (msr_kvm_system_time)
+		native_write_msr(msr_kvm_system_time, 0, 0);
 }

 static void __init kvmclock_init_mem(void)
@@ -292,7 +293,10 @@ void __init kvmclock_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
 		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
 		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-	} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+	} else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+	} else {
 		return;
 	}


@@ -6,7 +6,7 @@
  */
 int num_digits(int val)
 {
-	int m = 10;
+	long long m = 10;
 	int d = 1;

 	if (val < 0) {
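
Note: the widened type matters for 10-digit inputs. A sketch of the full function (loop body reconstructed from the upstream source): num_digits(2147483647) has to grow m past 10^9, and the next step, m *= 10 = 10^10, overflows a 32-bit int (undefined behaviour) but fits in a long long:

	int num_digits(int val)
	{
		long long m = 10;	/* was int: overflows at 10^10 */
		int d = 1;

		if (val < 0) {
			d++;
			val = -val;
		}
		while (val >= m) {	/* for INT_MAX, m ends at 10^10 */
			m *= 10;
			d++;
		}
		return d;		/* num_digits(INT_MAX) == 10 */
	}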


@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
 # optimization flags.
 KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

+# When LTO is enabled, llvm emits many text sections, which is not supported
+# by kexec. Remove -flto=* flags.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
 # When linking purgatory.ro with -r unresolved symbols are not checked,
 # also link a purgatory.chk binary without -r to check for unresolved symbols.
 PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib


@@ -549,8 +549,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags->sb.shift;

-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+	dd->async_depth = max(1U, 3 * (1U << shift) / 4);

 	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
 }


@@ -7,6 +7,8 @@ FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment
 PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
 POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"

+DTC_FLAGS="${DTC_FLAGS} -@"
+
 MAKE_GOALS="${MAKE_GOALS}
 qcom/sdm845-db845c.dtb
 Image.gz


@@ -1039,9 +1039,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
 void af_alg_free_resources(struct af_alg_async_req *areq)
 {
 	struct sock *sk = areq->sk;
+	struct af_alg_ctx *ctx;

 	af_alg_free_areq_sgls(areq);
 	sock_kfree_s(sk, areq, areq->areqlen);
+
+	ctx = alg_sk(sk)->private;
+	ctx->inflight = false;
 }
 EXPORT_SYMBOL_GPL(af_alg_free_resources);

@@ -1105,11 +1109,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
 					   unsigned int areqlen)
 {
-	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+	struct af_alg_ctx *ctx = alg_sk(sk)->private;
+	struct af_alg_async_req *areq;
+
+	/* Only one AIO request can be in flight. */
+	if (ctx->inflight)
+		return ERR_PTR(-EBUSY);

+	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
 	if (unlikely(!areq))
 		return ERR_PTR(-ENOMEM);

+	ctx->inflight = true;
+
 	areq->areqlen = areqlen;
 	areq->sk = sk;
 	areq->last_rsgl = NULL;


@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	struct crypto_scomp *scomp = *tfm_ctx;
 	void **ctx = acomp_request_ctx(req);
 	struct scomp_scratch *scratch;
+	unsigned int dlen;
 	int ret;

 	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
 		req->dlen = SCOMP_SCRATCH_SIZE;

+	dlen = req->dlen;
+
 	scratch = raw_cpu_ptr(&scomp_scratch);
 	spin_lock(&scratch->lock);

@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 				ret = -ENOMEM;
 				goto out;
 			}
+		} else if (req->dlen > dlen) {
+			ret = -ENOSPC;
+			goto out;
 		}
 		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
 					 1);


@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	static u32 err_seq;

 	estatus = extlog_elog_entry_check(cpu, bank);
-	if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
+	if (!estatus)
 		return NOTIFY_DONE;

+	if (mce->kflags & MCE_HANDLED_CEC) {
+		estatus->block_status = 0;
+		return NOTIFY_DONE;
+	}
+
 	memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);

 	/* clear record status to enable BIOS to update it again */
 	estatus->block_status = 0;


@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
 			struct acpi_lpit_native *lpit_native)
 {
 	info->frequency = lpit_native->counter_frequency ?
-				lpit_native->counter_frequency : tsc_khz * 1000;
+				lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
 	if (!info->frequency)
 		info->frequency = 1;


@@ -1788,12 +1788,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
 		return;
 	count++;

-	acpi_get_parent(device->dev->handle, &acpi_parent);
-
-	pdev = acpi_get_pci_dev(acpi_parent);
-	if (pdev) {
-		parent = &pdev->dev;
-		pci_dev_put(pdev);
+	if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
+		pdev = acpi_get_pci_dev(acpi_parent);
+		if (pdev) {
+			parent = &pdev->dev;
+			pci_dev_put(pdev);
+		}
 	}

 	memset(&props, 0, sizeof(struct backlight_properties));


@@ -639,6 +639,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
  * @index: Index of the reference to return
  * @num_args: Maximum number of arguments after each reference
  * @args: Location to store the returned reference with optional arguments
+ *	  (may be NULL)
  *
  * Find property with @name, verifify that it is a package containing at least
  * one object reference and if so, store the ACPI device object pointer to the
@@ -697,6 +698,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 		if (ret)
 			return ret == -ENODEV ? -EINVAL : ret;

+		if (!args)
+			return 0;
+
 		args->fwnode = acpi_fwnode_handle(device);
 		args->nargs = 0;
 		return 0;


@@ -455,6 +455,13 @@ static const struct dmi_system_id asus_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
 		},
 	},
+	{
+		/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+			DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
+		},
+	},
 	{
 		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
 		.matches = {


@@ -5113,7 +5113,7 @@ static __poll_t binder_poll(struct file *filp,
 	thread = binder_get_thread(proc);
 	if (!thread)
-		return POLLERR;
+		return EPOLLERR;

 	binder_inner_proc_lock(thread->proc);
 	thread->looper |= BINDER_LOOPER_STATE_POLL;


@@ -272,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	}
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return 0;

@@ -305,7 +305,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 err_no_vma:
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
 }

@@ -360,8 +360,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 			continue;
 		if (!buffer->async_transaction)
 			continue;
-		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
-			+ sizeof(struct binder_buffer);
+		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
 		num_buffers++;
 	}

@@ -422,6 +421,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			      alloc->pid, extra_buffers_size);
 		return ERR_PTR(-EINVAL);
 	}

+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
 	if (is_async &&
 	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
@@ -431,9 +433,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		return ERR_PTR(-ENOSPC);
 	}

-	/* Pad 0-size buffers so they get assigned unique addresses */
-	size = max(size, sizeof(void *));
-
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
@@ -535,7 +534,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	buffer->pid = pid;
 	buffer->oneway_spam_suspect = false;
 	if (is_async) {
-		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		alloc->free_async_space -= size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_alloc_buf size %zd async free %zd\n",
 			      alloc->pid, size, alloc->free_async_space);
@@ -573,7 +572,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 *              is the sum of the three given sizes (each rounded up to
 *              pointer-sized boundary)
 *
- * Return:	The allocated buffer or %NULL if error
+ * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 */
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
@@ -673,8 +672,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

 	if (buffer->async_transaction) {
-		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+		alloc->free_async_space += buffer_size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_free_buf size %zd async free %zd\n",
 			      alloc->pid, size, alloc->free_async_space);
@@ -722,7 +720,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	/*
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
-	 * binder_alloc_free_buf_locked(). However, that could
+	 * binder_free_buf_locked(). However, that could
 	 * increase contention for the alloc mutex if clear_on_free
 	 * is used frequently for large buffers. The mutex is not
 	 * needed for correctness here.
@@ -1013,7 +1011,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	vma = binder_alloc_get_vma(alloc);
+	vma = find_vma(mm, page_addr);
+	if (vma && vma != binder_alloc_get_vma(alloc))
+		goto err_invalid_vma;

 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -1039,6 +1039,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;

+err_invalid_vma:
+	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:


@@ -219,6 +219,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_fixup_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);


@@ -1479,6 +1479,28 @@ void pm_runtime_enable(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_enable);

+static void pm_runtime_disable_action(void *data)
+{
+	pm_runtime_dont_use_autosuspend(data);
+	pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+	pm_runtime_enable(dev);
+
+	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+
 /**
  * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
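
Note: a minimal usage sketch for the new helper in a driver probe path (the foo_* names are illustrative). Because the devres action also calls pm_runtime_dont_use_autosuspend(), a driver pairing it with autosuspend setup no longer needs explicit cleanup on error or remove:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);

		ret = devm_pm_runtime_enable(dev);	/* undone at unbind */
		if (ret)
			return ret;

		return 0;
	}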


@@ -544,6 +544,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
 	if (nargs > NR_FWNODE_REFERENCE_ARGS)
 		return -EINVAL;

+	if (!args)
+		return 0;
+
 	args->fwnode = software_node_get(refnode);
 	args->nargs = nargs;


@@ -471,7 +471,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
 	return data;
 }

-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 {
 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 	const unsigned char *p_left = data, *p_h4;
@@ -510,25 +510,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 			bt_dev_err(bdev->hdev,
 				   "Frame reassembly failed (%d)", err);
 			bdev->rx_skb = NULL;
-			return err;
+			return;
 		}

 		sz_left -= sz_h4;
 		p_left += sz_h4;
 	}
-
-	return 0;
 }

 static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
 				 size_t count)
 {
 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
-	int err;

-	err = btmtkuart_recv(bdev->hdev, data, count);
-	if (err < 0)
-		return err;
+	btmtkuart_recv(bdev->hdev, data, count);

 	bdev->hdev->stat.byte_rx += count;


@@ -2093,13 +2093,23 @@ static int sysc_reset(struct sysc *ddata)
 		sysc_val = sysc_read_sysconfig(ddata);
 		sysc_val |= sysc_mask;
 		sysc_write(ddata, sysc_offset, sysc_val);
-		/* Flush posted write */
+
+		/*
+		 * Some devices need a delay before reading registers
+		 * after reset. Presumably a srst_udelay is not needed
+		 * for devices that use a rstctrl register reset.
+		 */
+		if (ddata->cfg.srst_udelay)
+			fsleep(ddata->cfg.srst_udelay);
+
+		/*
+		 * Flush posted write. For devices needing srst_udelay
+		 * this should trigger an interconnect error if the
+		 * srst_udelay value is needed but not configured.
+		 */
 		sysc_val = sysc_read_sysconfig(ddata);
 	}

-	if (ddata->cfg.srst_udelay)
-		fsleep(ddata->cfg.srst_udelay);
-
 	if (ddata->post_reset_quirk)
 		ddata->post_reset_quirk(ddata);


@@ -888,10 +888,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	r[0] = r_div ? (r_div & 0xff) : 1;
 	r[1] = (r_div >> 8) & 0xff;
 	r[2] = (r_div >> 16) & 0xff;

-	err = regmap_bulk_write(output->data->regmap,
-			SI5341_OUT_R_REG(output), r, 3);
-
-	return 0;
+	return regmap_bulk_write(output->data->regmap,
+				 SI5341_OUT_R_REG(output), r, 3);
 }

 static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)


@@ -4579,6 +4579,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
 	if (!ret) {
 		devres->clk = clk;
 		devres->nb = nb;
+		devres_add(dev, devres);
 	} else {
 		devres_free(devres);
 	}


@@ -38,8 +38,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
 	.config_ctl_hi_val = 0x00002267,
 	.config_ctl_hi1_val = 0x00000024,
 	.test_ctl_val = 0x00000000,
-	.test_ctl_hi_val = 0x00000002,
-	.test_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi_val = 0x00000000,
+	.test_ctl_hi1_val = 0x00000020,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x000000d0,


@@ -37,6 +37,7 @@ static struct alpha_pll_config video_pll0_config = {
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002267,
 	.config_ctl_hi1_val = 0x00000024,
+	.test_ctl_hi1_val = 0x00000020,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x000000D0,
@@ -218,6 +219,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
 static const struct qcom_reset_map video_cc_sm8150_resets[] = {
 	[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
+	[VIDEO_CC_MVS0_BCR] = { 0x870 },
+	[VIDEO_CC_MVS1_BCR] = { 0x8b0 },
+	[VIDEO_CC_MVSC_BCR] = { 0x810 },
 };

 static const struct qcom_cc_desc video_cc_sm8150_desc = {


@@ -489,7 +489,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
 	GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
 	GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
-	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
 	GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
 	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
 	GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),


@@ -83,7 +83,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 static const struct clk_ops zynqmp_clk_mux_ops = {
 	.get_parent = zynqmp_clk_mux_get_parent,
 	.set_parent = zynqmp_clk_mux_set_parent,
-	.determine_rate = __clk_mux_determine_rate,
+	.determine_rate = __clk_mux_determine_rate_closest,
 };

 static const struct clk_ops zynqmp_clk_mux_ro_ops = {


@@ -109,49 +109,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
 	return DIV_ROUND_UP_ULL(parent_rate, value);
 }

-static void zynqmp_get_divider2_val(struct clk_hw *hw,
-				    unsigned long rate,
-				    struct zynqmp_clk_divider *divider,
-				    int *bestdiv)
-{
-	int div1;
-	int div2;
-	long error = LONG_MAX;
-	unsigned long div1_prate;
-	struct clk_hw *div1_parent_hw;
-	struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
-	struct zynqmp_clk_divider *pdivider =
-				to_zynqmp_clk_divider(div2_parent_hw);
-
-	if (!pdivider)
-		return;
-
-	div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
-	if (!div1_parent_hw)
-		return;
-
-	div1_prate = clk_hw_get_rate(div1_parent_hw);
-	*bestdiv = 1;
-	for (div1 = 1; div1 <= pdivider->max_div;) {
-		for (div2 = 1; div2 <= divider->max_div;) {
-			long new_error = ((div1_prate / div1) / div2) - rate;
-
-			if (abs(new_error) < abs(error)) {
-				*bestdiv = div2;
-				error = new_error;
-			}
-			if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-				div2 = div2 << 1;
-			else
-				div2++;
-		}
-		if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
-			div1 = div1 << 1;
-		else
-			div1++;
-	}
-}
-
 /**
  * zynqmp_clk_divider_round_rate() - Round rate of divider clock
  * @hw:		handle between common and hardware-specific interfaces
@@ -170,6 +127,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
 	u32 div_type = divider->div_type;
 	u32 bestdiv;
 	int ret;
+	u8 width;

 	/* if read only, just return current value */
 	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
@@ -189,23 +147,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
 		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
 	}

-	bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
-
-	/*
-	 * In case of two divisors, compute best divider values and return
-	 * divider2 value based on compute value. div1 will be automatically
-	 * set to optimum based on required total divider value.
-	 */
-	if (div_type == TYPE_DIV2 &&
-	    (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
-		zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
-	}
-
-	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
-		bestdiv = rate % *prate ? 1 : bestdiv;
-	bestdiv = min_t(u32, bestdiv, divider->max_div);
-	*prate = rate * bestdiv;
+	width = fls(divider->max_div);
+
+	rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+
+	if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
+		*prate = rate;

 	return rate;
 }


@@ -163,7 +163,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
 	struct device_node *np = of_cpu_device_node_get(0);
 	bool ret = false;

-	if (of_get_property(np, "operating-points-v2", NULL))
+	if (of_property_present(np, "operating-points-v2"))
 		ret = true;
 	of_node_put(np);


@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
 	cpu_dev = get_cpu_device(0);

-	if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
+	if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
 		return -ENODEV;

 	if (of_machine_is_compatible("fsl,imx7ulp")) {


@@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
 	u32 val;
 	int ret;

-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+	if (of_property_present(dev->of_node, "nvmem-cells")) {
 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
 		if (ret)
 			return ret;
@@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
 	u32 val;
 	int ret = 0;

-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+	if (of_property_present(dev->of_node, "nvmem-cells")) {
 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
 		if (ret)
 			return ret;


@@ -244,8 +244,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)

 #ifdef CONFIG_COMMON_CLK
 	/* dummy clock provider as needed by OPP if clocks property is used */
-	if (of_find_property(dev->of_node, "#clock-cells", NULL))
-		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+	if (of_property_present(dev->of_node, "#clock-cells")) {
+		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+		if (ret)
+			return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+	}
 #endif

 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);


@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
 	struct device_node *np = of_cpu_device_node_get(0);
 	bool ret = false;

-	if (of_get_property(np, "operating-points-v2", NULL))
+	if (of_property_present(np, "operating-points-v2"))
 		ret = true;
 	of_node_put(np);


@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
-		if (dma_mapping_error(wa->dev, wa->dma.address))
+		if (dma_mapping_error(wa->dev, wa->dma.address)) {
+			kfree(wa->address);
+			wa->address = NULL;
			return -ENOMEM;
+		}

		wa->dma.length = len;
	}
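
The fix above plugs a leak: when dma_map_single() fails, the buffer that was just allocated for the workarea must be freed before returning. A minimal sketch of the allocate, map, check, free pattern under that assumption; alloc_and_map and its parameters are illustrative:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *alloc_and_map(struct device *dev, size_t len, dma_addr_t *dma)
{
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(buf);	/* don't leak the unmapped buffer */
		return NULL;
	}

	return buf;
}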

View File

@@ -1848,9 +1848,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
	crypto_aead_set_flags(ctx->fallback.aead,
			      crypto_aead_get_flags(authenc) &
			      CRYPTO_TFM_REQ_MASK);
-	crypto_aead_setkey(ctx->fallback.aead, key, keylen);

-	return 0;
+	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
}

static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)

View File

@@ -43,7 +43,6 @@
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
-#define FLAGS_NEW_KEY BIT(3)

#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
};

struct sahara_ctx {
-	unsigned long flags;
-
	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
@@ -151,6 +148,7 @@ struct sahara_ctx {

struct sahara_aes_reqctx {
	unsigned long mode;
+	u8 iv_out[AES_BLOCK_SIZE];
	struct skcipher_request fallback_req;	// keep at the end
};
@@ -446,27 +444,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
	int ret;
	int i, j;
	int idx = 0;
+	u32 len;

-	/* Copy new key if necessary */
-	if (ctx->flags & FLAGS_NEW_KEY) {
-		memcpy(dev->key_base, ctx->key, ctx->keylen);
-		ctx->flags &= ~FLAGS_NEW_KEY;
+	memcpy(dev->key_base, ctx->key, ctx->keylen);

-		if (dev->flags & FLAGS_CBC) {
-			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
-			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
-		} else {
-			dev->hw_desc[idx]->len1 = 0;
-			dev->hw_desc[idx]->p1 = 0;
-		}
-		dev->hw_desc[idx]->len2 = ctx->keylen;
-		dev->hw_desc[idx]->p2 = dev->key_phys_base;
-		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
-		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
-		idx++;
+	if (dev->flags & FLAGS_CBC) {
+		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+	} else {
+		dev->hw_desc[idx]->len1 = 0;
+		dev->hw_desc[idx]->p1 = 0;
	}
+	dev->hw_desc[idx]->len2 = ctx->keylen;
+	dev->hw_desc[idx]->p2 = dev->key_phys_base;
+	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+	idx++;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
@@ -488,24 +483,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
-		goto unmap_in;
+		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
-		goto unmap_out;
+		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
+	len = dev->total;
	for (i = 0; i < dev->nb_in_sg; i++) {
-		dev->hw_link[i]->len = sg->length;
+		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
+			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}
@@ -514,12 +512,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
+	len = dev->total;
	for (j = i; j < dev->nb_out_sg + i; j++) {
-		dev->hw_link[j]->len = sg->length;
+		dev->hw_link[j]->len = min(len, sg->length);
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
+			len -= min(len, sg->length);
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}
@@ -538,9 +538,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)

	return 0;

-unmap_out:
-	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
-		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);
@@ -548,8 +545,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
	return -EINVAL;
}

+static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	/* Update IV buffer to contain the last ciphertext block */
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
+				   ivsize, req->cryptlen - ivsize);
+	} else {
+		memcpy(req->iv, rctx->iv_out, ivsize);
+	}
+}
+
static int sahara_aes_process(struct skcipher_request *req)
{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
@@ -571,8 +584,17 @@ static int sahara_aes_process(struct skcipher_request *req)
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

-	if ((dev->flags & FLAGS_CBC) && req->iv)
-		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
+	if ((dev->flags & FLAGS_CBC) && req->iv) {
+		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+		memcpy(dev->iv_base, req->iv, ivsize);
+
+		if (!(dev->flags & FLAGS_ENCRYPT)) {
+			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+					   rctx->iv_out, ivsize,
+					   req->cryptlen - ivsize);
+		}
+	}

	/* assign new context to device */
	dev->ctx = ctx;
@@ -585,16 +607,20 @@ static int sahara_aes_process(struct skcipher_request *req)
	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
-	if (!timeout) {
-		dev_err(dev->device, "AES timeout\n");
-		return -ETIMEDOUT;
-	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

+	if (!timeout) {
+		dev_err(dev->device, "AES timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if ((dev->flags & FLAGS_CBC) && req->iv)
+		sahara_aes_cbc_update_iv(req);
+
	return 0;
}
@@ -608,7 +634,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
-		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}
@@ -624,12 +649,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}

+static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
+{
+	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+		crypto_skcipher_reqtfm(req));
+
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req,
+				      req->base.flags,
+				      req->base.complete,
+				      req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+				   req->dst, req->cryptlen, req->iv);
+
+	if (mode & FLAGS_ENCRYPT)
+		return crypto_skcipher_encrypt(&rctx->fallback_req);
+
+	return crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+		crypto_skcipher_reqtfm(req));
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

+	if (!req->cryptlen)
+		return 0;
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
+		return sahara_aes_fallback(req, mode);
+
	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -652,81 +705,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)

static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_encrypt(&rctx->fallback_req);
-	}
-
	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_decrypt(&rctx->fallback_req);
-	}
-
	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_encrypt(&rctx->fallback_req);
-	}
-
	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_decrypt(&rctx->fallback_req);
-	}
-
	return sahara_aes_crypt(req, FLAGS_CBC);
}
@@ -783,6 +776,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
		       int start)
{
	struct scatterlist *sg;
+	unsigned int len;
	unsigned int i;
	int ret;
@@ -804,12 +798,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
	if (!ret)
		return -EFAULT;

+	len = rctx->total;
	for (i = start; i < dev->nb_in_sg + start; i++) {
-		dev->hw_link[i]->len = sg->length;
+		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
+			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}
@@ -890,24 +886,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
	return 0;
}

-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
-{
-	if (!sg || !sg->length)
-		return nbytes;
-
-	while (nbytes && sg) {
-		if (nbytes <= sg->length) {
-			sg->length = nbytes;
-			sg_mark_end(sg);
-			break;
-		}
-		nbytes -= sg->length;
-		sg = sg_next(sg);
-	}
-
-	return nbytes;
-}
-
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -944,36 +922,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
					 hash_later, 0);
	}

-	/* nbytes should now be multiple of blocksize */
-	req->nbytes = req->nbytes - hash_later;
-
-	sahara_walk_and_recalc(req->src, req->nbytes);
+	rctx->total = len - hash_later;

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		sg_chain(rctx->in_sg_chain, 2, req->src);
-		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;
-		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
-		if (req->src)
-			rctx->in_sg = req->src;
-		else
-			rctx->in_sg = rctx->in_sg_chain;
-		/* buf was copied into rembuf above */
+		rctx->in_sg = rctx->in_sg_chain;
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
-		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
-		rctx->total = req->nbytes;
-		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
@@ -994,7 +956,10 @@ static int sahara_sha_process(struct ahash_request *req)
		return ret;

	if (rctx->first) {
-		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+		if (ret)
+			return ret;
+
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
@@ -1002,7 +967,10 @@ static int sahara_sha_process(struct ahash_request *req)
		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
-		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+		if (ret)
+			return ret;
+
		dev->hw_desc[1]->next = 0;
	}
@@ -1015,18 +983,19 @@ static int sahara_sha_process(struct ahash_request *req)
	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
-	if (!timeout) {
-		dev_err(dev->device, "SHA timeout\n");
-		return -ETIMEDOUT;
-	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

+	if (!timeout) {
+		dev_err(dev->device, "SHA timeout\n");
+		return -ETIMEDOUT;
+	}
+
	memcpy(rctx->context, dev->context_base, rctx->context_size);

-	if (req->result)
+	if (req->result && rctx->last)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
@@ -1170,8 +1139,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct sahara_sha_reqctx) +
-				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+				 sizeof(struct sahara_sha_reqctx));

	return 0;
}
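
Two recurring themes in the sahara diff above: CBC requests now save and restore the IV around the operation, and every hardware link entry is clamped to the bytes still outstanding, min(len, sg->length), so trailing scatterlist capacity is never handed to the engine. A standalone sketch of the clamping idiom; fake_sg and build_links stand in for the scatterlist walk and are not driver code:

#include <stdio.h>

struct fake_sg { unsigned int length; };

static void build_links(const struct fake_sg *sg, int nents, unsigned int total)
{
	unsigned int len = total;
	int i;

	for (i = 0; i < nents; i++) {
		unsigned int chunk = len < sg[i].length ? len : sg[i].length;

		printf("link %d: %u bytes\n", i, chunk);	/* hw_link[i]->len */
		len -= chunk;
	}
}

int main(void)
{
	struct fake_sg sgl[] = { { 64 }, { 64 }, { 64 } };

	build_links(sgl, 3, 100);	/* prints 64, then 36, then 0 */
	return 0;
}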

View File

@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
	tristate "VirtIO crypto driver"
	depends on VIRTIO
	select CRYPTO_AEAD
+	select CRYPTO_AKCIPHER2
	select CRYPTO_SKCIPHER
	select CRYPTO_ENGINE
+	select CRYPTO_RSA
+	select MPILIB
	help
	  This driver provides support for virtio crypto device. If you
	  choose 'M' here, this module will be called virtio_crypto.

View File

@@ -2,5 +2,6 @@
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
virtio_crypto-objs := \
	virtio_crypto_algs.o \
+	virtio_crypto_akcipher_algs.o \
	virtio_crypto_mgr.o \
	virtio_crypto_core.o

View File

@@ -0,0 +1,591 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
*
* Authors: zhenwei pi <pizhenwei@bytedance.com>
* lei he <helei.sig11@bytedance.com>
*
* Copyright 2022 Bytedance CO., LTD.
*/
#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_rsa_ctx {
MPI n;
};
struct virtio_crypto_akcipher_ctx {
struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_akcipher *tfm;
bool session_valid;
__u64 session_id;
union {
struct virtio_crypto_rsa_ctx rsa_ctx;
};
};
struct virtio_crypto_akcipher_request {
struct virtio_crypto_request base;
struct virtio_crypto_akcipher_ctx *akcipher_ctx;
struct akcipher_request *akcipher_req;
void *src_buf;
void *dst_buf;
uint32_t opcode;
};
struct virtio_crypto_akcipher_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
struct akcipher_alg algo;
};
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
kfree(vc_akcipher_req->src_buf);
kfree(vc_akcipher_req->dst_buf);
vc_akcipher_req->src_buf = NULL;
vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}
static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
struct virtio_crypto_akcipher_request *vc_akcipher_req =
container_of(vc_req, struct virtio_crypto_akcipher_request, base);
struct akcipher_request *akcipher_req;
int error;
switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
case VIRTIO_CRYPTO_KEY_REJECTED:
error = -EKEYREJECTED;
break;
default:
error = -EIO;
break;
}
akcipher_req = vc_akcipher_req->akcipher_req;
if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
struct virtio_crypto_ctrl_header *header, void *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
uint8_t *pkey;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
pkey = kmemdup(key, keylen, GFP_ATOMIC);
if (!pkey)
return -ENOMEM;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
memcpy(&ctrl->u, para, sizeof(ctrl->u));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&key_sg, pkey, keylen);
sgs[num_out++] = &key_sg;
sg_init_one(&inhdr_sg, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr_sg;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}
ctx->session_id = le64_to_cpu(input->session_id);
ctx->session_valid = true;
err = 0;
out:
kfree(vc_ctrl_req);
kfree_sensitive(pkey);
return err;
}
static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
unsigned int num_out = 0, num_in = 0;
int err;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
if (!ctx->session_valid)
return 0;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req)
return -ENOMEM;
ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
ctrl->header.queue_id = 0;
destroy_session = &ctrl->u.destroy_session;
destroy_session->session_id = cpu_to_le64(ctx->session_id);
sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;
sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &inhdr_sg;
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;
if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}
err = 0;
ctx->session_valid = false;
out:
kfree(vc_ctrl_req);
return err;
}
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, struct data_queue *data_vq)
{
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
void *src_buf = NULL, *dst_buf = NULL;
unsigned int num_out = 0, num_in = 0;
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret = -ENOMEM;
bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;
/* src data */
src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
if (!src_buf)
goto err;
if (verify) {
/* for verify operation, both src and dst data work as OUT direction */
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;
} else {
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;
/* dst data */
dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
if (!dst_buf)
goto err;
sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
sgs[num_out + num_in++] = &dstdata_sg;
}
vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;
/* in header */
sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
sgs[num_out + num_in++] = &inhdr_sg;
spin_lock_irqsave(&data_vq->lock, flags);
ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
virtqueue_kick(data_vq->vq);
spin_unlock_irqrestore(&data_vq->lock, flags);
if (ret)
goto err;
return 0;
err:
kfree(src_buf);
kfree(dst_buf);
return -ENOMEM;
}
static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct data_queue *data_vq = vc_req->dataq;
struct virtio_crypto_op_header *header;
struct virtio_crypto_akcipher_data_req *akcipher_req;
int ret;
vc_req->sgs = NULL;
vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
if (!vc_req->req_data)
return -ENOMEM;
/* build request header */
header = &vc_req->req_data->header;
header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header->session_id = cpu_to_le64(ctx->session_id);
/* build request akcipher data */
akcipher_req = &vc_req->req_data->u.akcipher_req;
akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
if (ret < 0) {
kfree_sensitive(vc_req->req_data);
vc_req->req_data = NULL;
return ret;
}
return 0;
}
static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
vc_akcipher_req->akcipher_ctx = ctx;
vc_akcipher_req->akcipher_req = req;
vc_akcipher_req->opcode = opcode;
return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}
static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}
static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
bool private,
int padding_algo,
int hash_algo)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
struct virtio_crypto *vcrypto;
struct virtio_crypto_ctrl_header header;
struct virtio_crypto_akcipher_session_para para;
struct rsa_key rsa_key = {0};
int node = virtio_crypto_get_current_node();
uint32_t keytype;
int ret;
/* mpi_free will test n, just free it. */
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;
if (private) {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
ret = rsa_parse_priv_key(&rsa_key, key, keylen);
} else {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
ret = rsa_parse_pub_key(&rsa_key, key, keylen);
}
if (ret)
return ret;
rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
if (!rsa_ctx->n)
return -ENOMEM;
if (!ctx->vcrypto) {
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
VIRTIO_CRYPTO_AKCIPHER_RSA);
if (!vcrypto) {
pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
ctx->vcrypto = vcrypto;
} else {
virtio_crypto_alg_akcipher_close_session(ctx);
}
/* set ctrl header */
header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header.queue_id = 0;
/* set RSA para */
para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
para.keytype = cpu_to_le32(keytype);
para.keylen = cpu_to_le32(keylen);
para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}
static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}
static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}
static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
return mpi_get_size(rsa_ctx->n);
}
static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
ctx->tfm = tfm;
ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
virtio_crypto_alg_akcipher_close_session(ctx);
virtcrypto_dev_put(ctx->vcrypto);
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;
}
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "rsa",
.cra_driver_name = "virtio-crypto-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
},
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.sign = virtio_crypto_rsa_sign,
.verify = virtio_crypto_rsa_verify,
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "pkcs1pad(rsa,sha1)",
.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
},
};
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
virtio_crypto_akcipher_algs[i].algo.base.cra_name);
}
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
int i = 0;
mutex_lock(&algs_lock);
for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 1)
crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
virtio_crypto_akcipher_algs[i].active_devs--;
}
mutex_unlock(&algs_lock);
}
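
The new file above implements rsa and pkcs1pad(rsa,sha1) on top of the device's akcipher service. For context, a hedged sketch of how a kernel caller would reach it through the generic akcipher API; the DER-encoded key, the scatterlists and the trimmed error handling are placeholders, and nothing below is taken from the driver itself:

#include <linux/crypto.h>
#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int rsa_encrypt_once(const u8 *der_key, unsigned int keylen,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int src_len, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0); /* may resolve to virtio-crypto-rsa */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_key, keylen);
	if (ret)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}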

View File

@@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
-	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
+	struct virtio_crypto_op_ctrl_req *ctrl;
+	struct virtio_crypto_session_input *input;
+	struct virtio_crypto_sym_create_session_req *sym_create_session;
+	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid to do DMA from the stack, switch to using
@@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
	if (!cipher_key)
		return -ENOMEM;

-	spin_lock(&vcrypto->ctrl_lock);
+	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+	if (!vc_ctrl_req) {
+		err = -ENOMEM;
+		goto out;
+	}
+
	/* Pad ctrl header */
-	vcrypto->ctrl.header.opcode =
-		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
-	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+	ctrl = &vc_ctrl_req->ctrl;
+	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
-	vcrypto->ctrl.header.queue_id = 0;
+	ctrl->header.queue_id = 0;

-	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+	input = &vc_ctrl_req->input;
+	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
	/* Pad cipher's parameters */
-	vcrypto->ctrl.u.sym_create_session.op_type =
-		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
-	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
-		vcrypto->ctrl.header.algo;
-	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
-		cpu_to_le32(keylen);
-	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
-		cpu_to_le32(op);
+	sym_create_session = &ctrl->u.sym_create_session;
+	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
+	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
+	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

-	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
@@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
-	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;

-	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-				num_in, vcrypto, GFP_ATOMIC);
-	if (err < 0) {
-		spin_unlock(&vcrypto->ctrl_lock);
-		kfree_sensitive(cipher_key);
-		return err;
-	}
-	virtqueue_kick(vcrypto->ctrl_vq);
-
-	/*
-	 * Trapping into the hypervisor, so the request should be
-	 * handled immediately.
-	 */
-	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-	       !virtqueue_is_broken(vcrypto->ctrl_vq))
-		cpu_relax();
+	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+	if (err < 0)
+		goto out;

-	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
-		spin_unlock(&vcrypto->ctrl_lock);
+	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
-		       le32_to_cpu(vcrypto->input.status));
-		kfree_sensitive(cipher_key);
-		return -EINVAL;
+		       le32_to_cpu(input->status));
+		err = -EINVAL;
+		goto out;
	}

	if (encrypt)
-		ctx->enc_sess_info.session_id =
-			le64_to_cpu(vcrypto->input.session_id);
+		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
-		ctx->dec_sess_info.session_id =
-			le64_to_cpu(vcrypto->input.session_id);
-
-	spin_unlock(&vcrypto->ctrl_lock);
+		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

+	err = 0;
+out:
+	kfree(vc_ctrl_req);
	kfree_sensitive(cipher_key);

-	return 0;
+	return err;
}
@@ -206,60 +197,56 @@ static int virtio_crypto_alg_skcipher_close_session(
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
-	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
+	struct virtio_crypto_op_ctrl_req *ctrl;
+	struct virtio_crypto_inhdr *ctrl_status;
+	struct virtio_crypto_ctrl_request *vc_ctrl_req;

-	spin_lock(&vcrypto->ctrl_lock);
-	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+	if (!vc_ctrl_req)
+		return -ENOMEM;
+
+	ctrl_status = &vc_ctrl_req->ctrl_status;
+	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
-	vcrypto->ctrl.header.opcode =
-		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+	ctrl = &vc_ctrl_req->ctrl;
+	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
-	vcrypto->ctrl.header.queue_id = 0;
+	ctrl->header.queue_id = 0;

-	destroy_session = &vcrypto->ctrl.u.destroy_session;
+	destroy_session = &ctrl->u.destroy_session;

	if (encrypt)
-		destroy_session->session_id =
-			cpu_to_le64(ctx->enc_sess_info.session_id);
+		destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
	else
-		destroy_session->session_id =
-			cpu_to_le64(ctx->dec_sess_info.session_id);
+		destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

-	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
-	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
-		    sizeof(vcrypto->ctrl_status.status));
+	sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &status_sg;

-	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
-				num_in, vcrypto, GFP_ATOMIC);
-	if (err < 0) {
-		spin_unlock(&vcrypto->ctrl_lock);
-		return err;
-	}
-	virtqueue_kick(vcrypto->ctrl_vq);
-
-	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
-	       !virtqueue_is_broken(vcrypto->ctrl_vq))
-		cpu_relax();
+	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+	if (err < 0)
+		goto out;

-	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
-		spin_unlock(&vcrypto->ctrl_lock);
+	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
-		       vcrypto->ctrl_status.status,
-		       destroy_session->session_id);
+		       ctrl_status->status, destroy_session->session_id);

-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
	}

-	spin_unlock(&vcrypto->ctrl_lock);
-
-	return 0;
+	err = 0;
+out:
+	kfree(vc_ctrl_req);
+	return err;
}

static int virtio_crypto_alg_skcipher_init_sessions(
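
The rework above gives every control-queue operation its own virtio_crypto_ctrl_request, so request buffers stop living in struct virtio_crypto, and it replaces the cpu_relax() busy-wait with a completion signalled from the control-queue callback. A minimal sketch of that submit-and-sleep pattern; all demo_* names are hypothetical:

#include <linux/completion.h>

struct demo_ctrl_req {
	/* request/response buffers would live here, per request */
	struct completion compl;
};

/* called from the control-queue interrupt path */
static void demo_ctrl_done(struct demo_ctrl_req *req)
{
	complete(&req->compl);
}

static int demo_submit_and_wait(struct demo_ctrl_req *req)
{
	init_completion(&req->compl);
	/* add the sg list under the ctrl lock and kick the vq here */
	wait_for_completion(&req->compl);	/* sleep, no cpu_relax() spin */
	return 0;
}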

View File

@@ -10,9 +10,11 @@
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
+#include <uapi/linux/virtio_crypto.h>

/* Internal representation of a data virtqueue */
@@ -27,6 +29,7 @@ struct data_queue {
	char name[32];

	struct crypto_engine *engine;
+	struct tasklet_struct done_task;
};

struct virtio_crypto {
@@ -56,6 +59,7 @@ struct virtio_crypto {
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
+	u32 akcipher_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
@@ -64,11 +68,6 @@ struct virtio_crypto {
	/* Maximum size of per request */
	u64 max_size;

-	/* Control VQ buffers: protected by the ctrl_lock */
-	struct virtio_crypto_op_ctrl_req ctrl;
-	struct virtio_crypto_session_input input;
-	struct virtio_crypto_inhdr ctrl_status;
-
	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
@@ -84,6 +83,18 @@ struct virtio_crypto_sym_session_info {
	__u64 session_id;
};

+/*
+ * Note: there are padding fields in request, clear them to zero before
+ * sending to host to avoid to divulge any information.
+ * Ex, virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
+ */
+struct virtio_crypto_ctrl_request {
+	struct virtio_crypto_op_ctrl_req ctrl;
+	struct virtio_crypto_session_input input;
+	struct virtio_crypto_inhdr ctrl_status;
+	struct completion compl;
+};
+
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);
@@ -131,5 +142,10 @@ static inline int virtio_crypto_get_current_node(void)

int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+				  unsigned int out_sgs, unsigned int in_sgs,
+				  struct virtio_crypto_ctrl_request *vc_ctrl_req);

#endif /* _VIRTIO_CRYPTO_COMMON_H */

View File

@@ -22,27 +22,78 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
	}
}

-static void virtcrypto_dataq_callback(struct virtqueue *vq)
+static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+	complete(&vc_ctrl_req->compl);
+}
+
+static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
-	struct virtio_crypto_request *vc_req;
+	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;
-	unsigned int qid = vq->index;

-	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+	do {
+		virtqueue_disable_cb(vq);
+		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
+			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+			virtio_crypto_ctrlq_callback(vc_ctrl_req);
+			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+		}
+		if (unlikely(virtqueue_is_broken(vq)))
+			break;
+	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+}
+
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+				  unsigned int out_sgs, unsigned int in_sgs,
+				  struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+	int err;
+	unsigned long flags;
+
+	init_completion(&vc_ctrl_req->compl);
+
+	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
+	if (err < 0) {
+		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+		return err;
+	}
+
+	virtqueue_kick(vcrypto->ctrl_vq);
+	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+
+	wait_for_completion(&vc_ctrl_req->compl);
+
+	return 0;
+}
+
+static void virtcrypto_done_task(unsigned long data)
+{
+	struct data_queue *data_vq = (struct data_queue *)data;
+	struct virtqueue *vq = data_vq->vq;
+	struct virtio_crypto_request *vc_req;
+	unsigned int len;
+
	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
-			spin_unlock_irqrestore(
-				&vcrypto->data_vq[qid].lock, flags);
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
-			spin_lock_irqsave(
-				&vcrypto->data_vq[qid].lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+	struct virtio_crypto *vcrypto = vq->vdev->priv;
+	struct data_queue *dq = &vcrypto->data_vq[vq->index];
+
+	tasklet_schedule(&dq->done_task);
}

static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -73,7 +124,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
		goto err_names;

	/* Parameters for control virtqueue */
-	callbacks[total_vqs - 1] = NULL;
+	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
	names[total_vqs - 1] = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
@@ -99,6 +150,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
			ret = -ENOMEM;
			goto err_engine;
		}
+		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
+				  (unsigned long)&vi->data_vq[i]);
	}

	kfree(names);
@@ -297,6 +350,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
+	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +402,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
+	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+		virtio_cread_le(vdev, struct virtio_crypto_config,
+				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +431,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
+	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
@@ -431,11 +488,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
+	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
+	for (i = 0; i < vcrypto->max_data_queues; i++)
+		tasklet_kill(&vcrypto->data_vq[i].done_task);
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
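
The data-path change above splits completion handling: the hard-irq virtqueue callback only schedules a tasklet, and the tasklet drains the queue in softirq context, with tasklet_kill() in remove() to make teardown safe. A hedged sketch of the shape, with hypothetical demo_* names:

#include <linux/interrupt.h>

static struct tasklet_struct demo_task;

/* softirq context: drain virtqueue_get_buf() results, run callbacks */
static void demo_done_task(unsigned long data)
{
}

/* hard-irq context: defer all real work */
static void demo_vq_callback(void)
{
	tasklet_schedule(&demo_task);
}

static void demo_init(void)
{
	tasklet_init(&demo_task, demo_done_task, 0);
}

static void demo_exit(void)
{
	tasklet_kill(&demo_task);	/* waits for a running instance */
}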

View File

@@ -242,6 +242,12 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
		return -EFAULT;
	}

+	if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+		pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+		virtio_crypto_algs_unregister(vcrypto);
+		return -EFAULT;
+	}
+
	return 0;
}
@@ -258,6 +264,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
	virtio_crypto_algs_unregister(vcrypto);
+	virtio_crypto_akcipher_algs_unregister(vcrypto);
}

/*
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
	case VIRTIO_CRYPTO_SERVICE_AEAD:
		algo_mask = vcrypto->aead_algo;
		break;
+
+	case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+		algo_mask = vcrypto->akcipher_algo;
+		break;
	}

	if (!(algo_mask & (1u << algo)))

View File

@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
	decode_register(other, OCX_OTHER_SIZE,
			ocx_com_errors, ctx->reg_com_int);

-	strncat(msg, other, OCX_MESSAGE_SIZE);
+	strlcat(msg, other, OCX_MESSAGE_SIZE);

	for (lane = 0; lane < OCX_RX_LANES; lane++)
		if (ctx->reg_com_int & BIT(lane)) {
@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
				 lane, ctx->reg_lane_int[lane],
				 lane, ctx->reg_lane_stat11[lane]);

-			strncat(msg, other, OCX_MESSAGE_SIZE);
+			strlcat(msg, other, OCX_MESSAGE_SIZE);

			decode_register(other, OCX_OTHER_SIZE,
					ocx_lane_errors,
					ctx->reg_lane_int[lane]);

-			strncat(msg, other, OCX_MESSAGE_SIZE);
+			strlcat(msg, other, OCX_MESSAGE_SIZE);
		}

	if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
	decode_register(other, OCX_OTHER_SIZE,
			ocx_com_link_errors, ctx->reg_com_link_int);

-	strncat(msg, other, OCX_MESSAGE_SIZE);
+	strlcat(msg, other, OCX_MESSAGE_SIZE);

	if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
		edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)

	decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);

-	strncat(msg, other, L2C_MESSAGE_SIZE);
+	strlcat(msg, other, L2C_MESSAGE_SIZE);

	if (ctx->reg_int & mask_ue)
		edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
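
The strncat() to strlcat() conversions above fix a classic misuse: strncat's size argument bounds how many bytes may be appended, not the total size of the destination, so passing the buffer size can still overflow. strlcat takes the full destination size and truncates safely. A small userspace illustration (glibc has no strlcat, so the safe bound is computed by hand here):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8] = "abcdef";

	/*
	 * strncat(dst, "0123456789", sizeof(dst)) would append up to 8
	 * bytes after the existing 6, overflowing dst. The safe append
	 * count is the remaining space minus the terminator:
	 */
	strncat(dst, "0123456789", sizeof(dst) - strlen(dst) - 1);
	printf("%s\n", dst);	/* "abcdef0": truncated, still in bounds */
	return 0;
}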

View File

@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059	0x20
#define QUIRK_IR_WAKE		0x40

+// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
+// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
+// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
+// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
+// while it is probable due to detection of any type of PCIe error.
+#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
+
+#if IS_ENABLED(CONFIG_X86)
+
+static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
+{
+	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
+}
+
+#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080
+
+static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
+{
+	const struct pci_dev *pcie_to_pci_bridge;
+
+	// Detect any type of AMD Ryzen machine.
+	if (!static_cpu_has(X86_FEATURE_ZEN))
+		return false;
+
+	// Detect VIA VT6306/6307/6308.
+	if (pdev->vendor != PCI_VENDOR_ID_VIA)
+		return false;
+	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
+		return false;
+
+	// Detect Asmedia ASM1083/1085.
+	pcie_to_pci_bridge = pdev->bus->self;
+	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
+		return false;
+	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
+		return false;
+
+	return true;
+}
+
+#else
+#define has_reboot_by_cycle_timer_read_quirk(ohci) false
+#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
+#endif
+
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
@@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
	s32 diff01, diff12;
	int i;

+	if (has_reboot_by_cycle_timer_read_quirk(ohci))
+		return 0;
+
	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev,
	if (param_quirks)
		ohci->quirks = param_quirks;

+	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
+		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
+
	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/

@@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, fw);
 
-	pr_info("secure-monitor enabled\n");
+	if (devm_of_platform_populate(dev))
+		goto out_in_base;
 
 	if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
 		goto out_in_base;
 
+	pr_info("secure-monitor enabled\n");
+
 	return 0;
 
 out_in_base:

@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
 {
 	struct device *dev = &pdev->dev;
 	struct resource *res;
-	char debug_name[50] = "ti_sci_debug@";
+	char debug_name[50];
 
 	/* Debug region is optional */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
 	/* Setup NULL termination */
 	info->debug_buffer[info->debug_region_size] = 0;
 
-	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
-					      sizeof(debug_name) -
-					      sizeof("ti_sci_debug@")),
-				      0444, NULL, info, &ti_sci_debug_fops);
+	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
+		 dev_name(dev));
+	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+				      &ti_sci_debug_fops);
 	if (IS_ERR(info->d))
 		return PTR_ERR(info->d);
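
Besides sharing the size-argument pitfall seen in the thunderx hunks, the old call also fed strncat()'s return value straight into debugfs_create_file(). snprintf() is the idiomatic replacement for building a bounded name: it takes the total buffer size and always NUL-terminates. A small runnable sketch (the device name is a made-up example, not real dev_name() output):

#include <stdio.h>

int main(void)
{
	char debug_name[50];
	const char *dev = "2b1f0000.ti-sci";	/* hypothetical dev_name() output */

	/* Writes at most sizeof(debug_name) bytes including the NUL, truncating if needed. */
	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s", dev);

	printf("%s\n", debug_name);
	return 0;
}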

@@ -459,6 +459,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (!adev->didt_rreg)
+		return -EOPNOTSUPP;
+
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -518,6 +521,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (!adev->didt_wreg)
+		return -EOPNOTSUPP;
+
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -576,7 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	int r;
 
 	if (!adev->smc_rreg)
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
@@ -638,7 +644,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	int r;
 
 	if (!adev->smc_wreg)
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
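
All four hunks apply one rule: probe the optional accessor before use and report -EOPNOTSUPP (this hardware lacks the interface) instead of -EPERM (access denied), which would send userspace chasing a permissions problem that does not exist. A minimal userspace model of the guard, with made-up type and field names; only the errno choice mirrors the kernel change:

#include <errno.h>
#include <stdio.h>

struct example_dev {
	/* Optional register accessor; NULL when the hardware lacks it. */
	unsigned int (*read_reg)(unsigned int offset);
};

static int example_read(struct example_dev *dev, unsigned int offset,
			unsigned int *value)
{
	if (!dev->read_reg)
		return -EOPNOTSUPP;	/* not supported, not forbidden */
	*value = dev->read_reg(offset);
	return 0;
}

int main(void)
{
	struct example_dev dev = { .read_reg = NULL };
	unsigned int v;
	int r = example_read(&dev, 0x10, &v);

	printf("example_read: %d (%s)\n", r,
	       r == -EOPNOTSUPP ? "EOPNOTSUPP" : "ok");
	return 0;
}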

@@ -2733,10 +2733,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
 		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
-		if (ps == NULL) {
-			kfree(adev->pm.dpm.ps);
+		if (ps == NULL)
 			return -ENOMEM;
-		}
 		adev->pm.dpm.ps[i].ps_priv = ps;
 		k = 0;
 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];

@@ -7349,10 +7349,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 		kcalloc(4,
 			sizeof(struct amdgpu_clock_voltage_dependency_entry),
 			GFP_KERNEL);
-	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
-		amdgpu_free_extended_power_table(adev);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
 		return -ENOMEM;
-	}
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
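
This hunk and the kv_parse_power_table() one above fix the same bug class: a helper freed memory it did not own on its error path while the caller's error handling freed it again, producing a double free. The cure is single ownership: the helper reports failure and the caller unwinds once. A runnable sketch of the rule, with illustrative names:

#include <stdlib.h>

struct table {
	int *entries;
};

/* Leaf helper: on failure it frees nothing it does not own;
 * the caller's single error path does all the unwinding. */
static int table_add_entries(struct table *t)
{
	t->entries = calloc(4, sizeof(*t->entries));
	if (!t->entries)
		return -1;	/* do NOT free caller-owned state here */
	return 0;
}

int main(void)
{
	struct table t = { 0 };

	if (table_add_entries(&t) < 0) {
		free(t.entries);	/* one place owns cleanup; NULL is safe */
		return 1;
	}
	free(t.entries);
	return 0;
}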

@@ -1637,7 +1637,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	} else {
 		if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
 			dev_err(dev, "failed to parse HPD number\n");
-			return ret;
+			return -EINVAL;
 		}
 	}
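
The old "return ret;" reported whatever an earlier step left in ret, here still 0, so an out-of-range HPD pin was silently treated as a successful probe; an explicit -EINVAL makes the failure unambiguous. A tiny illustration of the stale-return hazard, with made-up logic:

#include <errno.h>
#include <stdio.h>

static int probe(int hpd_pin)
{
	int ret = 0;	/* left over from an earlier, successful step */

	if (hpd_pin < 0 || hpd_pin > 1) {
		/* Buggy version: "return ret;" would report success here. */
		return -EINVAL;	/* explicit, unambiguous failure */
	}
	return ret;
}

int main(void)
{
	printf("probe(7) = %d\n", probe(7));	/* prints -22 on Linux */
	return 0;
}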

@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __exit tpd12s015_remove(struct platform_device *pdev)
+static int tpd12s015_remove(struct platform_device *pdev)
 {
 	struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
 
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
 
 static struct platform_driver tpd12s015_driver = {
 	.probe	= tpd12s015_probe,
-	.remove	= __exit_p(tpd12s015_remove),
+	.remove	= tpd12s015_remove,
 	.driver	= {
 		.name	= "tpd12s015",
 		.of_match_table = tpd12s015_of_match,

@@ -562,8 +562,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_mode_set set;
 	uint32_t __user *set_connectors_ptr;
 	struct drm_modeset_acquire_ctx ctx;
-	int ret;
-	int i;
+	int ret, i, num_connectors = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
@@ -721,6 +720,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 					connector->name);
 
 			connector_set[i] = connector;
+			num_connectors++;
 		}
 	}
 
@@ -729,7 +729,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	set.y = crtc_req->y;
 	set.mode = mode;
 	set.connectors = connector_set;
-	set.num_connectors = crtc_req->count_connectors;
+	set.num_connectors = num_connectors;
 	set.fb = fb;
 
 	if (drm_drv_uses_atomic_modeset(dev))
@@ -742,7 +742,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 		drm_framebuffer_put(fb);
 
 	if (connector_set) {
-		for (i = 0; i < crtc_req->count_connectors; i++) {
+		for (i = 0; i < num_connectors; i++) {
 			if (connector_set[i])
 				drm_connector_put(connector_set[i]);
 		}
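
The underlying rule of this fix: release exactly as many references as were taken. When the connector lookup loop exits early, cleaning up over the full crtc_req->count_connectors would drop references that were never acquired and touch uninitialized slots; counting successes into num_connectors bounds both the modeset and the unwind. A toy userspace model, where allocation stands in for taking a reference:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int requested = 5, num_taken = 0, i;
	void *set[5] = { 0 };

	for (i = 0; i < requested; i++) {
		set[i] = malloc(16);	/* stand-in for acquiring a reference */
		if (!set[i])
			break;		/* may stop early */
		num_taken++;
	}

	/* Unwind exactly what was taken, never the requested count. */
	for (i = 0; i < num_taken; i++)
		free(set[i]);

	printf("released %d of %d requested\n", num_taken, requested);
	return 0;
}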

@@ -892,8 +892,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 			goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_modeset_register_all(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_modeset_register_all(dev);
+		if (ret)
+			goto err_unload;
+	}
 
 	ret = 0;
 
@@ -905,6 +908,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
 	goto out_unlock;
 
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
 err_minors:
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
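
The new err_unload label extends the function's goto unwind ladder: each failure point jumps to a label that releases, in reverse order, only what was acquired before it. A compact runnable sketch of the idiom with placeholder resources:

#include <stdio.h>
#include <stdlib.h>

static int register_device(void)
{
	void *minors, *modeset;

	minors = malloc(8);		/* stand-in for minor registration */
	if (!minors)
		goto err;
	modeset = malloc(8);		/* stand-in for drm_modeset_register_all() */
	if (!modeset)
		goto err_minors;	/* undo only what already succeeded */

	/* A real driver would keep these until unregister; freed here
	 * only so the demo does not leak. */
	free(modeset);
	free(minors);
	return 0;

err_minors:
	free(minors);
err:
	return -1;
}

int main(void)
{
	printf("register_device: %d\n", register_device());
	return 0;
}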

@@ -108,18 +108,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
 		return 0;
 
 	if (!priv->mapping) {
-		void *mapping;
+		void *mapping = NULL;
 
 		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 			mapping = arm_iommu_create_mapping(&platform_bus_type,
 				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
 		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
 			mapping = iommu_get_domain_for_dev(priv->dma_dev);
-		else
-			mapping = ERR_PTR(-ENODEV);
 
-		if (IS_ERR(mapping))
-			return PTR_ERR(mapping);
+		if (!mapping)
+			return -ENODEV;
 		priv->mapping = mapping;
 	}
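
The bug fixed here was a clash of error conventions: arm_iommu_create_mapping() signals failure with an ERR_PTR()-encoded pointer, but iommu_get_domain_for_dev() returns NULL, so the IS_ERR() check let a NULL result through to be dereferenced later. Normalizing on NULL makes one check cover both paths. A userspace re-implementation of the two conventions, assuming the kernel's usual ERR_PTR encoding (small negative errnos cast to the top of the address space):

#include <stdio.h>
#include <errno.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *a = ERR_PTR(-ENODEV);	/* ERR_PTR convention on failure */
	void *b = NULL;			/* NULL convention on failure */

	/* IS_ERR() does not catch NULL: this was the bug. */
	printf("IS_ERR(a)=%d IS_ERR(b)=%d\n", IS_ERR(a), IS_ERR(b));

	/* Normalizing both paths to NULL makes one check sufficient. */
	if (!b)
		printf("caught NULL failure with a plain !ptr check\n");
	return 0;
}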

@@ -1849,6 +1849,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 
 	crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+	if (IS_ERR(crtc))
+		return PTR_ERR(crtc);
 	crtc->pipe_clk = &hdata->phy_clk;
 
 	ret = hdmi_create_connector(encoder);

@@ -5584,7 +5584,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp)
 	intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
 
 	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
-				    link_status[DP_DPCD_REV]);
+				    intel_dp->dpcd[DP_DPCD_REV]);
 }
 
 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)

@@ -268,6 +268,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	unsigned long flags;
 
 	DBG("%s", mdp4_crtc->name);
 
@@ -280,6 +281,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
 	mdp4_disable(mdp4_kms);
 
+	if (crtc->state->event && !crtc->state->active) {
+		WARN_ON(mdp4_crtc->event);
+		spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+	}
+
 	mdp4_crtc->enabled = false;
 }

@@ -558,7 +558,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
 	struct device *dev = &phy->pdev->dev;
 	int ret;
 
-	pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret)
+		return ret;
 
 	ret = clk_prepare_enable(phy->ahb_clk);
 	if (ret) {
@@ -708,6 +710,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
+	ret = devm_pm_runtime_enable(&pdev->dev);
+	if (ret)
+		return ret;
+
 	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
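
pm_runtime_get_sync() increments the device usage counter even when the resume fails, so an error path that simply returns leaks a reference; pm_runtime_resume_and_get() drops the counter again on failure, which is why the caller here can just propagate ret. A toy model of the two behaviours, a deliberate simplification of the runtime-PM core:

#include <stdio.h>

static int usage_count;
static int resume_fails = 1;	/* simulate a resume failure */

static int get_sync(void)	/* models pm_runtime_get_sync() */
{
	usage_count++;		/* counter bumped even on failure */
	return resume_fails ? -5 : 0;
}

static int resume_and_get(void)	/* models pm_runtime_resume_and_get() */
{
	int ret = get_sync();
	if (ret < 0)
		usage_count--;	/* failure path restores the counter */
	return ret;
}

int main(void)
{
	get_sync();
	printf("after failed get_sync: usage_count=%d (leaked)\n", usage_count);

	usage_count = 0;
	resume_and_get();
	printf("after failed resume_and_get: usage_count=%d (balanced)\n", usage_count);
	return 0;
}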

@@ -39,7 +39,7 @@ struct nv04_fence_priv {
 static int
 nv04_fence_emit(struct nouveau_fence *fence)
 {
-	struct nvif_push *push = fence->channel->chan.push;
+	struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
 	int ret = PUSH_WAIT(push, 2);
 
 	if (ret == 0) {
 		PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);

@@ -32,7 +32,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 		type |= 0x00000001; /* PAGE_ALL */
 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
-		type |= 0x00000004; /* HUB_ONLY */
+		type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
 
 	mutex_lock(&subdev->mutex);

Some files were not shown because too many files have changed in this diff.