Merge tag 'ASB-2024-04-05_12-5.10' of https://android.googlesource.com/kernel/common into android13-5.10-waipio

https://source.android.com/docs/security/bulletin/2024-04-01

* tag 'ASB-2024-04-05_12-5.10' of https://android.googlesource.com/kernel/common: (442 commits)
  FROMLIST: binder: check offset alignment in binder_get_object()
  ANDROID: enable CONFIG_USB_XHCI_PCI_RENESAS in gki_defconfig
  UPSTREAM: usb: dwc3: core: set force_gen1 bit in USB31 devices if max speed is SS
  ANDROID: userfaultfd: abort uffdio ops if mmap_lock is contended
  ANDROID: userfaultfd: add MMAP_TRYLOCK mode for COPY/ZEROPAGE
  UPSTREAM: coresight: etm4x: Remove bogous __exit annotation for some functions
  UPSTREAM: ASoC: hdmi-codec: register hpd callback on component probe
  UPSTREAM: usb: typec: tcpm: Fix NULL pointer dereference in tcpm_pd_svdm()
  UPSTREAM: mm/damon/vaddr-test: fix memory leak in damon_do_test_apply_three_regions()
  UPSTREAM: task_work: add kerneldoc annotation for 'data' argument
  UPSTREAM: x86/purgatory: Remove LTO flags
  UPSTREAM: tcpm: Avoid soft reset when partner does not support get_status
  UPSTREAM: block/mq-deadline: use correct way to throttling write requests
  UPSTREAM: usb: typec: tcpm: Fix response to vsafe0V event
  UPSTREAM: clk: Fix memory leak in devm_clk_notifier_register()
  UPSTREAM: selftests: damon: add config file
  ANDROID: abi_gki_aarch64_qcom: Export trace_android_vh_try_fixup_sea
  ANDROID: arm64: Call fixup_exception() within do_sea()
  ANDROID: userfaultfd: allow SPF for UFFD_FEATURE_SIGBUS on private+anon
  ANDROID: GKI: db845c: Update symbols list and ABI
  ...

Conflicts:
	Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
	Documentation/devicetree/bindings~HEAD
	arch/powerpc/lib/Makefile

Change-Id: I6524da89cb855824fa28a95396d3683af4a10046
commit 42fc85e576

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 205
+SUBLEVEL = 209
 EXTRAVERSION =
 NAME = Dare mighty things
(File diff suppressed because it is too large)
@@ -1086,6 +1086,7 @@
 devfreq_suspend_device
 devm_clk_register
 devm_devfreq_add_device
+devm_pm_runtime_enable
 devm_regulator_get_exclusive
 dev_pm_opp_find_freq_floor
 dev_pm_opp_get_freq
@@ -1177,7 +1178,6 @@
 drm_dp_link_rate_to_bw_code
 drm_dp_link_train_channel_eq_delay
 drm_dp_link_train_clock_recovery_delay
-drm_edid_block_valid
 drm_encoder_cleanup
 drm_encoder_init
 drm_flip_work_cleanup
@@ -1812,6 +1812,7 @@
 
 # preserved by --additions-only
 drm_connector_init_with_ddc
+drm_edid_block_valid
 gpiod_direction_input
 idr_alloc_u32
 of_clk_get_by_name
@@ -2607,6 +2607,7 @@
 __traceiter_android_vh_show_suspend_epoch_val
 __traceiter_android_vh_subpage_dma_contig_alloc
 __traceiter_android_vh_timer_calc_index
+__traceiter_android_vh_try_fixup_sea
 __traceiter_android_vh_ufs_check_int_errors
 __traceiter_android_vh_ufs_clock_scaling
 __traceiter_android_vh_ufs_compl_command
@@ -2735,6 +2736,7 @@
 __tracepoint_android_vh_show_suspend_epoch_val
 __tracepoint_android_vh_subpage_dma_contig_alloc
 __tracepoint_android_vh_timer_calc_index
+__tracepoint_android_vh_try_fixup_sea
 __tracepoint_android_vh_ufs_check_int_errors
 __tracepoint_android_vh_ufs_clock_scaling
 __tracepoint_android_vh_ufs_compl_command
@@ -61,7 +61,7 @@ struct rt_sigframe {
 	unsigned int sigret_magic;
 };
 
-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
 {
 	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT
@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
 #else
 	v2abi.r58 = v2abi.r59 = 0;
 #endif
-	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+	err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
 #endif
 	return err;
 }
 
-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
 {
 	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT
@@ -347,6 +347,7 @@ usb: target-module@47400000 {
 			<SYSC_IDLE_NO>,
 			<SYSC_IDLE_SMART>,
 			<SYSC_IDLE_SMART_WKUP>;
+		ti,sysc-delay-us = <2>;
 		clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
 		clock-names = "fck";
 		#address-cells = <1>;
@@ -760,7 +760,7 @@ pwrkey@1c {
 
 		xoadc: xoadc@197 {
 			compatible = "qcom,pm8921-adc";
-			reg = <197>;
+			reg = <0x197>;
 			interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
 			#address-cells = <2>;
 			#size-cells = <0>;
@@ -3,6 +3,7 @@
 menuconfig ARCH_DAVINCI
 	bool "TI DaVinci"
 	depends on ARCH_MULTI_V5
+	select CPU_ARM926T
 	select DAVINCI_TIMER
 	select ZONE_DMA
 	select PM_GENERIC_DOMAINS if PM
@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
 
 	soc_dev_attr->machine = soc_name;
 	soc_dev_attr->family = omap_get_family();
+	if (!soc_dev_attr->family) {
+		kfree(soc_dev_attr);
+		return;
+	}
 	soc_dev_attr->revision = soc_rev;
 	soc_dev_attr->custom_attr_group = omap_soc_groups[0];
 
 	soc_dev = soc_device_register(soc_dev_attr);
 	if (IS_ERR(soc_dev)) {
+		kfree(soc_dev_attr->family);
 		kfree(soc_dev_attr);
 		return;
 	}
@@ -804,16 +804,16 @@ static int __init sunxi_mc_smp_init(void)
 	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
 		ret = of_property_match_string(node, "enable-method",
 					       sunxi_mc_smp_data[i].enable_method);
-		if (!ret)
+		if (ret >= 0)
 			break;
 	}
 
-	is_a83t = sunxi_mc_smp_data[i].is_a83t;
-
 	of_node_put(node);
-	if (ret)
+	if (ret < 0)
 		return -ENODEV;
 
+	is_a83t = sunxi_mc_smp_data[i].is_a83t;
+
 	if (!sunxi_mc_smp_cpu_table_init())
 		return -EINVAL;
 
@@ -129,7 +129,7 @@ rtc@6f {
 		compatible = "microchip,mcp7940x";
 		reg = <0x6f>;
 		interrupt-parent = <&gpiosb>;
-		interrupts = <5 0>; /* GPIO2_5 */
+		interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
 	};
 };
 
@@ -38,8 +38,8 @@ leds {
 		user4 {
 			label = "green:user4";
 			gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
-			linux,default-trigger = "panic-indicator";
 			default-state = "off";
+			panic-indicator;
 		};
 
 		wlan {
@@ -55,8 +55,8 @@ leds {
 		user4 {
 			label = "green:user4";
 			gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
-			linux,default-trigger = "panic-indicator";
 			default-state = "off";
+			panic-indicator;
 		};
 
 		wlan {
@@ -865,7 +865,7 @@ dss: dss@4a00000 {
 			assigned-clocks = <&k3_clks 67 2>;
 			assigned-clock-parents = <&k3_clks 67 5>;
 
-			interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
 
 			status = "disabled";
 
@@ -478,6 +478,7 @@ CONFIG_USB_HIDDEV=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
 	irq = __vgic_its_check_cache(dist, db, devid, eventid);
+	if (irq)
+		vgic_get_irq_kref(irq);
+
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);
+	vgic_put_irq(kvm, irq);
 
 	return 0;
 }
@@ -356,19 +356,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (test_bit(i, &val)) {
-			/*
-			 * pending_latch is set irrespective of irq type
-			 * (level or edge) to avoid dependency that VM should
-			 * restore irq config before pending info.
-			 */
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-		} else {
-			irq->pending_latch = false;
-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-		}
+
+		/*
+		 * pending_latch is set irrespective of irq type
+		 * (level or edge) to avoid dependency that VM should
+		 * restore irq config before pending info.
+		 */
+		irq->pending_latch = test_bit(i, &val);
+
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			irq_set_irqchip_state(irq->host_irq,
+					      IRQCHIP_STATE_PENDING,
+					      irq->pending_latch);
+			irq->pending_latch = false;
+		}
+
+		if (irq->pending_latch)
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		else
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
@@ -728,6 +728,11 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
 {
 	const struct fault_info *inf;
 	unsigned long siaddr;
+	bool can_fixup = false;
+
+	trace_android_vh_try_fixup_sea(far, esr, regs, &can_fixup);
+	if (can_fixup && fixup_exception(regs))
+		return 0;
 
 	inf = esr_to_fault_info(esr);
 
@@ -468,7 +468,6 @@ config MACH_LOONGSON2EF
 
 config MACH_LOONGSON64
 	bool "Loongson 64-bit family of machines"
-	select ARCH_DMA_DEFAULT_COHERENT
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
@@ -1380,7 +1379,6 @@ config CPU_LOONGSON64
 	select CPU_SUPPORTS_MSA
 	select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
 	select CPU_MIPSR2_IRQ_VI
-	select DMA_NONCOHERENT
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_ASID_BITS_VARIABLE
@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
 		i2c_register_board_info(0, db1200_i2c_devs,
 					ARRAY_SIZE(db1200_i2c_devs));
 		spi_register_board_info(db1200_spi_devs,
-					ARRAY_SIZE(db1200_i2c_devs));
+					ARRAY_SIZE(db1200_spi_devs));
 
 	/* SWITCHES:	S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
 	 *		S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
 	i2c_register_board_info(0, db1550_i2c_devs,
 				ARRAY_SIZE(db1550_i2c_devs));
 	spi_register_board_info(db1550_spi_devs,
-				ARRAY_SIZE(db1550_i2c_devs));
+				ARRAY_SIZE(db1550_spi_devs));
 
 	c = clk_get(NULL, "psc0_intclk");
 	if (!IS_ERR(c)) {
@@ -5,7 +5,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>
 
-#define dmi_early_remap(x, l)	ioremap_cache(x, l)
+#define dmi_early_remap(x, l)	ioremap(x, l)
 #define dmi_early_unmap(x, l)	iounmap(x)
 #define dmi_remap(x, l)		ioremap_cache(x, l)
 #define dmi_unmap(x)		iounmap(x)
@@ -117,8 +117,7 @@ struct irq_source_routing_table {
 	u64 pci_io_start_addr;
 	u64 pci_io_end_addr;
 	u64 pci_config_addr;
-	u16 dma_mask_bits;
-	u16 dma_noncoherent;
+	u32 dma_mask_bits;
 } __packed;
 
 struct interface_info {
@@ -322,11 +322,11 @@ static void __init bootmem_init(void)
 		panic("Incorrect memory mapping !!!");
 
 	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 #ifdef CONFIG_HIGHMEM
-		highstart_pfn = PFN_DOWN(HIGHMEM_START);
+		highstart_pfn = max_low_pfn;
 		highend_pfn = max_pfn;
 #else
-		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 		max_pfn = max_low_pfn;
 #endif
 	}
@@ -13,8 +13,6 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  */
-
-#include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/pci_ids.h>
 #include <asm/bootinfo.h>
@@ -133,14 +131,8 @@ void __init prom_init_env(void)
 	loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
 	loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
 	if (loongson_sysconf.dma_mask_bits < 32 ||
-	    loongson_sysconf.dma_mask_bits > 64) {
+	    loongson_sysconf.dma_mask_bits > 64)
 		loongson_sysconf.dma_mask_bits = 32;
-		dma_default_coherent = true;
-	} else {
-		dma_default_coherent = !eirq_source->dma_noncoherent;
-	}
-
-	pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
 
 	loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
 	loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
@@ -38,14 +38,6 @@ extern char start_virt_trampolines[];
 extern char end_virt_trampolines[];
 #endif
 
-static inline int in_kernel_text(unsigned long addr)
-{
-	if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
-		return 1;
-
-	return 0;
-}
-
 static inline unsigned long kernel_toc_addr(void)
 {
 	/* Defined by the linker, see vmlinux.lds.S */
@@ -58,10 +58,10 @@ _GLOBAL(ppc_save_regs)
 	lbz	r0,PACAIRQSOFTMASK(r13)
 	PPC_STL	r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
 #endif
-	/* go up one stack frame for SP */
-	PPC_LL	r4,0(r1)
-	PPC_STL	r4,1*SZL(r3)
+	/* store current SP */
+	PPC_STL	r1,1*SZL(r3)
 	/* get caller's LR */
+	PPC_LL	r4,0(r1)
 	PPC_LL	r0,LRSAVE(r4)
 	PPC_STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
 	mflr	r0
@@ -38,7 +38,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
 # so it is only needed for modules, and only for older linkers which
 # do not support --save-restore-funcs
 ifeq ($(call ld-ifversion, -lt, 22500, y),y)
-extra-$(CONFIG_PPC64)	+= crtsavres.o
+always-$(CONFIG_PPC64)	+= crtsavres.o
 endif
 
 obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
@@ -292,6 +292,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 	attr_group->attrs = attrs;
 	do {
 		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
+		if (!ev_val_str)
+			continue;
 		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
 		if (!dev_str)
 			continue;
@@ -299,6 +301,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 		attrs[j++] = dev_str;
 		if (pmu->events[i].scale) {
 			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
+			if (!ev_scale_str)
+				continue;
 			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
 			if (!dev_str)
 				continue;
@@ -308,6 +312,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
 
 		if (pmu->events[i].unit) {
 			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
+			if (!ev_unit_str)
+				continue;
 			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
 			if (!dev_str)
 				continue;
@@ -177,6 +177,7 @@ config ISS4xx
 config CURRITUCK
 	bool "IBM Currituck (476fpe) Support"
 	depends on PPC_47x
+	select I2C
 	select SWIOTLB
 	select 476FPE
 	select FORCE_PCI
@@ -278,6 +278,8 @@ int __init opal_event_init(void)
 		else
 			name = kasprintf(GFP_KERNEL, "opal");
 
+		if (!name)
+			continue;
 		/* Install interrupt handler */
 		rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
 				 name, NULL);
@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
 
 		j = 0;
 		pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
+		if (!pcaps[i].pg.name) {
+			kfree(pcaps[i].pattrs);
+			kfree(pcaps[i].pg.attrs);
+			goto out_pcaps_pattrs;
+		}
+
 		if (has_min) {
 			powercap_add_attr(min, "powercap-min",
 					  &pcaps[i].pattrs[j]);
@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
 	ent->chip = chip;
 	snprintf(ent->name, 16, "%08x", chip);
 	ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
+	if (!ent->path.data) {
+		kfree(ent);
+		return -ENOMEM;
+	}
+
 	ent->path.size = strlen((char *)ent->path.data);
 
 	dir = debugfs_create_dir(ent->name, root);
@@ -481,7 +481,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
 	int lmb_found;
 	int rc;
 
-	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
 
 	lmb_found = 0;
 	for_each_drmem_lmb(lmb) {
@@ -495,14 +495,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
 		}
 	}
 
-	if (!lmb_found)
+	if (!lmb_found) {
+		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
 		rc = -EINVAL;
-
-	if (rc)
-		pr_info("Failed to hot-remove memory at %llx\n",
-			lmb->base_addr);
-	else
-		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
+	} else if (rc) {
+		pr_debug("Failed to hot-remove memory at %llx\n",
+			 lmb->base_addr);
+	} else {
+		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
+	}
 
 	return rc;
 }
@@ -719,8 +720,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
 		if (!drmem_lmb_reserved(lmb))
 			continue;
 
-		pr_info("Memory at %llx (drc index %x) was hot-added\n",
-			lmb->base_addr, lmb->drc_index);
+		pr_debug("Memory at %llx (drc index %x) was hot-added\n",
+			 lmb->base_addr, lmb->drc_index);
 		drmem_remove_lmb_reservation(lmb);
 	}
 	rc = 0;
@@ -76,7 +76,7 @@ static inline int test_fp_ctl(u32 fpc)
 #define KERNEL_VXR_HIGH		(KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
 
 #define KERNEL_VXR		(KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_V0V7)
+#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_LOW)
 
 struct kernel_fpu;
 
@@ -11,6 +11,8 @@
 /* I/O size constraints */
 #define ZPCI_MAX_READ_SIZE	8
 #define ZPCI_MAX_WRITE_SIZE	128
+#define ZPCI_BOUNDARY_SIZE	(1 << 12)
+#define ZPCI_BOUNDARY_MASK	(ZPCI_BOUNDARY_SIZE - 1)
 
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT		48
@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
 int zpci_write_block(volatile void __iomem *dst, const void *src,
 		     unsigned long len);
 
-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
 {
-	int count = len > max ? max : len, size = 1;
+	int offset = dst & ZPCI_BOUNDARY_MASK;
+	int size;
 
-	while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
-		dst = dst >> 1;
-		src = src >> 1;
-		size = size << 1;
-	}
-	return size;
+	size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+	if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+		return size;
+
+	if (size >= 8)
+		return 8;
+	return rounddown_pow_of_two(size);
 }
 
 static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
 	int size, rc = 0;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) src,
-					       (u64) dst, n,
-					       ZPCI_MAX_READ_SIZE);
+		size = zpci_get_max_io_size((u64 __force) src,
+					    (u64) dst, n,
+					    ZPCI_MAX_READ_SIZE);
 		rc = zpci_read_single(dst, src, size);
 		if (rc)
 			break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 		return -EINVAL;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) dst,
-					       (u64) src, n,
-					       ZPCI_MAX_WRITE_SIZE);
+		size = zpci_get_max_io_size((u64 __force) dst,
+					    (u64) src, n,
+					    ZPCI_MAX_WRITE_SIZE);
 		if (size > 8) /* main path */
 			rc = zpci_write_block(dst, src, size);
 		else
@@ -100,9 +100,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
 
 	old_fs = enable_sacf_uaccess();
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) dst,
-					       (u64 __force) src, n,
-					       ZPCI_MAX_WRITE_SIZE);
+		size = zpci_get_max_io_size((u64 __force) dst,
+					    (u64 __force) src, n,
+					    ZPCI_MAX_WRITE_SIZE);
 		if (size > 8) /* main path */
 			rc = __pcistb_mio_inuser(dst, src, size, &status);
 		else
@@ -252,9 +252,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
 
 	old_fs = enable_sacf_uaccess();
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64 __force) src,
-					       (u64 __force) dst, n,
-					       ZPCI_MAX_READ_SIZE);
+		size = zpci_get_max_io_size((u64 __force) src,
+					    (u64 __force) dst, n,
+					    ZPCI_MAX_READ_SIZE);
 		rc = __pcilg_mio_inuser(dst, src, size, &status);
 		if (rc)
 			break;
@@ -429,6 +429,7 @@ CONFIG_HID_WIIMOTE=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -1080,8 +1080,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
 	} else {
 		local_irq_save(flags);
 		memcpy(addr, opcode, len);
-		local_irq_restore(flags);
 		sync_core();
+		local_irq_restore(flags);
 
 		/*
 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
@@ -569,7 +569,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
 
-	int3_emulate_call(regs, regs_get_register(regs, offs));
+	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
+	int3_emulate_jmp(regs, regs_get_register(regs, offs));
 }
 NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
 
@@ -24,8 +24,8 @@
 
 static int kvmclock __initdata = 1;
 static int kvmclock_vsyscall __initdata = 1;
-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static int msr_kvm_system_time __ro_after_init;
+static int msr_kvm_wall_clock __ro_after_init;
 static u64 kvm_sched_clock_offset __ro_after_init;
 
 static int __init parse_no_kvmclock(char *arg)
@@ -196,7 +196,8 @@ static void kvm_setup_secondary_clock(void)
 
 void kvmclock_disable(void)
 {
-	native_write_msr(msr_kvm_system_time, 0, 0);
+	if (msr_kvm_system_time)
+		native_write_msr(msr_kvm_system_time, 0, 0);
 }
 
 static void __init kvmclock_init_mem(void)
@@ -292,7 +293,10 @@ void __init kvmclock_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
 		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
 		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-	} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+	} else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+	} else {
 		return;
 	}
 
@@ -6,7 +6,7 @@
  */
 int num_digits(int val)
 {
-	int m = 10;
+	long long m = 10;
 	int d = 1;
 
 	if (val < 0) {
@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
 # optimization flags.
 KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
 
+# When LTO is enabled, llvm emits many text sections, which is not supported
+# by kexec. Remove -flto=* flags.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
 # When linking purgatory.ro with -r unresolved symbols are not checked,
 # also link a purgatory.chk binary without -r to check for unresolved symbols.
 PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
@@ -549,8 +549,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags->sb.shift;
 
-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+	dd->async_depth = max(1U, 3 * (1U << shift) / 4);
 
 	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
 }
@@ -7,6 +7,8 @@ FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment
 PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
 POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
 
+DTC_FLAGS="${DTC_FLAGS} -@"
+
 MAKE_GOALS="${MAKE_GOALS}
 qcom/sdm845-db845c.dtb
 Image.gz
@@ -1039,9 +1039,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
 void af_alg_free_resources(struct af_alg_async_req *areq)
 {
 	struct sock *sk = areq->sk;
+	struct af_alg_ctx *ctx;
 
 	af_alg_free_areq_sgls(areq);
 	sock_kfree_s(sk, areq, areq->areqlen);
+
+	ctx = alg_sk(sk)->private;
+	ctx->inflight = false;
 }
 EXPORT_SYMBOL_GPL(af_alg_free_resources);
 
@@ -1105,11 +1109,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
 					   unsigned int areqlen)
 {
-	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+	struct af_alg_ctx *ctx = alg_sk(sk)->private;
+	struct af_alg_async_req *areq;
+
+	/* Only one AIO request can be in flight. */
+	if (ctx->inflight)
+		return ERR_PTR(-EBUSY);
 
+	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
 	if (unlikely(!areq))
 		return ERR_PTR(-ENOMEM);
 
+	ctx->inflight = true;
+
 	areq->areqlen = areqlen;
 	areq->sk = sk;
 	areq->last_rsgl = NULL;
@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	struct crypto_scomp *scomp = *tfm_ctx;
 	void **ctx = acomp_request_ctx(req);
 	struct scomp_scratch *scratch;
+	unsigned int dlen;
 	int ret;
 
 	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
 		req->dlen = SCOMP_SCRATCH_SIZE;
 
+	dlen = req->dlen;
+
 	scratch = raw_cpu_ptr(&scomp_scratch);
 	spin_lock(&scratch->lock);
 
@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 				ret = -ENOMEM;
 				goto out;
 			}
+		} else if (req->dlen > dlen) {
+			ret = -ENOSPC;
+			goto out;
 		}
 		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
 					 1);
@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	static u32 err_seq;
 
 	estatus = extlog_elog_entry_check(cpu, bank);
-	if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
+	if (!estatus)
 		return NOTIFY_DONE;
 
+	if (mce->kflags & MCE_HANDLED_CEC) {
+		estatus->block_status = 0;
+		return NOTIFY_DONE;
+	}
+
 	memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
 	/* clear record status to enable BIOS to update it again */
 	estatus->block_status = 0;
@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
 			struct acpi_lpit_native *lpit_native)
 {
 	info->frequency = lpit_native->counter_frequency ?
-				lpit_native->counter_frequency : tsc_khz * 1000;
+				lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
 	if (!info->frequency)
 		info->frequency = 1;
 
@@ -1788,12 +1788,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
 		return;
 	count++;
 
-	acpi_get_parent(device->dev->handle, &acpi_parent);
-
-	pdev = acpi_get_pci_dev(acpi_parent);
-	if (pdev) {
-		parent = &pdev->dev;
-		pci_dev_put(pdev);
+	if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
+		pdev = acpi_get_pci_dev(acpi_parent);
+		if (pdev) {
+			parent = &pdev->dev;
+			pci_dev_put(pdev);
+		}
 	}
 
 	memset(&props, 0, sizeof(struct backlight_properties));
@@ -639,6 +639,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
  * @index: Index of the reference to return
  * @num_args: Maximum number of arguments after each reference
  * @args: Location to store the returned reference with optional arguments
+ *	  (may be NULL)
  *
  * Find property with @name, verifify that it is a package containing at least
  * one object reference and if so, store the ACPI device object pointer to the
@@ -697,6 +698,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 		if (ret)
 			return ret == -ENODEV ? -EINVAL : ret;
 
+		if (!args)
+			return 0;
+
 		args->fwnode = acpi_fwnode_handle(device);
 		args->nargs = 0;
 		return 0;
@@ -455,6 +455,13 @@ static const struct dmi_system_id asus_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
 		},
 	},
+	{
+		/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+			DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
+		},
+	},
 	{
 		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
 		.matches = {
@@ -1800,8 +1800,10 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (offset > buffer->data_size || read_size < sizeof(*hdr))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 
 	if (u) {
 		if (copy_from_user(object, u + offset, read_size))
 			return 0;
@@ -5111,7 +5113,7 @@ static __poll_t binder_poll(struct file *filp,
 
 	thread = binder_get_thread(proc);
 	if (!thread)
-		return POLLERR;
+		return EPOLLERR;
 
 	binder_inner_proc_lock(thread->proc);
 	thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -272,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	}
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return 0;
 
@@ -305,7 +305,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 err_no_vma:
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
 }
@@ -360,8 +360,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 			continue;
 		if (!buffer->async_transaction)
 			continue;
-		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
-			+ sizeof(struct binder_buffer);
+		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
 		num_buffers++;
 	}
 
@@ -422,6 +421,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			      alloc->pid, extra_buffers_size);
 		return ERR_PTR(-EINVAL);
 	}
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
 	if (is_async &&
 	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
@@ -431,9 +433,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		return ERR_PTR(-ENOSPC);
 	}
 
-	/* Pad 0-size buffers so they get assigned unique addresses */
-	size = max(size, sizeof(void *));
-
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
@@ -535,7 +534,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	buffer->pid = pid;
 	buffer->oneway_spam_suspect = false;
 	if (is_async) {
-		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		alloc->free_async_space -= size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_alloc_buf size %zd async free %zd\n",
 			      alloc->pid, size, alloc->free_async_space);
@@ -573,7 +572,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
  * is the sum of the three given sizes (each rounded up to
  * pointer-sized boundary)
  *
- * Return:	The allocated buffer or %NULL if error
+ * Return:	The allocated buffer or %ERR_PTR(-errno) if error
  */
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
@@ -673,8 +672,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
-		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
-
+		alloc->free_async_space += buffer_size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_free_buf size %zd async free %zd\n",
 			      alloc->pid, size, alloc->free_async_space);
@@ -722,7 +720,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	/*
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
-	 * binder_alloc_free_buf_locked(). However, that could
+	 * binder_free_buf_locked(). However, that could
 	 * increase contention for the alloc mutex if clear_on_free
 	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
@@ -1013,7 +1011,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	vma = binder_alloc_get_vma(alloc);
+	vma = find_vma(mm, page_addr);
+	if (vma && vma != binder_alloc_get_vma(alloc))
+		goto err_invalid_vma;
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -1039,6 +1039,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;
 
+err_invalid_vma:
+	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
@@ -219,6 +219,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_fixup_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
@@ -1479,6 +1479,28 @@ void pm_runtime_enable(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
+static void pm_runtime_disable_action(void *data)
+{
+	pm_runtime_dont_use_autosuspend(data);
+	pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+	pm_runtime_enable(dev);
+
+	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+
 /**
  * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
@@ -544,6 +544,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
 	if (nargs > NR_FWNODE_REFERENCE_ARGS)
 		return -EINVAL;
 
+	if (!args)
+		return 0;
+
 	args->fwnode = software_node_get(refnode);
 	args->nargs = nargs;
 
@@ -471,7 +471,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
 	return data;
 }
 
-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 {
 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
 	const unsigned char *p_left = data, *p_h4;
@@ -510,25 +510,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
 			bt_dev_err(bdev->hdev,
 				   "Frame reassembly failed (%d)", err);
 			bdev->rx_skb = NULL;
-			return err;
+			return;
 		}
 
 		sz_left -= sz_h4;
 		p_left += sz_h4;
 	}
-
-	return 0;
 }
 
 static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
 				 size_t count)
 {
 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
-	int err;
 
-	err = btmtkuart_recv(bdev->hdev, data, count);
-	if (err < 0)
-		return err;
+	btmtkuart_recv(bdev->hdev, data, count);
 
 	bdev->hdev->stat.byte_rx += count;
 
@@ -2093,13 +2093,23 @@ static int sysc_reset(struct sysc *ddata)
 		sysc_val = sysc_read_sysconfig(ddata);
 		sysc_val |= sysc_mask;
 		sysc_write(ddata, sysc_offset, sysc_val);
-		/* Flush posted write */
+
+		/*
+		 * Some devices need a delay before reading registers
+		 * after reset. Presumably a srst_udelay is not needed
+		 * for devices that use a rstctrl register reset.
+		 */
+		if (ddata->cfg.srst_udelay)
+			fsleep(ddata->cfg.srst_udelay);
+
+		/*
+		 * Flush posted write. For devices needing srst_udelay
+		 * this should trigger an interconnect error if the
+		 * srst_udelay value is needed but not configured.
+		 */
 		sysc_val = sysc_read_sysconfig(ddata);
 	}
 
-	if (ddata->cfg.srst_udelay)
-		fsleep(ddata->cfg.srst_udelay);
-
 	if (ddata->post_reset_quirk)
 		ddata->post_reset_quirk(ddata);
 
@@ -888,10 +888,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	r[0] = r_div ? (r_div & 0xff) : 1;
 	r[1] = (r_div >> 8) & 0xff;
 	r[2] = (r_div >> 16) & 0xff;
-	err = regmap_bulk_write(output->data->regmap,
+	return regmap_bulk_write(output->data->regmap,
 			SI5341_OUT_R_REG(output), r, 3);
-
-	return 0;
 }
 
 static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
@@ -4579,6 +4579,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
 	if (!ret) {
 		devres->clk = clk;
 		devres->nb = nb;
+		devres_add(dev, devres);
 	} else {
 		devres_free(devres);
 	}
@@ -38,8 +38,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
 	.config_ctl_hi_val = 0x00002267,
 	.config_ctl_hi1_val = 0x00000024,
 	.test_ctl_val = 0x00000000,
-	.test_ctl_hi_val = 0x00000002,
-	.test_ctl_hi1_val = 0x00000000,
+	.test_ctl_hi_val = 0x00000000,
+	.test_ctl_hi1_val = 0x00000020,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x000000d0,
@@ -37,6 +37,7 @@ static struct alpha_pll_config video_pll0_config = {
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002267,
 	.config_ctl_hi1_val = 0x00000024,
+	.test_ctl_hi1_val = 0x00000020,
 	.user_ctl_val = 0x00000000,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x000000D0,
@@ -218,6 +219,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
 
 static const struct qcom_reset_map video_cc_sm8150_resets[] = {
 	[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
+	[VIDEO_CC_MVS0_BCR] = { 0x870 },
+	[VIDEO_CC_MVS1_BCR] = { 0x8b0 },
+	[VIDEO_CC_MVSC_BCR] = { 0x810 },
 };
 
 static const struct qcom_cc_desc video_cc_sm8150_desc = {
@@ -489,7 +489,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
 	GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
 	GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
-	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
 	GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
 	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
 	GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
@@ -83,7 +83,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 static const struct clk_ops zynqmp_clk_mux_ops = {
 	.get_parent = zynqmp_clk_mux_get_parent,
 	.set_parent = zynqmp_clk_mux_set_parent,
-	.determine_rate = __clk_mux_determine_rate,
+	.determine_rate = __clk_mux_determine_rate_closest,
 };
 
 static const struct clk_ops zynqmp_clk_mux_ro_ops = {
@@ -109,49 +109,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
 	return DIV_ROUND_UP_ULL(parent_rate, value);
 }
 
-static void zynqmp_get_divider2_val(struct clk_hw *hw,
-				    unsigned long rate,
-				    struct zynqmp_clk_divider *divider,
-				    int *bestdiv)
-{
-	int div1;
-	int div2;
-	long error = LONG_MAX;
-	unsigned long div1_prate;
-	struct clk_hw *div1_parent_hw;
-	struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
-	struct zynqmp_clk_divider *pdivider =
-				to_zynqmp_clk_divider(div2_parent_hw);
-
-	if (!pdivider)
-		return;
-
-	div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
-	if (!div1_parent_hw)
-		return;
-
-	div1_prate = clk_hw_get_rate(div1_parent_hw);
-	*bestdiv = 1;
-	for (div1 = 1; div1 <= pdivider->max_div;) {
-		for (div2 = 1; div2 <= divider->max_div;) {
-			long new_error = ((div1_prate / div1) / div2) - rate;
-
-			if (abs(new_error) < abs(error)) {
-				*bestdiv = div2;
-				error = new_error;
-			}
-			if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-				div2 = div2 << 1;
-			else
-				div2++;
-		}
-		if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
-			div1 = div1 << 1;
-		else
-			div1++;
-	}
-}
-
 /**
  * zynqmp_clk_divider_round_rate() - Round rate of divider clock
  * @hw:	handle between common and hardware-specific interfaces
@@ -170,6 +127,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
 	u32 div_type = divider->div_type;
 	u32 bestdiv;
 	int ret;
+	u8 width;
 
 	/* if read only, just return current value */
 	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
@@ -189,23 +147,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
 		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
 	}
 
-	bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+	width = fls(divider->max_div);
 
-	/*
-	 * In case of two divisors, compute best divider values and return
-	 * divider2 value based on compute value. div1 will be automatically
-	 * set to optimum based on required total divider value.
-	 */
-	if (div_type == TYPE_DIV2 &&
-	    (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
-		zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
-	}
+	rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
 
-	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
-		bestdiv = rate % *prate ? 1 : bestdiv;
-
-	bestdiv = min_t(u32, bestdiv, divider->max_div);
-	*prate = rate * bestdiv;
+	if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
+		*prate = rate;
 
 	return rate;
 }
@@ -163,7 +163,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
 	struct device_node *np = of_cpu_device_node_get(0);
 	bool ret = false;
 
-	if (of_get_property(np, "operating-points-v2", NULL))
+	if (of_property_present(np, "operating-points-v2"))
 		ret = true;
 
 	of_node_put(np);
@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
 
 	cpu_dev = get_cpu_device(0);
 
-	if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
+	if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
 		return -ENODEV;
 
 	if (of_machine_is_compatible("fsl,imx7ulp")) {
@@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
 	u32 val;
 	int ret;
 
-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+	if (of_property_present(dev->of_node, "nvmem-cells")) {
 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
 		if (ret)
 			return ret;
@@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
 	u32 val;
 	int ret = 0;
 
-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+	if (of_property_present(dev->of_node, "nvmem-cells")) {
 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
 		if (ret)
 			return ret;
@@ -244,8 +244,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
 
 #ifdef CONFIG_COMMON_CLK
 	/* dummy clock provider as needed by OPP if clocks property is used */
-	if (of_find_property(dev->of_node, "#clock-cells", NULL))
-		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+	if (of_property_present(dev->of_node, "#clock-cells")) {
+		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+		if (ret)
+			return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+	}
 #endif
 
 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
 	struct device_node *np = of_cpu_device_node_get(0);
 	bool ret = false;
 
-	if (of_get_property(np, "operating-points-v2", NULL))
+	if (of_property_present(np, "operating-points-v2"))
 		ret = true;
 
 	of_node_put(np);
@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
 
 		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
 						 dir);
-		if (dma_mapping_error(wa->dev, wa->dma.address))
+		if (dma_mapping_error(wa->dev, wa->dma.address)) {
+			kfree(wa->address);
+			wa->address = NULL;
 			return -ENOMEM;
+		}
 
 		wa->dma.length = len;
 	}
@@ -1848,9 +1848,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
 	crypto_aead_set_flags(ctx->fallback.aead,
 			      crypto_aead_get_flags(authenc) &
 			      CRYPTO_TFM_REQ_MASK);
-	crypto_aead_setkey(ctx->fallback.aead, key, keylen);
 
-	return 0;
+	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
 }
 
 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -43,7 +43,6 @@
 #define FLAGS_MODE_MASK		0x000f
 #define FLAGS_ENCRYPT		BIT(0)
 #define FLAGS_CBC		BIT(1)
-#define FLAGS_NEW_KEY		BIT(3)
 
 #define SAHARA_HDR_BASE		0x00800000
 #define SAHARA_HDR_SKHA_ALG_AES	0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
 };
 
 struct sahara_ctx {
-	unsigned long flags;
-
 	/* AES-specific context */
 	int keylen;
 	u8 key[AES_KEYSIZE_128];
@@ -151,6 +148,7 @@ struct sahara_ctx {
 
 struct sahara_aes_reqctx {
 	unsigned long mode;
+	u8 iv_out[AES_BLOCK_SIZE];
 	struct skcipher_request fallback_req;	// keep at the end
 };
 
@@ -446,27 +444,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	int ret;
 	int i, j;
 	int idx = 0;
+	u32 len;
 
-	/* Copy new key if necessary */
-	if (ctx->flags & FLAGS_NEW_KEY) {
-		memcpy(dev->key_base, ctx->key, ctx->keylen);
-		ctx->flags &= ~FLAGS_NEW_KEY;
+	memcpy(dev->key_base, ctx->key, ctx->keylen);
 
-		if (dev->flags & FLAGS_CBC) {
-			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
-			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
-		} else {
-			dev->hw_desc[idx]->len1 = 0;
-			dev->hw_desc[idx]->p1 = 0;
-		}
-		dev->hw_desc[idx]->len2 = ctx->keylen;
-		dev->hw_desc[idx]->p2 = dev->key_phys_base;
-		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
-
-		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
-
-		idx++;
-	}
+	if (dev->flags & FLAGS_CBC) {
+		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+	} else {
+		dev->hw_desc[idx]->len1 = 0;
+		dev->hw_desc[idx]->p1 = 0;
+	}
+	dev->hw_desc[idx]->len2 = ctx->keylen;
+	dev->hw_desc[idx]->p2 = dev->key_phys_base;
+	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+
+	idx++;
 
 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 	if (dev->nb_in_sg < 0) {
@@ -488,24 +483,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 			 DMA_TO_DEVICE);
 	if (ret != dev->nb_in_sg) {
 		dev_err(dev->device, "couldn't map in sg\n");
-		goto unmap_in;
+		return -EINVAL;
 	}
 
 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 			 DMA_FROM_DEVICE);
 	if (ret != dev->nb_out_sg) {
 		dev_err(dev->device, "couldn't map out sg\n");
-		goto unmap_out;
+		goto unmap_in;
 	}
 
 	/* Create input links */
 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 	sg = dev->in_sg;
+	len = dev->total;
 	for (i = 0; i < dev->nb_in_sg; i++) {
-		dev->hw_link[i]->len = sg->length;
+		dev->hw_link[i]->len = min(len, sg->length);
 		dev->hw_link[i]->p = sg->dma_address;
 		if (i == (dev->nb_in_sg - 1)) {
 			dev->hw_link[i]->next = 0;
 		} else {
+			len -= min(len, sg->length);
 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 			sg = sg_next(sg);
 		}
@@ -514,12 +512,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	/* Create output links */
 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 	sg = dev->out_sg;
+	len = dev->total;
 	for (j = i; j < dev->nb_out_sg + i; j++) {
-		dev->hw_link[j]->len = sg->length;
+		dev->hw_link[j]->len = min(len, sg->length);
 		dev->hw_link[j]->p = sg->dma_address;
 		if (j == (dev->nb_out_sg + i - 1)) {
 			dev->hw_link[j]->next = 0;
 		} else {
+			len -= min(len, sg->length);
 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 			sg = sg_next(sg);
 		}
@@ -538,9 +538,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 
 	return 0;
 
-unmap_out:
-	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
-		DMA_FROM_DEVICE);
 unmap_in:
 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 		DMA_TO_DEVICE);
@@ -548,8 +545,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	return -EINVAL;
 }
 
+static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	/* Update IV buffer to contain the last ciphertext block */
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
+				   ivsize, req->cryptlen - ivsize);
+	} else {
+		memcpy(req->iv, rctx->iv_out, ivsize);
+	}
+}
+
 static int sahara_aes_process(struct skcipher_request *req)
 {
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct sahara_dev *dev = dev_ptr;
 	struct sahara_ctx *ctx;
 	struct sahara_aes_reqctx *rctx;
@@ -571,8 +584,17 @@ static int sahara_aes_process(struct skcipher_request *req)
 	rctx->mode &= FLAGS_MODE_MASK;
 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-	if ((dev->flags & FLAGS_CBC) && req->iv)
-		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
+	if ((dev->flags & FLAGS_CBC) && req->iv) {
+		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+		memcpy(dev->iv_base, req->iv, ivsize);
+
+		if (!(dev->flags & FLAGS_ENCRYPT)) {
+			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+					   rctx->iv_out, ivsize,
+					   req->cryptlen - ivsize);
+		}
+	}
 
 	/* assign new context to device */
 	dev->ctx = ctx;
@@ -585,16 +607,20 @@ static int sahara_aes_process(struct skcipher_request *req)
 
 	timeout = wait_for_completion_timeout(&dev->dma_completion,
 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
-	if (!timeout) {
-		dev_err(dev->device, "AES timeout\n");
-		return -ETIMEDOUT;
-	}
 
 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 		DMA_FROM_DEVICE);
 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 		DMA_TO_DEVICE);
 
+	if (!timeout) {
+		dev_err(dev->device, "AES timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if ((dev->flags & FLAGS_CBC) && req->iv)
+		sahara_aes_cbc_update_iv(req);
+
 	return 0;
 }
 
@@ -608,7 +634,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	/* SAHARA only supports 128bit keys */
 	if (keylen == AES_KEYSIZE_128) {
 		memcpy(ctx->key, key, keylen);
-		ctx->flags |= FLAGS_NEW_KEY;
 		return 0;
 	}
 
@@ -624,12 +649,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
 }
 
+static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
+{
+	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+		crypto_skcipher_reqtfm(req));
+
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req,
+				      req->base.flags,
+				      req->base.complete,
+				      req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+				   req->dst, req->cryptlen, req->iv);
+
+	if (mode & FLAGS_ENCRYPT)
+		return crypto_skcipher_encrypt(&rctx->fallback_req);
+
+	return crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 {
 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+		crypto_skcipher_reqtfm(req));
 	struct sahara_dev *dev = dev_ptr;
 	int err = 0;
 
 	if (!req->cryptlen)
 		return 0;
 
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
+		return sahara_aes_fallback(req, mode);
+
 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 
@@ -652,81 +705,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 
 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
 {
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_encrypt(&rctx->fallback_req);
-	}
-
 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 }
 
 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
 {
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-		skcipher_request_set_callback(&rctx->fallback_req,
-					      req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
-					   req->dst, req->cryptlen, req->iv);
-		return crypto_skcipher_decrypt(&rctx->fallback_req);
-	}
-
 	return sahara_aes_crypt(req, 0);
 }
 
 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
 {
-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
-		crypto_skcipher_reqtfm(req));
-
-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
|
||||
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
|
||||
skcipher_request_set_callback(&rctx->fallback_req,
|
||||
req->base.flags,
|
||||
req->base.complete,
|
||||
req->base.data);
|
||||
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
|
||||
req->dst, req->cryptlen, req->iv);
|
||||
return crypto_skcipher_encrypt(&rctx->fallback_req);
|
||||
}
|
||||
|
||||
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
|
||||
}
|
||||
|
||||
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
|
||||
struct sahara_ctx *ctx = crypto_skcipher_ctx(
|
||||
crypto_skcipher_reqtfm(req));
|
||||
|
||||
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
|
||||
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
|
||||
skcipher_request_set_callback(&rctx->fallback_req,
|
||||
req->base.flags,
|
||||
req->base.complete,
|
||||
req->base.data);
|
||||
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
|
||||
req->dst, req->cryptlen, req->iv);
|
||||
return crypto_skcipher_decrypt(&rctx->fallback_req);
|
||||
}
|
||||
|
||||
return sahara_aes_crypt(req, FLAGS_CBC);
|
||||
}
|
||||
|
||||
@ -783,6 +776,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
|
||||
int start)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
unsigned int len;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
@ -804,12 +798,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
|
||||
if (!ret)
|
||||
return -EFAULT;
|
||||
|
||||
len = rctx->total;
|
||||
for (i = start; i < dev->nb_in_sg + start; i++) {
|
||||
dev->hw_link[i]->len = sg->length;
|
||||
dev->hw_link[i]->len = min(len, sg->length);
|
||||
dev->hw_link[i]->p = sg->dma_address;
|
||||
if (i == (dev->nb_in_sg + start - 1)) {
|
||||
dev->hw_link[i]->next = 0;
|
||||
} else {
|
||||
len -= min(len, sg->length);
|
||||
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
@ -890,24 +886,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
|
||||
{
|
||||
if (!sg || !sg->length)
|
||||
return nbytes;
|
||||
|
||||
while (nbytes && sg) {
|
||||
if (nbytes <= sg->length) {
|
||||
sg->length = nbytes;
|
||||
sg_mark_end(sg);
|
||||
break;
|
||||
}
|
||||
nbytes -= sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int sahara_sha_prepare_request(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
@ -944,36 +922,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
|
||||
hash_later, 0);
|
||||
}
|
||||
|
||||
/* nbytes should now be multiple of blocksize */
|
||||
req->nbytes = req->nbytes - hash_later;
|
||||
|
||||
sahara_walk_and_recalc(req->src, req->nbytes);
|
||||
|
||||
rctx->total = len - hash_later;
|
||||
/* have data from previous operation and current */
|
||||
if (rctx->buf_cnt && req->nbytes) {
|
||||
sg_init_table(rctx->in_sg_chain, 2);
|
||||
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
|
||||
|
||||
sg_chain(rctx->in_sg_chain, 2, req->src);
|
||||
|
||||
rctx->total = req->nbytes + rctx->buf_cnt;
|
||||
rctx->in_sg = rctx->in_sg_chain;
|
||||
|
||||
req->src = rctx->in_sg_chain;
|
||||
/* only data from previous operation */
|
||||
} else if (rctx->buf_cnt) {
|
||||
if (req->src)
|
||||
rctx->in_sg = req->src;
|
||||
else
|
||||
rctx->in_sg = rctx->in_sg_chain;
|
||||
/* buf was copied into rembuf above */
|
||||
rctx->in_sg = rctx->in_sg_chain;
|
||||
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
|
||||
rctx->total = rctx->buf_cnt;
|
||||
/* no data from previous operation */
|
||||
} else {
|
||||
rctx->in_sg = req->src;
|
||||
rctx->total = req->nbytes;
|
||||
req->src = rctx->in_sg;
|
||||
}
|
||||
|
||||
/* on next call, we only have the remaining data in the buffer */
|
||||
@ -994,7 +956,10 @@ static int sahara_sha_process(struct ahash_request *req)
|
||||
return ret;
|
||||
|
||||
if (rctx->first) {
|
||||
sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
|
||||
ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev->hw_desc[0]->next = 0;
|
||||
rctx->first = 0;
|
||||
} else {
|
||||
@ -1002,7 +967,10 @@ static int sahara_sha_process(struct ahash_request *req)
|
||||
|
||||
sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
|
||||
dev->hw_desc[0]->next = dev->hw_phys_desc[1];
|
||||
sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
|
||||
ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev->hw_desc[1]->next = 0;
|
||||
}
|
||||
|
||||
@ -1015,18 +983,19 @@ static int sahara_sha_process(struct ahash_request *req)
|
||||
|
||||
timeout = wait_for_completion_timeout(&dev->dma_completion,
|
||||
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
|
||||
if (!timeout) {
|
||||
dev_err(dev->device, "SHA timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
if (rctx->sg_in_idx)
|
||||
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (!timeout) {
|
||||
dev_err(dev->device, "SHA timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
memcpy(rctx->context, dev->context_base, rctx->context_size);
|
||||
|
||||
if (req->result)
|
||||
if (req->result && rctx->last)
|
||||
memcpy(req->result, rctx->context, rctx->digest_size);
|
||||
|
||||
return 0;
|
||||
@ -1170,8 +1139,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
|
||||
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
||||
sizeof(struct sahara_sha_reqctx) +
|
||||
SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
|
||||
sizeof(struct sahara_sha_reqctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
select CRYPTO_AKCIPHER2
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_RSA
select MPILIB
help
This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto.
@ -2,5 +2,6 @@
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
virtio_crypto-objs := \
	virtio_crypto_algs.o \
	virtio_crypto_akcipher_algs.o \
	virtio_crypto_mgr.o \
	virtio_crypto_core.o
591
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
Normal file
@ -0,0 +1,591 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
*
* Authors: zhenwei pi <pizhenwei@bytedance.com>
* lei he <helei.sig11@bytedance.com>
*
* Copyright 2022 Bytedance CO., LTD.
*/

#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_rsa_ctx {
MPI n;
};

struct virtio_crypto_akcipher_ctx {
struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_akcipher *tfm;
bool session_valid;
__u64 session_id;
union {
struct virtio_crypto_rsa_ctx rsa_ctx;
};
};

struct virtio_crypto_akcipher_request {
struct virtio_crypto_request base;
struct virtio_crypto_akcipher_ctx *akcipher_ctx;
struct akcipher_request *akcipher_req;
void *src_buf;
void *dst_buf;
uint32_t opcode;
};

struct virtio_crypto_akcipher_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
struct akcipher_alg algo;
};

static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
kfree(vc_akcipher_req->src_buf);
kfree(vc_akcipher_req->dst_buf);
vc_akcipher_req->src_buf = NULL;
vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);

crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}

static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
struct virtio_crypto_akcipher_request *vc_akcipher_req =
container_of(vc_req, struct virtio_crypto_akcipher_request, base);
struct akcipher_request *akcipher_req;
int error;

switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;

case VIRTIO_CRYPTO_KEY_REJECTED:
error = -EKEYREJECTED;
break;

default:
error = -EIO;
break;
}

akcipher_req = vc_akcipher_req->akcipher_req;
if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}

static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
struct virtio_crypto_ctrl_header *header, void *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
struct virtio_crypto *vcrypto = ctx->vcrypto;
uint8_t *pkey;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_ctrl_request *vc_ctrl_req;

pkey = kmemdup(key, keylen, GFP_ATOMIC);
if (!pkey)
return -ENOMEM;

vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}

ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
memcpy(&ctrl->u, para, sizeof(ctrl->u));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;

sg_init_one(&key_sg, pkey, keylen);
sgs[num_out++] = &key_sg;

sg_init_one(&inhdr_sg, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr_sg;

err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;

if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}

ctx->session_id = le64_to_cpu(input->session_id);
ctx->session_valid = true;
err = 0;

out:
kfree(vc_ctrl_req);
kfree_sensitive(pkey);

return err;
}

static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
unsigned int num_out = 0, num_in = 0;
int err;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;

if (!ctx->session_valid)
return 0;

vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req)
return -ENOMEM;

ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
ctrl->header.queue_id = 0;

destroy_session = &ctrl->u.destroy_session;
destroy_session->session_id = cpu_to_le64(ctx->session_id);

sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr_sg;

sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &inhdr_sg;

err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;

if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
ctrl_status->status, destroy_session->session_id);
err = -EINVAL;
goto out;
}

err = 0;
ctx->session_valid = false;

out:
kfree(vc_ctrl_req);

return err;
}

static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, struct data_queue *data_vq)
{
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
void *src_buf = NULL, *dst_buf = NULL;
unsigned int num_out = 0, num_in = 0;
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret = -ENOMEM;
bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;

/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;

/* src data */
src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
if (!src_buf)
goto err;

if (verify) {
/* for verify operation, both src and dst data work as OUT direction */
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;
} else {
sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
sg_init_one(&srcdata_sg, src_buf, src_len);
sgs[num_out++] = &srcdata_sg;

/* dst data */
dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
if (!dst_buf)
goto err;

sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
sgs[num_out + num_in++] = &dstdata_sg;
}

vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;

/* in header */
sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
sgs[num_out + num_in++] = &inhdr_sg;

spin_lock_irqsave(&data_vq->lock, flags);
ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
virtqueue_kick(data_vq->vq);
spin_unlock_irqrestore(&data_vq->lock, flags);
if (ret)
goto err;

return 0;

err:
kfree(src_buf);
kfree(dst_buf);

return -ENOMEM;
}

static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct data_queue *data_vq = vc_req->dataq;
struct virtio_crypto_op_header *header;
struct virtio_crypto_akcipher_data_req *akcipher_req;
int ret;

vc_req->sgs = NULL;
vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
if (!vc_req->req_data)
return -ENOMEM;

/* build request header */
header = &vc_req->req_data->header;
header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header->session_id = cpu_to_le64(ctx->session_id);

/* build request akcipher data */
akcipher_req = &vc_req->req_data->u.akcipher_req;
akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
if (ret < 0) {
kfree_sensitive(vc_req->req_data);
vc_req->req_data = NULL;
return ret;
}

return 0;
}

static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];

vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
vc_akcipher_req->akcipher_ctx = ctx;
vc_akcipher_req->akcipher_req = req;
vc_akcipher_req->opcode = opcode;

return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}

static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}

static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}

static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}

static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
bool private,
int padding_algo,
int hash_algo)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
struct virtio_crypto *vcrypto;
struct virtio_crypto_ctrl_header header;
struct virtio_crypto_akcipher_session_para para;
struct rsa_key rsa_key = {0};
int node = virtio_crypto_get_current_node();
uint32_t keytype;
int ret;

/* mpi_free will test n, just free it. */
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;

if (private) {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
ret = rsa_parse_priv_key(&rsa_key, key, keylen);
} else {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
ret = rsa_parse_pub_key(&rsa_key, key, keylen);
}

if (ret)
return ret;

rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
if (!rsa_ctx->n)
return -ENOMEM;

if (!ctx->vcrypto) {
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
VIRTIO_CRYPTO_AKCIPHER_RSA);
if (!vcrypto) {
pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}

ctx->vcrypto = vcrypto;
} else {
virtio_crypto_alg_akcipher_close_session(ctx);
}

/* set ctrl header */
header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
header.queue_id = 0;

/* set RSA para */
para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
para.keytype = cpu_to_le32(keytype);
para.keylen = cpu_to_le32(keylen);
para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}

static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}


static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}

static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_RAW_PADDING,
VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen)
{
return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
VIRTIO_CRYPTO_RSA_SHA1);
}

static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

return mpi_get_size(rsa_ctx->n);
}

static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

ctx->tfm = tfm;
ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;

return 0;
}

static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

virtio_crypto_alg_akcipher_close_session(ctx);
virtcrypto_dev_put(ctx->vcrypto);
mpi_free(rsa_ctx->n);
rsa_ctx->n = NULL;
}

static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "rsa",
.cra_driver_name = "virtio-crypto-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
},
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
.algo = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.sign = virtio_crypto_rsa_sign,
.verify = virtio_crypto_rsa_verify,
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.reqsize = sizeof(struct virtio_crypto_akcipher_request),
.base = {
.cra_name = "pkcs1pad(rsa,sha1)",
.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
},
};

int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
int i = 0;

mutex_lock(&algs_lock);

for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;

if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}

virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
virtio_crypto_akcipher_algs[i].algo.base.cra_name);
}

unlock:
mutex_unlock(&algs_lock);
return ret;
}

void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
int i = 0;

mutex_lock(&algs_lock);

for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
uint32_t service = virtio_crypto_akcipher_algs[i].service;
uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
!virtcrypto_algo_is_supported(vcrypto, service, algonum))
continue;

if (virtio_crypto_akcipher_algs[i].active_devs == 1)
crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);

virtio_crypto_akcipher_algs[i].active_devs--;
}

mutex_unlock(&algs_lock);
}
@ -118,11 +118,14 @@ static int virtio_crypto_alg_skcipher_init_session(
int encrypt)
{
struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
unsigned int tmp;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_session_input *input;
struct virtio_crypto_sym_create_session_req *sym_create_session;
struct virtio_crypto_ctrl_request *vc_ctrl_req;

/*
* Avoid to do DMA from the stack, switch to using
@ -133,26 +136,29 @@ static int virtio_crypto_alg_skcipher_init_session(
if (!cipher_key)
return -ENOMEM;

spin_lock(&vcrypto->ctrl_lock);
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req) {
err = -ENOMEM;
goto out;
}

/* Pad ctrl header */
vcrypto->ctrl.header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
vcrypto->ctrl.header.algo = cpu_to_le32(alg);
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
ctrl->header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0;
ctrl->header.queue_id = 0;

vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Pad cipher's parameters */
vcrypto->ctrl.u.sym_create_session.op_type =
cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
vcrypto->ctrl.header.algo;
vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
cpu_to_le32(keylen);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
cpu_to_le32(op);
sym_create_session = &ctrl->u.sym_create_session;
sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
sym_create_session->u.cipher.para.algo = ctrl->header.algo;
sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
sym_create_session->u.cipher.para.op = cpu_to_le32(op);

sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;

/* Set key */
@ -160,45 +166,30 @@ static int virtio_crypto_alg_skcipher_init_session(
sgs[num_out++] = &key_sg;

/* Return status and session id back */
sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
sg_init_one(&inhdr, input, sizeof(*input));
sgs[num_out + num_in++] = &inhdr;

err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
num_in, vcrypto, GFP_ATOMIC);
if (err < 0) {
spin_unlock(&vcrypto->ctrl_lock);
kfree_sensitive(cipher_key);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;

/*
* Trapping into the hypervisor, so the request should be
* handled immediately.
*/
while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();

if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(vcrypto->input.status));
kfree_sensitive(cipher_key);
return -EINVAL;
le32_to_cpu(input->status));
err = -EINVAL;
goto out;
}

if (encrypt)
ctx->enc_sess_info.session_id =
le64_to_cpu(vcrypto->input.session_id);
ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
else
ctx->dec_sess_info.session_id =
le64_to_cpu(vcrypto->input.session_id);

spin_unlock(&vcrypto->ctrl_lock);
ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

err = 0;
out:
kfree(vc_ctrl_req);
kfree_sensitive(cipher_key);
return 0;
return err;
}

static int virtio_crypto_alg_skcipher_close_session(
@ -206,60 +197,56 @@ static int virtio_crypto_alg_skcipher_close_session(
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
unsigned int tmp;
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int err;
unsigned int num_out = 0, num_in = 0;
struct virtio_crypto_op_ctrl_req *ctrl;
struct virtio_crypto_inhdr *ctrl_status;
struct virtio_crypto_ctrl_request *vc_ctrl_req;

spin_lock(&vcrypto->ctrl_lock);
vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
if (!vc_ctrl_req)
return -ENOMEM;

ctrl_status = &vc_ctrl_req->ctrl_status;
ctrl_status->status = VIRTIO_CRYPTO_ERR;
/* Pad ctrl header */
vcrypto->ctrl.header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
ctrl = &vc_ctrl_req->ctrl;
ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0;
ctrl->header.queue_id = 0;

destroy_session = &vcrypto->ctrl.u.destroy_session;
destroy_session = &ctrl->u.destroy_session;

if (encrypt)
destroy_session->session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
else
destroy_session->session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
sgs[num_out++] = &outhdr;

/* Return status and session id back */
sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
sizeof(vcrypto->ctrl_status.status));
sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
sgs[num_out + num_in++] = &status_sg;

err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
num_in, vcrypto, GFP_ATOMIC);
if (err < 0) {
spin_unlock(&vcrypto->ctrl_lock);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
if (err < 0)
goto out;

while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();

if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
vcrypto->ctrl_status.status,
destroy_session->session_id);
ctrl_status->status, destroy_session->session_id);

return -EINVAL;
err = -EINVAL;
goto out;
}
spin_unlock(&vcrypto->ctrl_lock);

return 0;
err = 0;
out:
kfree(vc_ctrl_req);
return err;
}

static int virtio_crypto_alg_skcipher_init_sessions(
@ -10,9 +10,11 @@
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>


/* Internal representation of a data virtqueue */
@ -27,6 +29,7 @@ struct data_queue {
char name[32];

struct crypto_engine *engine;
struct tasklet_struct done_task;
};

struct virtio_crypto {
@ -56,6 +59,7 @@ struct virtio_crypto {
u32 mac_algo_l;
u32 mac_algo_h;
u32 aead_algo;
u32 akcipher_algo;

/* Maximum length of cipher key */
u32 max_cipher_key_len;
@ -64,11 +68,6 @@ struct virtio_crypto {
/* Maximum size of per request */
u64 max_size;

/* Control VQ buffers: protected by the ctrl_lock */
struct virtio_crypto_op_ctrl_req ctrl;
struct virtio_crypto_session_input input;
struct virtio_crypto_inhdr ctrl_status;

unsigned long status;
atomic_t ref_count;
struct list_head list;
@ -84,6 +83,18 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};

/*
* Note: there are padding fields in request, clear them to zero before
* sending to host to avoid to divulge any information.
* Ex, virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
*/
struct virtio_crypto_ctrl_request {
struct virtio_crypto_op_ctrl_req ctrl;
struct virtio_crypto_session_input input;
struct virtio_crypto_inhdr ctrl_status;
struct completion compl;
};

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
(struct virtio_crypto_request *vc_req, int len);
@ -131,5 +142,10 @@ static inline int virtio_crypto_get_current_node(void)

int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
unsigned int out_sgs, unsigned int in_sgs,
struct virtio_crypto_ctrl_request *vc_ctrl_req);

#endif /* _VIRTIO_CRYPTO_COMMON_H */
@ -22,27 +22,78 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
}
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
complete(&vc_ctrl_req->compl);
}

static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct virtio_crypto_request *vc_req;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
unsigned long flags;
unsigned int len;
unsigned int qid = vq->index;

spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
virtio_crypto_ctrlq_callback(vc_ctrl_req);
spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
}
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
unsigned int out_sgs, unsigned int in_sgs,
struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
int err;
unsigned long flags;

init_completion(&vc_ctrl_req->compl);

spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
if (err < 0) {
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
return err;
}

virtqueue_kick(vcrypto->ctrl_vq);
spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

wait_for_completion(&vc_ctrl_req->compl);

return 0;
}

static void virtcrypto_done_task(unsigned long data)
{
struct data_queue *data_vq = (struct data_queue *)data;
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
unsigned int len;

do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(
&vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
spin_lock_irqsave(
&vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct data_queue *dq = &vcrypto->data_vq[vq->index];

tasklet_schedule(&dq->done_task);
}

static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@ -73,7 +124,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
goto err_names;

/* Parameters for control virtqueue */
callbacks[total_vqs - 1] = NULL;
callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
names[total_vqs - 1] = "controlq";

/* Allocate/initialize parameters for data virtqueues */
@ -99,6 +150,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
(unsigned long)&vi->data_vq[i]);
}

kfree(names);
@ -297,6 +350,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 mac_algo_l = 0;
u32 mac_algo_h = 0;
u32 aead_algo = 0;
u32 akcipher_algo = 0;
u32 crypto_services = 0;

if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@ -348,6 +402,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
mac_algo_h, &mac_algo_h);
virtio_cread_le(vdev, struct virtio_crypto_config,
aead_algo, &aead_algo);
if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
virtio_cread_le(vdev, struct virtio_crypto_config,
akcipher_algo, &akcipher_algo);

/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
@ -374,7 +431,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->mac_algo_h = mac_algo_h;
vcrypto->hash_algo = hash_algo;
vcrypto->aead_algo = aead_algo;

vcrypto->akcipher_algo = akcipher_algo;

dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
@ -431,11 +488,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
int i;

dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
for (i = 0; i < vcrypto->max_data_queues; i++)
tasklet_kill(&vcrypto->data_vq[i].done_task);
vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
@ -242,6 +242,12 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
return -EFAULT;
}

if (virtio_crypto_akcipher_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
virtio_crypto_algs_unregister(vcrypto);
return -EFAULT;
}

return 0;
}

@ -258,6 +264,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
virtio_crypto_algs_unregister(vcrypto);
virtio_crypto_akcipher_algs_unregister(vcrypto);
}

/*
@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
case VIRTIO_CRYPTO_SERVICE_AEAD:
algo_mask = vcrypto->aead_algo;
break;

case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
algo_mask = vcrypto->akcipher_algo;
break;
}

if (!(algo_mask & (1u << algo)))
@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int);

strncat(msg, other, OCX_MESSAGE_SIZE);
strlcat(msg, other, OCX_MESSAGE_SIZE);

for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) {
@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]);

strncat(msg, other, OCX_MESSAGE_SIZE);
strlcat(msg, other, OCX_MESSAGE_SIZE);

decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors,
ctx->reg_lane_int[lane]);
strncat(msg, other, OCX_MESSAGE_SIZE);
strlcat(msg, other, OCX_MESSAGE_SIZE);
}

if (ctx->reg_com_int & OCX_COM_INT_CE)
@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int);

strncat(msg, other, OCX_MESSAGE_SIZE);
strlcat(msg, other, OCX_MESSAGE_SIZE);

if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)

decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);

strncat(msg, other, L2C_MESSAGE_SIZE);
strlcat(msg, other, L2C_MESSAGE_SIZE);

if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40

// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
// while it is probable due to detection of any type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000

#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
const struct pci_dev *pcie_to_pci_bridge;

// Detect any type of AMD Ryzen machine.
if (!static_cpu_has(X86_FEATURE_ZEN))
return false;

// Detect VIA VT6306/6307/6308.
if (pdev->vendor != PCI_VENDOR_ID_VIA)
return false;
if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
return false;

// Detect Asmedia ASM1083/1085.
pcie_to_pci_bridge = pdev->bus->self;
if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
return false;
if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
return false;

return true;
}

#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, revision, flags;
@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
s32 diff01, diff12;
int i;

if (has_reboot_by_cycle_timer_read_quirk(ohci))
return 0;

c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;

if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;

/*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/
@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)

platform_set_drvdata(pdev, fw);

pr_info("secure-monitor enabled\n");
if (devm_of_platform_populate(dev))
goto out_in_base;

if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
goto out_in_base;

pr_info("secure-monitor enabled\n");

return 0;

out_in_base:
@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct resource *res;
char debug_name[50] = "ti_sci_debug@";
char debug_name[50];

/* Debug region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
/* Setup NULL termination */
info->debug_buffer[info->debug_region_size] = 0;

info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
sizeof(debug_name) -
sizeof("ti_sci_debug@")),
0444, NULL, info, &ti_sci_debug_fops);
snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
dev_name(dev));
info->d = debugfs_create_file(debug_name, 0444, NULL, info,
&ti_sci_debug_fops);
if (IS_ERR(info->d))
return PTR_ERR(info->d);
@ -459,6 +459,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;

if (!adev->didt_rreg)
return -EOPNOTSUPP;

r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@ -518,6 +521,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;

if (!adev->didt_wreg)
return -EOPNOTSUPP;

r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@ -576,7 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
int r;

if (!adev->smc_rreg)
return -EPERM;
return -EOPNOTSUPP;

if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@ -638,7 +644,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
int r;

if (!adev->smc_wreg)
return -EPERM;
return -EOPNOTSUPP;

if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@ -2733,10 +2733,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(adev->pm.dpm.ps);
if (ps == NULL)
return -ENOMEM;
}
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
@ -7349,10 +7349,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
amdgpu_free_extended_power_table(adev);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
}

adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
@ -1637,7 +1637,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n");
return ret;
return -EINVAL;
}
}
@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __exit tpd12s015_remove(struct platform_device *pdev)
+static int tpd12s015_remove(struct platform_device *pdev)
 {
 	struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
 
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
 
 static struct platform_driver tpd12s015_driver = {
 	.probe	= tpd12s015_probe,
-	.remove	= __exit_p(tpd12s015_remove),
+	.remove	= tpd12s015_remove,
 	.driver	= {
 		.name	= "tpd12s015",
 		.of_match_table = tpd12s015_of_match,
@@ -562,8 +562,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_mode_set set;
 	uint32_t __user *set_connectors_ptr;
 	struct drm_modeset_acquire_ctx ctx;
-	int ret;
-	int i;
+	int ret, i, num_connectors = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
@@ -721,6 +720,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 					connector->name);
 
 			connector_set[i] = connector;
+			num_connectors++;
 		}
 	}
 
@@ -729,7 +729,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	set.y = crtc_req->y;
 	set.mode = mode;
 	set.connectors = connector_set;
-	set.num_connectors = crtc_req->count_connectors;
+	set.num_connectors = num_connectors;
 	set.fb = fb;
 
 	if (drm_drv_uses_atomic_modeset(dev))
@@ -742,7 +742,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	drm_framebuffer_put(fb);
 
 	if (connector_set) {
-		for (i = 0; i < crtc_req->count_connectors; i++) {
+		for (i = 0; i < num_connectors; i++) {
 			if (connector_set[i])
 				drm_connector_put(connector_set[i]);
 		}
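The three drm_mode_setcrtc hunks above track how many connector references were actually acquired (num_connectors) and release exactly that many, instead of trusting the user-supplied count, which can exceed the slots that were really filled if a lookup fails partway. A runnable, simplified sketch of counting acquisitions and releasing only those (struct obj and the pool are stand-ins; a failed lookup just stops the loop here, where the driver jumps to its cleanup path):

    #include <stdio.h>

    /* Hypothetical refcounted object standing in for drm_connector. */
    struct obj { int refs; };

    static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
    static void obj_put(struct obj *o) { o->refs--; }

    int main(void)
    {
        struct obj a = { 0 }, b = { 0 };
        struct obj *pool[3] = { &a, NULL /* lookup failure */, &b };
        struct obj *set[3];
        int requested = 3, acquired = 0, i;

        for (i = 0; i < requested; i++) {
            if (!pool[i])
                break;                    /* stop at the first failure */
            set[acquired++] = obj_get(pool[i]);
        }

        /* Release only what was taken; looping to `requested` would
         * touch uninitialized slots, the bug the hunks above fix. */
        for (i = 0; i < acquired; i++)
            obj_put(set[i]);

        printf("refs: %d %d\n", a.refs, b.refs);  /* 0 0 */
        return 0;
    }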
@@ -892,8 +892,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 		goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_modeset_register_all(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_modeset_register_all(dev);
+		if (ret)
+			goto err_unload;
+	}
 
 	ret = 0;
 
@@ -905,6 +908,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
 	goto out_unlock;
 
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
 err_minors:
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
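The drm_dev_register pair above stops ignoring drm_modeset_register_all()'s return value and adds a matching err_unload label, so a late failure unwinds through the driver's unload hook before the earlier labels run. This is the standard kernel goto-unwind idiom; a runnable toy version (step_a/step_b/undo_a are invented stand-ins):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; }  /* fails, forcing unwind */
    static void undo_a(void) { puts("undo_a"); }

    /* Each failure jumps to a label that undoes exactly the steps
     * completed so far, in reverse order. */
    static int do_register(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            goto err;
        ret = step_b();
        if (ret)
            goto err_undo_a;   /* new label, like err_unload above */
        return 0;

    err_undo_a:
        undo_a();
    err:
        return ret;
    }

    int main(void)
    {
        printf("do_register: %d\n", do_register());
        return 0;
    }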
@@ -108,18 +108,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
 		return 0;
 
 	if (!priv->mapping) {
-		void *mapping;
+		void *mapping = NULL;
 
 		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 			mapping = arm_iommu_create_mapping(&platform_bus_type,
 				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
 		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
 			mapping = iommu_get_domain_for_dev(priv->dma_dev);
-		else
-			mapping = ERR_PTR(-ENODEV);
 
-		if (IS_ERR(mapping))
-			return PTR_ERR(mapping);
+		if (!mapping)
+			return -ENODEV;
 		priv->mapping = mapping;
 	}
 
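The exynos hunk above switches the failure sentinel from ERR_PTR(-ENODEV) to NULL because iommu_get_domain_for_dev() reports "no domain" by returning NULL, which an IS_ERR()-only check lets through. A runnable userspace re-creation of the kernel's ERR_PTR convention showing the mismatch (the macros mirror the kernel's, simplified):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO   4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *err_ptr  = ERR_PTR(-ENODEV); /* kernel-style error pointer */
        void *from_api = NULL;             /* API that returns NULL on failure */

        /* IS_ERR(NULL) is false, so a NULL failure slips past an
         * IS_ERR()-only check; the hunk above tests !mapping instead. */
        printf("IS_ERR(err_ptr)=%d IS_ERR(NULL)=%d\n",
               IS_ERR(err_ptr), IS_ERR(from_api));
        return 0;
    }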
@@ -1849,6 +1849,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 
 	crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+	if (IS_ERR(crtc))
+		return PTR_ERR(crtc);
 	crtc->pipe_clk = &hdata->phy_clk;
 
 	ret = hdmi_create_connector(encoder);
@@ -5584,7 +5584,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp)
 	intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
 
 	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
-				    link_status[DP_DPCD_REV]);
+				    intel_dp->dpcd[DP_DPCD_REV]);
 }
 
 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
@@ -268,6 +268,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	unsigned long flags;
 
 	DBG("%s", mdp4_crtc->name);
 
@@ -280,6 +281,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
 	mdp4_disable(mdp4_kms);
 
+	if (crtc->state->event && !crtc->state->active) {
+		WARN_ON(mdp4_crtc->event);
+		spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+	}
+
 	mdp4_crtc->enabled = false;
 }
 
@@ -558,7 +558,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
 	struct device *dev = &phy->pdev->dev;
 	int ret;
 
-	pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret)
+		return ret;
 
 	ret = clk_prepare_enable(phy->ahb_clk);
 	if (ret) {
@@ -708,6 +710,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
+	ret = devm_pm_runtime_enable(&pdev->dev);
+	if (ret)
+		return ret;
+
 	/* PLL init will call into clk_register which requires
 	 * register access, so we need to enable power and ahb clock.
 	 */
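The two DSI PHY hunks above adopt a safer runtime-PM pattern: pm_runtime_resume_and_get() drops the usage count again if the resume fails (pm_runtime_get_sync() leaves it raised), and devm_pm_runtime_enable() ties the matching pm_runtime_disable() to driver detach. A kernel-style sketch of the combined pattern, assuming a hypothetical platform driver (example_probe is illustrative, not the msm code):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev)
    {
        int ret;

        /* Managed enable: no manual pm_runtime_disable() on remove. */
        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
            return ret;

        ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret)
            return ret;   /* usage count already balanced on failure */

        /* ... hardware access while the device is powered ... */

        pm_runtime_put_sync(&pdev->dev);
        return 0;
    }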
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
 static int
 nv04_fence_emit(struct nouveau_fence *fence)
 {
-	struct nvif_push *push = fence->channel->chan.push;
+	struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
 	int ret = PUSH_WAIT(push, 2);
 	if (ret == 0) {
 		PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
@@ -32,7 +32,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 
 	type |= 0x00000001; /* PAGE_ALL */
 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
-		type |= 0x00000004; /* HUB_ONLY */
+		type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
 
 	mutex_lock(&subdev->mutex);
 
@@ -111,6 +111,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
 		return ret;
 	}
 
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
 	regulator_disable(ctx->iovcc);
 	regulator_disable(ctx->vdd);
 