Linux 5.4-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl2su/AeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGvm4H/1jkheCrvB/GJS69
 wd18vizAg+eFmNCzxlGVhpQTKGymNRy+g6clnoli3cNJ3pSVKcYgVyB3oXaONIhp
 g/ANudnBjTdjqYgJzfLij5AGecrGwDpF3YL0kuKrCB63s2I/HwQGYy/aPrYY8emy
 gAYdaf1DGRu5/DIIB6soTo/TnpKoAyTE+XY5MaPSug++t/Flov19tlU40IZxXW94
 bjTXbm0yklrsIx+LL5mYYGGnygSTCF66JjFg1qhDCBQaS2MZ21h1ZgaOtGZTwZcc
 WgEiqLC5S1Iyj96zir1t78RcVQ4RzgvDbhUOgIqUFsYAO2wOicvxyFE3Hj8rPOKd
 uGgVPRM=
 =xgZa
 -----END PGP SIGNATURE-----

Merge 5.4-rc4 into android-mainline

Linux 5.4-rc4

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0edccd72fad8b6443b24c8c1005b66d6b8f532ce
commit 9be46ff3b6
Author: Greg Kroah-Hartman
Date:   2019-10-26 19:24:41 +02:00

358 changed files with 3146 additions and 1828 deletions


@ -107,6 +107,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX2 SMMUv3| #126 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
+----------------+-----------------+-----------------+-----------------------------+
| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+----------------+-----------------+-----------------+-----------------------------+


@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
Both KASAN modes work with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
To augment reports with the last allocation and freeing stacks of the physical
page, it is also recommended to enable CONFIG_PAGE_OWNER and boot with
page_owner=on.
To disable instrumentation for specific files or directories, add a line
similar to the following to the respective kernel Makefile:
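The example lines themselves did not survive extraction here; what kasan.rst is
pointing at are the standard KASAN_SANITIZE kbuild variables (main.o below is
only a placeholder object name):

	# Disable KASAN instrumentation for a single file (e.g. main.o):
	KASAN_SANITIZE_main.o := n

	# Disable KASAN instrumentation for every file in the directory:
	KASAN_SANITIZE := n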


@ -1,8 +1,11 @@
* Advanced Interrupt Controller (AIC)
Required properties:
- compatible: Should be "atmel,<chip>-aic"
<chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
- compatible: Should be:
- "atmel,<chip>-aic" where <chip> can be "at91rm9200", "sama5d2",
"sama5d3" or "sama5d4"
- "microchip,<chip>-aic" where <chip> can be "sam9x60"
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).


@ -36,8 +36,10 @@ Support
=======
For general Linux networking support, please use the netdev mailing
list, which is monitored by Pensando personnel::
netdev@vger.kernel.org
For more specific support needs, please use the Pensando driver support
email::
drivers@pensando.io


@ -92,16 +92,16 @@ under some conditions.
Part III: Registering a Network Device to DIM
==============================================
Net DIM API exposes the main function net_dim(struct net_dim *dim,
struct net_dim_sample end_sample). This function is the entry point to the Net
Net DIM API exposes the main function net_dim(struct dim *dim,
struct dim_sample end_sample). This function is the entry point to the Net
DIM algorithm and has to be called every time the driver would like to check if
it should change interrupt moderation parameters. The driver should provide two
data structures: struct net_dim and struct net_dim_sample. Struct net_dim
data structures: struct dim and struct dim_sample. Struct dim
describes the state of DIM for a specific object (RX queue, TX queue,
other queues, etc.). This includes the current selected profile, previous data
samples, the callback function provided by the driver and more.
Struct net_dim_sample describes a data sample, which will be compared to the
data sample stored in struct net_dim in order to decide on the algorithm's next
Struct dim_sample describes a data sample, which will be compared to the
data sample stored in struct dim in order to decide on the algorithm's next
step. The sample should include bytes, packets and interrupts, measured by
the driver.
@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each
interrupt. Since Net DIM has a built-in moderation and it might decide to skip
iterations under certain conditions, there is no need to moderate the net_dim()
calls as well. As mentioned above, the driver needs to provide an object of type
struct net_dim to the net_dim() function call. It is advised for each entity
using Net DIM to hold a struct net_dim as part of its data structure and use it
as the main Net DIM API object. The struct net_dim_sample should hold the latest
struct dim to the net_dim() function call. It is advised for each entity
using Net DIM to hold a struct dim as part of its data structure and use it
as the main Net DIM API object. The struct dim_sample should hold the latest
bytes, packets and interrupts count. No need to perform any calculations, just
include the raw data.
@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear.
my_driver.c:
#include <linux/net_dim.h>
#include <linux/dim.h>
/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
/* Get struct net_dim from struct work_struct */
struct net_dim *dim = container_of(work, struct net_dim,
work);
/* Get struct dim from struct work_struct */
struct dim *dim = container_of(work, struct dim,
work);
/* Do interrupt moderation related stuff */
...
/* Signal net DIM work is done and it should move to next iteration */
dim->state = NET_DIM_START_MEASURE;
dim->state = DIM_START_MEASURE;
}
/* My driver's interrupt handler */
@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
{
...
/* A struct to hold current measured data */
struct net_dim_sample dim_sample;
struct dim_sample dim_sample;
...
/* Initiate data sample struct with current data */
net_dim_sample(my_entity->events,
my_entity->packets,
my_entity->bytes,
&dim_sample);
dim_update_sample(my_entity->events,
my_entity->packets,
my_entity->bytes,
&dim_sample);
/* Call net DIM */
net_dim(&my_entity->dim, dim_sample);
...


@ -9122,7 +9122,7 @@ F: drivers/auxdisplay/ks0108.c
F: include/linux/ks0108.h
L3MDEV
M: David Ahern <dsa@cumulusnetworks.com>
M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: net/l3mdev
@ -10255,7 +10255,7 @@ MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@openwrt.org>
M: John Crispin <john@phrozen.org>
M: Sean Wang <sean.wang@mediatek.com>
M: Nelson Chang <nelson.chang@mediatek.com>
M: Mark Lee <Mark-MC.Lee@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
@ -12312,12 +12312,15 @@ F: arch/parisc/
F: Documentation/parisc/
F: drivers/parisc/
F: drivers/char/agp/parisc-agp.c
F: drivers/input/misc/hp_sdc_rtc.c
F: drivers/input/serio/gscps2.c
F: drivers/input/serio/hp_sdc*
F: drivers/parport/parport_gsc.*
F: drivers/tty/serial/8250/8250_gsc.c
F: drivers/video/fbdev/sti*
F: drivers/video/console/sti*
F: drivers/video/logo/logo_parisc*
F: include/linux/hp_sdc.h
PARMAN
M: Jiri Pirko <jiri@mellanox.com>
@ -13361,7 +13364,7 @@ S: Maintained
F: drivers/scsi/qla1280.[ch]
QLOGIC QLA2XXX FC-SCSI DRIVER
M: qla2xxx-upstream@qlogic.com
M: hmadhani@marvell.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
@ -17430,7 +17433,7 @@ F: include/linux/regulator/
K: regulator_get_optional
VRF
M: David Ahern <dsa@cumulusnetworks.com>
M: David Ahern <dsahern@kernel.org>
M: Shrijeet Mukherjee <shrijeet@gmail.com>
L: netdev@vger.kernel.org
S: Maintained


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Nesting Opossum
# *DOCUMENTATION*
@ -1041,7 +1041,7 @@ export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/package/Makefile
# used by scripts/Makefile.package
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)


@ -66,9 +66,21 @@
pinctrl-1 = <&ephy_leds_pins>;
status = "okay";
gmac0: mac@0 {
compatible = "mediatek,eth-mac";
reg = <0>;
phy-mode = "2500base-x";
fixed-link {
speed = <2500>;
full-duplex;
pause;
};
};
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
phy-mode = "gmii";
phy-handle = <&phy0>;
};
@ -78,7 +90,6 @@
phy0: ethernet-phy@0 {
reg = <0>;
phy-mode = "gmii";
};
};
};


@ -468,14 +468,12 @@
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b128000 0x3000>;
#clock-cells = <1>;
mediatek,physpeed = "2500";
};
sgmiisys1: syscon@1b130000 {
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b130000 0x3000>;
#clock-cells = <1>;
mediatek,physpeed = "2500";
};
};
};


@ -616,6 +616,23 @@ config CAVIUM_ERRATUM_30115
If unsure, say Y.
config CAVIUM_TX2_ERRATUM_219
bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
default y
help
On Cavium ThunderX2, a load, store or prefetch instruction between a
TTBR update and the corresponding context synchronizing operation can
cause a spurious Data Abort to be delivered to any hardware thread in
the CPU core.
Work around the issue by avoiding the problematic code sequence and
trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
trap handler performs the corresponding register access, skips the
instruction and ensures context synchronization by virtue of the
exception return.
If unsure, say Y.
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y


@ -78,10 +78,9 @@ alternative_else_nop_endif
/*
* Remove the address tag from a virtual address, if present.
*/
.macro clear_address_tag, dst, addr
tst \addr, #(1 << 55)
bic \dst, \addr, #(0xff << 56)
csel \dst, \dst, \addr, eq
.macro untagged_addr, dst, addr
sbfx \dst, \addr, #0, #56
and \dst, \dst, \addr
.endm
#endif


@ -52,7 +52,9 @@
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
#define ARM64_WORKAROUND_1463225 44
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
#define ARM64_NCAPS 45
#define ARM64_NCAPS 47
#endif /* __ASM_CPUCAPS_H */


@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) \
#define __untagged_addr(addr) \
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
#define untagged_addr(addr) ({ \
u64 __addr = (__force u64)addr; \
__addr &= __untagged_addr(__addr); \
(__force __typeof__(addr))__addr; \
})
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag) 0UL
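A note on the tag-stripping trick used by both the assembly macro (sbfx dst,
addr, #0, #56 followed by and) and the C macro above: sign-extending from bit
55 yields all-zero top bits for user addresses and all-one top bits for kernel
addresses, so ANDing with the original pointer clears the tag byte only when
bit 55 is clear. A minimal userspace model (the addresses are made up and
sign_extend64 is reimplemented locally):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirror sign_extend64(x, 55): sign-extend the low 56 bits. */
	static uint64_t sign_extend56(uint64_t x)
	{
		return (uint64_t)((int64_t)(x << 8) >> 8);
	}

	static uint64_t untagged_addr_model(uint64_t addr)
	{
		return addr & sign_extend56(addr);
	}

	int main(void)
	{
		uint64_t user = 0x120000341234abcdULL; /* tag 0x12, bit 55 clear */
		uint64_t kern = 0xffff000012345678ULL; /* kernel address, bit 55 set */

		printf("%016" PRIx64 "\n", untagged_addr_model(user)); /* 000000341234abcd */
		printf("%016" PRIx64 "\n", untagged_addr_model(kern)); /* ffff000012345678 */
		return 0;
	}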


@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else


@ -212,7 +212,7 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
#define SYS_PAR_EL1_F BIT(1)
#define SYS_PAR_EL1_F BIT(0)
#define SYS_PAR_EL1_FST GENMASK(6, 1)
/*** Statistical Profiling Extension ***/


@ -12,6 +12,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
return (need_wa > 0);
}
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
{},
};
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
int scope)
{
int i;
if (!is_affected_midr_range_list(entry, scope) ||
!is_hyp_mode_available())
return false;
for_each_possible_cpu(i) {
if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
return true;
}
return false;
}
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
@ -851,6 +876,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
{
.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
.matches = needs_tx2_tvm_workaround,
},
{
.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
},
#endif
{
}


@ -176,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
ARM64_FTR_END,
};


@ -604,7 +604,7 @@ el1_da:
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
bl preempt_schedule_irq // irq en/disable is done inside
bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
@ -808,7 +808,7 @@ el0_da:
mrs x26, far_el1
ct_user_exit_irqoff
enable_daif
clear_address_tag x0, x26
untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
@ -1071,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
#else
ldr x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
prfm plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
msr vbar_el1, x30
add x30, x30, #(1b - tramp_vectors)
isb


@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
gfp_t mask)
{
int rc = 0;
pgd_t *trans_pgd;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
memcpy((void *)dst, src_start, length);
__flush_icache_range(dst, dst + length);
pgdp = pgd_offset_raw(allocator(mask), dst_addr);
trans_pgd = allocator(mask);
if (!trans_pgd) {
rc = -ENOMEM;
goto out;
}
pgdp = pgd_offset_raw(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = allocator(mask);
if (!pudp) {


@ -17,6 +17,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
@ -44,6 +45,7 @@
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
@ -631,3 +633,19 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
/*
* Preempting a task from an IRQ means we leave copies of PSTATE
* on the stack. cpufeature's enable calls may modify PSTATE, but
* resuming one of these preempted tasks would undo those changes.
*
* Only allow a task to be preempted once cpufeatures have been
* enabled.
*/
if (static_branch_likely(&arm64_const_caps_ready))
preempt_schedule_irq();
}


@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
if (vcpu->arch.hcr_el2 & HCR_VSE)
vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
if (vcpu->arch.hcr_el2 & HCR_VSE) {
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
if (has_vhe())
deactivate_traps_vhe();
@ -380,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return true;
}
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
u64 val = vcpu_get_reg(vcpu, rt);
/*
* The normal sysreg handling code expects to see the traps,
* let's not do anything here.
*/
if (vcpu->arch.hcr_el2 & HCR_TVM)
return false;
switch (sysreg) {
case SYS_SCTLR_EL1:
write_sysreg_el1(val, SYS_SCTLR);
break;
case SYS_TTBR0_EL1:
write_sysreg_el1(val, SYS_TTBR0);
break;
case SYS_TTBR1_EL1:
write_sysreg_el1(val, SYS_TTBR1);
break;
case SYS_TCR_EL1:
write_sysreg_el1(val, SYS_TCR);
break;
case SYS_ESR_EL1:
write_sysreg_el1(val, SYS_ESR);
break;
case SYS_FAR_EL1:
write_sysreg_el1(val, SYS_FAR);
break;
case SYS_AFSR0_EL1:
write_sysreg_el1(val, SYS_AFSR0);
break;
case SYS_AFSR1_EL1:
write_sysreg_el1(val, SYS_AFSR1);
break;
case SYS_MAIR_EL1:
write_sysreg_el1(val, SYS_MAIR);
break;
case SYS_AMAIR_EL1:
write_sysreg_el1(val, SYS_AMAIR);
break;
case SYS_CONTEXTIDR_EL1:
write_sysreg_el1(val, SYS_CONTEXTIDR);
break;
default:
return false;
}
__kvm_skip_instr(vcpu);
return true;
}
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@ -399,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
handle_tx2_tvm(vcpu))
return true;
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.


@ -268,8 +268,12 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
par = read_sysreg(par_el1);
local_irq_restore(flags);
/*
* If we now have a valid translation, treat the translation fault as
* spurious.
*/
if (!(par & SYS_PAR_EL1_F))
return false;
return true;
/*
* If we got a different type of fault from the AT instruction,


@ -22,7 +22,7 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define __read_mostly __section(.data..read_mostly)
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */


@ -52,7 +52,7 @@
})
#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
# define __lock_aligned __section(.data..lock_aligned)
#endif
#endif /* __PARISC_LDCW_H */


@ -3,7 +3,7 @@
* arch/parisc/mm/ioremap.c
*
* (C) Copyright 1995 1996 Linus Torvalds
* (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
* (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
* (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
vfree(addr);
vunmap(addr);
return NULL;
}
@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
EXPORT_SYMBOL(__ioremap);
void iounmap(const volatile void __iomem *addr)
void iounmap(const volatile void __iomem *io_addr)
{
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
if (is_vmalloc_addr((void *)addr))
vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);


@ -13,6 +13,7 @@
compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
chosen {
stdout-path = "serial0";
};
cpus {


@ -87,14 +87,6 @@ extern pgd_t swapper_pg_dir[];
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define FIXADDR_TOP VMALLOC_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
#define FIXADDR_SIZE PGDIR_SIZE
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/*
* Roughly size the vmemmap space to be large enough to fit enough
* struct pages to map half the virtual address space. Then
@ -108,6 +100,14 @@ extern pgd_t swapper_pg_dir[];
#define vmemmap ((struct page *)VMEMMAP_START)
#define FIXADDR_TOP (VMEMMAP_START)
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
#define FIXADDR_SIZE PGDIR_SIZE
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.


@ -10,10 +10,6 @@
#include <linux/mm_types.h>
#include <asm/smp.h>
/*
* Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
* cache as well, so a 'fence.i' is not necessary.
*/
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");


@ -124,24 +124,24 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage void do_trap_break(struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (user_mode(regs)) {
force_sig_fault(SIGTRAP, TRAP_BRKPT,
(void __user *)(regs->sepc));
return;
}
#ifdef CONFIG_GENERIC_BUG
{
enum bug_trap_type type;
type = report_bug(regs->sepc, regs);
switch (type) {
#ifdef CONFIG_GENERIC_BUG
case BUG_TRAP_TYPE_WARN:
if (type == BUG_TRAP_TYPE_WARN) {
regs->sepc += get_break_insn_length(regs->sepc);
return;
case BUG_TRAP_TYPE_BUG:
#endif /* CONFIG_GENERIC_BUG */
default:
die(regs, "Kernel BUG");
}
} else {
force_sig_fault(SIGTRAP, TRAP_BRKPT,
(void __user *)(regs->sepc));
}
#endif /* CONFIG_GENERIC_BUG */
die(regs, "Kernel BUG");
}
#ifdef CONFIG_GENERIC_BUG


@ -29,7 +29,6 @@ config SPARC
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
select HAVE_FAST_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP


@ -20,30 +20,6 @@
*/
struct mem_vector immovable_mem[MAX_NUMNODES*2];
/*
* Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
* digits, and '\0' for termination.
*/
#define MAX_ADDR_LEN 19
static acpi_physical_address get_cmdline_acpi_rsdp(void)
{
acpi_physical_address addr = 0;
#ifdef CONFIG_KEXEC
char val[MAX_ADDR_LEN] = { };
int ret;
ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
if (ret < 0)
return 0;
if (kstrtoull(val, 16, &addr))
return 0;
#endif
return addr;
}
/*
* Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
* ACPI_TABLE_GUID are found, take the former, which has more features.
@ -298,6 +274,30 @@ acpi_physical_address get_rsdp_addr(void)
}
#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
/*
* Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
* digits, and '\0' for termination.
*/
#define MAX_ADDR_LEN 19
static acpi_physical_address get_cmdline_acpi_rsdp(void)
{
acpi_physical_address addr = 0;
#ifdef CONFIG_KEXEC
char val[MAX_ADDR_LEN] = { };
int ret;
ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
if (ret < 0)
return 0;
if (kstrtoull(val, 16, &addr))
return 0;
#endif
return addr;
}
/* Compute SRAT address from RSDP. */
static unsigned long get_acpi_srat_table(void)
{


@ -345,6 +345,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
unsigned long needed_size;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@ -379,26 +380,38 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
/*
* The memory hole needed for the kernel is the larger of either
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*
* On X86_64, the memory is mapped with PMD pages. Round the
* size up so that the full extent of PMD pages mapped is
* included in the check against the valid memory table
* entries. This ensures the full mapped area is usable RAM
* and doesn't include any reserved areas.
*/
needed_size = max(output_len, kernel_total_size);
#ifdef CONFIG_X86_64
needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
#endif
/* Report initial kernel position details. */
debug_putaddr(input_data);
debug_putaddr(input_len);
debug_putaddr(output);
debug_putaddr(output_len);
debug_putaddr(kernel_total_size);
debug_putaddr(needed_size);
#ifdef CONFIG_X86_64
/* Report address of 32-bit trampoline */
debug_putaddr(trampoline_32bit);
#endif
/*
* The memory hole needed for the kernel is the larger of either
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
choose_random_location((unsigned long)input_data, input_len,
(unsigned long *)&output,
max(output_len, kernel_total_size),
needed_size,
&virt_addr);
/* Validate memory location choices. */
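To make the sizing arithmetic concrete: the hole is the larger of the
compressed payload (plus relocations) and the fully decompressed image, rounded
up on x86_64 to the PMD mapping unit (2 MiB here, assuming MIN_KERNEL_ALIGN is
the PMD size, as the comment implies). A standalone sketch with made-up sizes:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t output_len        = 9ULL  << 20; /* compressed payload + relocs */
		uint64_t kernel_total_size = 27ULL << 20; /* VO__end - VO__text */
		uint64_t pmd_size          = 2ULL  << 20; /* PMD mapping unit */

		uint64_t needed = output_len > kernel_total_size ?
				  output_len : kernel_total_size;

		needed = ALIGN_UP(needed, pmd_size);
		printf("needed_size = %" PRIu64 " MiB\n", needed >> 20); /* 28 MiB */
		return 0;
	}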


@ -260,11 +260,21 @@ void __init hv_apic_init(void)
}
if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
pr_info("Hyper-V: Using MSR based APIC access\n");
pr_info("Hyper-V: Using enlightened APIC (%s mode)",
x2apic_enabled() ? "x2apic" : "xapic");
/*
* With x2apic, architectural x2apic MSRs are equivalent to the
* respective synthetic MSRs, so there's no need to override
* the apic accessors. The only exception is
* hv_apic_eoi_write, because it benefits from lazy EOI when
* available, but it works for both xapic and x2apic modes.
*/
apic_set_eoi_write(hv_apic_eoi_write);
apic->read = hv_apic_read;
apic->write = hv_apic_write;
apic->icr_write = hv_apic_icr_write;
apic->icr_read = hv_apic_icr_read;
if (!x2apic_enabled()) {
apic->read = hv_apic_read;
apic->write = hv_apic_write;
apic->icr_write = hv_apic_icr_write;
apic->icr_read = hv_apic_icr_read;
}
}
}


@ -156,7 +156,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
{
struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
cpumask_clear_cpu(dead_cpu, &cmsk->mask);
if (cmsk)
cpumask_clear_cpu(dead_cpu, &cmsk->mask);
free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
return 0;
}


@ -216,6 +216,10 @@ static void __init ms_hyperv_init_platform(void)
int hv_host_info_ecx;
int hv_host_info_edx;
#ifdef CONFIG_PARAVIRT
pv_info.name = "Hyper-V";
#endif
/*
* Extract the features and hints
*/


@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
* we might write invalid pmds, when the kernel is relocated
* cleanup_highmap() fixes this up along with the mappings
* beyond _end.
*
* Only the region occupied by the kernel image has so far
* been checked against the table of usable memory regions
* provided by the firmware, so invalidate pages outside that
* region. A page table entry that maps to a reserved area of
* memory would allow processor speculation into that area,
* and on some hardware (particularly the UV platform) even
* speculative access to some reserved areas is caught as an
* error, causing the BIOS to halt the system.
*/
pmd = fixup_pointer(level2_kernel_pgt, physaddr);
for (i = 0; i < PTRS_PER_PMD; i++) {
/* invalidate pages before the kernel image */
for (i = 0; i < pmd_index((unsigned long)_text); i++)
pmd[i] &= ~_PAGE_PRESENT;
/* fixup pages that are part of the kernel image */
for (; i <= pmd_index((unsigned long)_end); i++)
if (pmd[i] & _PAGE_PRESENT)
pmd[i] += load_delta;
}
/* invalidate pages after the kernel image */
for (; i < PTRS_PER_PMD; i++)
pmd[i] &= ~_PAGE_PRESENT;
/*
* Fixup phys_base - remove the memory encryption mask to obtain
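The loop bounds above come straight from pmd_index(): bits 29:21 of the virtual
address select one of the 512 PMD slots. A toy computation with invented link
addresses (the real code walks level2_kernel_pgt, which this ignores):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PMD_SHIFT    21
	#define PTRS_PER_PMD 512
	#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

	int main(void)
	{
		uint64_t text = 0xffffffff81000000ULL; /* pretend _text */
		uint64_t end  = 0xffffffff83abc000ULL; /* pretend _end */

		/* invalidate slots 0..7 and 30..511; relocate slots 8..29 */
		printf("first: %" PRIu64 " last: %" PRIu64 "\n",
		       pmd_index(text), pmd_index(end));
		return 0;
	}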


@ -56,7 +56,7 @@
reg = <0xf0100000 0x03f00000>;
// BUS_ADDRESS(3) CPU_PHYSICAL(1) SIZE(2)
ranges = <0x01000000 0x0 0xf0000000 0xf0000000 0x0 0x00010000>,
ranges = <0x01000000 0x0 0x00000000 0xf0000000 0x0 0x00010000>,
<0x02000000 0x0 0xf4000000 0xf4000000 0x0 0x08000000>;
// PCI_DEVICE(3) INT#(1) CONTROLLER(PHANDLE) CONTROLLER_DATA(2)


@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
: "a" (~mask), "a" (p)
: "a" (mask), "a" (p)
: "memory");
}


@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr, &__v64, 8); \
retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@ -132,14 +132,14 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
" _bbci.l %3, 0, 1f \n" \
" movi %0, %4 \n" \
" _bbci.l %[addr], 0, 1f \n" \
" movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
" _bbsi.l %3, 0, 0f \n" \
" _bbci.l %3, 1, 1f \n" \
"0: movi %0, %4 \n" \
" _bbsi.l %[addr], 0, 0f \n" \
" _bbci.l %[addr], 1, 1f \n" \
"0: movi %[err], %[efault] \n" \
" _j 2f \n"
@ -151,40 +151,40 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
#define __put_user_asm(x, addr, err, align, insn, cb) \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"1: "insn" %[x], %[addr], 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
" movi %1, 2b \n" \
" movi %0, %4 \n" \
" jx %1 \n" \
" movi %[tmp], 2b \n" \
" movi %[err], %[efault] \n" \
" jx %[tmp] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:"=r" (err), "=r" (cb) \
:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
:[err] "+r"(err_), [tmp] "=r"(cb) \
:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
long __gu_err; \
__get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(__gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (access_ok(__gu_addr, size)) \
__get_user_size((x), __gu_addr, (size), __gu_err); \
else \
(x) = 0; \
__gu_err; \
})
@ -198,8 +198,17 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
case 8: retval = __copy_from_user(&x, ptr, 8); break; \
default: (x) = __get_user_bad(); \
case 8: { \
u64 __x; \
if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
retval = -EFAULT; \
(x) = 0; \
} else { \
(x) = *(__force __typeof__((ptr)))&__x; \
} \
break; \
} \
default: (x) = 0; __get_user_bad(); \
} \
} while (0)
@ -208,25 +217,28 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
#define __get_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
" movi %1, 2b \n" \
" movi %2, 0 \n" \
" movi %0, %4 \n" \
" jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:"=r" (err), "=r" (cb), "=r" (x) \
:"r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
do { \
u32 __x = 0; \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %[x], %[addr], 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
" movi %[tmp], 2b \n" \
" movi %[err], %[efault] \n" \
" jx %[tmp] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
(x_) = (__force __typeof__(*(addr_)))__x; \
} while (0)
/*


@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
// FIXME EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
extern long common_exception_return;
EXPORT_SYMBOL(common_exception_return);


@ -1362,7 +1362,7 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
struct blkg_policy_data *pd_prealloc = NULL;
struct blkcg_gq *blkg;
struct blkcg_gq *blkg, *pinned_blkg = NULL;
int ret;
if (blkcg_policy_enabled(q, pol))
@ -1370,49 +1370,82 @@ int blkcg_activate_policy(struct request_queue *q,
if (queue_is_mq(q))
blk_mq_freeze_queue(q);
pd_prealloc:
if (!pd_prealloc) {
pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
if (!pd_prealloc) {
ret = -ENOMEM;
goto out_bypass_end;
}
}
retry:
spin_lock_irq(&q->queue_lock);
/* blkg_list is pushed at the head, reverse walk to init parents first */
/* blkg_list is pushed at the head, reverse walk to allocate parents first */
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (blkg->pd[pol->plid])
continue;
pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
if (!pd)
swap(pd, pd_prealloc);
/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
if (blkg == pinned_blkg) {
pd = pd_prealloc;
pd_prealloc = NULL;
} else {
pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
blkg->blkcg);
}
if (!pd) {
/*
* GFP_NOWAIT failed. Free the existing one and
* prealloc for @blkg w/ GFP_KERNEL.
*/
if (pinned_blkg)
blkg_put(pinned_blkg);
blkg_get(blkg);
pinned_blkg = blkg;
spin_unlock_irq(&q->queue_lock);
goto pd_prealloc;
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
blkg->blkcg);
if (pd_prealloc)
goto retry;
else
goto enomem;
}
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
if (pol->pd_init_fn)
pol->pd_init_fn(pd);
}
/* all allocated, init in the same order */
if (pol->pd_init_fn)
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
spin_unlock_irq(&q->queue_lock);
out_bypass_end:
out:
if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
if (pinned_blkg)
blkg_put(pinned_blkg);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
enomem:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
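The shape of this fix is a classic two-phase allocation: attempt an atomic
(GFP_NOWAIT) allocation while holding the queue lock, and on failure drop the
lock, preallocate with a blocking GFP_KERNEL call, then restart the scan. A
compressed userspace analogue (a pthread mutex stands in for queue_lock, and
the pinning of the specific blkg is elided):

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Models pd_alloc_fn(GFP_NOWAIT, ...): may fail spuriously. */
	static void *alloc_nowait(size_t n) { return (rand() % 4) ? malloc(n) : NULL; }
	/* Models pd_alloc_fn(GFP_KERNEL, ...): may sleep, succeeds here. */
	static void *alloc_blocking(size_t n) { return malloc(n); }

	/* Populate every empty slot without ever blocking under the lock. */
	static int populate_all(void **slot, int n)
	{
		void *prealloc = NULL;
		int i;
	retry:
		pthread_mutex_lock(&lock);
		for (i = 0; i < n; i++) {
			void *p;

			if (slot[i])
				continue;	/* filled on an earlier pass */
			p = prealloc ? prealloc : alloc_nowait(64);
			prealloc = NULL;
			if (!p) {
				/* atomic alloc failed: drop the lock, do a
				 * blocking prealloc, then start over */
				pthread_mutex_unlock(&lock);
				prealloc = alloc_blocking(64);
				if (!prealloc)
					return -1;	/* -ENOMEM */
				goto retry;
			}
			slot[i] = p;
		}
		pthread_mutex_unlock(&lock);
		free(prealloc);		/* unused leftover; free(NULL) is a no-op */
		return 0;
	}

	int main(void)
	{
		void *slots[8] = { 0 };
		return populate_all(slots, 8) ? 1 : 0;
	}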


@ -108,16 +108,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
struct rq_qos *cur, *prev = NULL;
for (cur = q->rq_qos; cur; cur = cur->next) {
if (cur == rqos) {
if (prev)
prev->next = rqos->next;
else
q->rq_qos = cur;
struct rq_qos **cur;
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
if (*cur == rqos) {
*cur = rqos->next;
break;
}
prev = cur;
}
blk_mq_debugfs_unregister_rqos(rqos);
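The rewrite swaps prev-pointer bookkeeping for a pointer-to-pointer cursor,
which is what fixes the head-deletion bug visible above (the old !prev branch
assigned q->rq_qos = cur, a no-op that left the removed element on the list).
The idiom in isolation, as a small standalone C program:

	#include <stdio.h>

	struct node { int v; struct node *next; };

	/* Unlink victim by walking the *links* rather than the nodes: the
	 * head pointer goes through the same code path as any interior
	 * next pointer, so there is no special case and no prev. */
	static void unlink_node(struct node **head, struct node *victim)
	{
		struct node **cur;

		for (cur = head; *cur; cur = &(*cur)->next) {
			if (*cur == victim) {
				*cur = victim->next;
				break;
			}
		}
	}

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
		struct node *head = &a;

		unlink_node(&head, &a);		/* removing the head just works */
		for (struct node *n = head; n; n = n->next)
			printf("%d ", n->v);	/* prints: 2 3 */
		printf("\n");
		return 0;
	}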


@ -616,7 +616,8 @@ int elevator_switch_mq(struct request_queue *q,
static inline bool elv_support_iosched(struct request_queue *q)
{
if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
if (!q->mq_ops ||
(q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
return false;
return true;
}


@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
pcc_data[pcc_ss_id]->refcount--;
if (!pcc_data[pcc_ss_id]->refcount) {
pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
kfree(pcc_data[pcc_ss_id]);
pcc_data[pcc_ss_id] = NULL;
}
}
}


@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
p->flags, p->processor_PD, p->memory_PD);
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");


@ -162,21 +162,23 @@ void acpi_processor_ppc_init(int cpu)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
if (!pr)
return;
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
&pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
INT_MAX);
if (ret < 0) {
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
return;
}
}
void acpi_processor_ppc_exit(int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
dev_pm_qos_remove_request(&pr->perflib_req);
if (pr)
dev_pm_qos_remove_request(&pr->perflib_req);
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)


@ -130,21 +130,23 @@ void acpi_thermal_cpufreq_init(int cpu)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
if (!pr)
return;
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
&pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
INT_MAX);
if (ret < 0) {
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
return;
}
}
void acpi_thermal_cpufreq_exit(int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
dev_pm_qos_remove_request(&pr->thermal_req);
if (pr)
dev_pm_qos_remove_request(&pr->thermal_req);
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)


@ -361,19 +361,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=196907
* Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
* S0 Idle firmware interface.
*/
{
.callback = init_default_s3,
.ident = "Dell XPS13 9360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
},
},
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see


@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
/* Skip applying the quirk on Denverton and beyond */
if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
return;
/*


@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@ -3404,6 +3405,8 @@ void device_shutdown(void)
wait_for_device_probe();
device_block_probing();
cpufreq_suspend();
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.


@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
return -ENXIO;
/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
if (!pfn_to_online_page(pfn))
return -EIO;
ret = soft_offline_page(pfn_to_page(pfn), 0);
return ret == 0 ? count : ret;
}


@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
if (ret > 0)
if (ret > 0) {
ret = rbd_dev->acquire_err;
else if (!ret)
ret = -ETIMEDOUT;
} else {
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
if (!ret)
ret = -ETIMEDOUT;
}
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);


@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct file *file;
struct zram *zram = dev_to_zram(dev);
struct file *file = zram->backing_dev;
char *p;
ssize_t ret;
down_read(&zram->init_lock);
if (!zram->backing_dev) {
file = zram->backing_dev;
if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;


@ -2754,14 +2754,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
* Stop cpufreq at shutdown to make sure it isn't holding any locks
* or mutexes when secondary CPUs are halted.
*/
static struct syscore_ops cpufreq_syscore_ops = {
.shutdown = cpufreq_suspend,
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@ -2773,8 +2765,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}
module_param(off, int, 0444);


@ -471,7 +471,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
if (pfence_excl)
*pfence_excl = fence_excl;
else if (fence_excl)
shared[++shared_count] = fence_excl;
shared[shared_count++] = fence_excl;
if (!shared_count) {
kfree(shared);
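The one-character change matters because, with shared_count == 0, the
pre-increment form shared[++shared_count] stores the exclusive fence at index
1, leaves index 0 uninitialized, and still reports one valid entry (and,
depending on how the array was sized, may write one slot past the end). A tiny
demonstration of the two idioms:

	#include <stdio.h>

	int main(void)
	{
		int buf[4] = { -1, -1, -1, -1 };
		int count = 0;

		buf[count++] = 42;	/* correct: stores at index 0, count == 1 */
		printf("post-increment: count=%d buf[0]=%d\n", count, buf[0]);

		count = 0;
		buf[0] = -1;
		buf[++count] = 42;	/* buggy: stores at index 1, buf[0] stays -1 */
		printf("pre-increment:  count=%d buf[0]=%d buf[1]=%d\n",
		       count, buf[0], buf[1]);
		return 0;
	}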


@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
bytes = ~0ull;
else if (size & 0x8000)
bytes = (u64)(size & 0x7fff) << 10;
else if (size != 0x7fff)
else if (size != 0x7fff || dm->length < 0x20)
bytes = (u64)size << 20;
else
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;


@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
chip->irq_eoi(data);
}
static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
static int intel_mid_irq_init_hw(struct gpio_chip *chip)
{
struct intel_mid_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned base;
@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
reg = gpio_reg(&priv->chip, base, GEDR);
writel(~0, reg);
}
return 0;
}
static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq = &priv->chip.irq;
girq->chip = &intel_mid_irqchip;
girq->init_hw = intel_mid_irq_init_hw;
girq->parent_handler = intel_mid_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
intel_mid_irq_init_hw(priv);
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);


@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
{
struct lp_gpio *lg = gpiochip_get_data(chip);
unsigned long reg;
unsigned base;
@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
outl(0xffffffff, reg);
}
return 0;
}
static int lp_gpio_probe(struct platform_device *pdev)
@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
girq = &gc->irq;
girq->chip = &lp_irqchip;
girq->init_hw = lp_gpio_irq_init_hw;
girq->parent_handler = lp_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = (unsigned)irq_rc->start;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
lp_gpio_irq_init_hw(lg);
girq->handler = handle_bad_irq;
}
ret = devm_gpiochip_add_data(dev, gc, lg);


@ -362,8 +362,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@ -375,6 +376,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@ -447,6 +450,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
girq->init_hw = mrfld_irq_init_hw;
girq->parent_handler = mrfld_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@ -455,11 +459,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = pdev->irq;
girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
mrfld_irq_init_hw(priv);
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {


@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
machine_gpiochip_add(chip);
ret = gpiochip_irqchip_init_hw(chip);
if (ret)
goto err_remove_acpi_chip;
ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name)
* The following is irqchip helper code for gpiochips.
*/
static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
{
struct gpio_irq_chip *girq = &gc->irq;
if (!girq->init_hw)
return 0;
return girq->init_hw(gc);
}
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
struct gpio_irq_chip *girq = &gc->irq;
@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
{
return 0;
}
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
{
return 0;
}
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
return 0;


@ -1048,6 +1048,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
#ifdef CONFIG_DRM_AMDGPU_SI
if (!amdgpu_si_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
dev_info(&pdev->dev,
"SI support provided by radeon.\n");
dev_info(&pdev->dev,
"Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
);
return -ENODEV;
}
}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
if (!amdgpu_cik_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_KAVERI:
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
dev_info(&pdev->dev,
"CIK support provided by radeon.\n");
dev_info(&pdev->dev,
"Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
);
return -ENODEV;
}
}
#endif
/* Get rid of things like offb */
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
if (ret)


@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct amdgpu_device *adev;
int r, acpi_status;
#ifdef CONFIG_DRM_AMDGPU_SI
if (!amdgpu_si_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
dev_info(dev->dev,
"SI support provided by radeon.\n");
dev_info(dev->dev,
"Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
);
return -ENODEV;
}
}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
if (!amdgpu_cik_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_KAVERI:
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
dev_info(dev->dev,
"CIK support provided by radeon.\n");
dev_info(dev->dev,
"Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
);
return -ENODEV;
}
}
#endif
adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
if (adev == NULL) {
return -ENOMEM;


@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xfffffff); /* mask */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;


@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;


@ -159,6 +159,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
/* Lenovo G50 */
{ "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },


@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
port_name(port), info->alternate_ddc_pin,
port_name(p), port_name(port));
port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
* Give child device order the priority, first come first
* served.
* Give inverse child device order the priority,
* last one wins. Yes, there are real machines
* (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and
* we must pick port E :(
*/
info = &dev_priv->vbt.ddi_port_info[p];
info->supports_dvi = false;
info->supports_hdmi = false;
info->alternate_ddc_pin = 0;
@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
port_name(port), info->alternate_aux_channel,
port_name(p), port_name(port));
port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
* Give child device order the priority, first come first
* served.
* Give inverse child device order the priority,
* last one wins. Yes, there are real machines
* (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and
* we must pick port E :(
*/
info = &dev_priv->vbt.ddi_port_info[p];
info->supports_dp = false;
info->alternate_aux_channel = 0;
}


@ -364,6 +364,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
case -ENODEV: /* bad object, how did you get here! */
return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@ -475,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (!obj)
return -ENOENT;
if (i915_gem_object_never_bind_ggtt(obj)) {
ret = -ENODEV;
goto out;
}
ret = create_mmap_offset(obj);
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
i915_gem_object_put(obj);
return ret;
}


@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{


@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
#define I915_GEM_OBJECT_NO_GGTT BIT(3)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set


@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_GGTT |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,


@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
static void mark_eio(struct i915_request *rq)
{
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
@ -1236,6 +1243,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
submit = true;
last = rq;
}
i915_request_put(rq);
/*
* Hmm, we have a bunch of virtual engine requests,
@ -2574,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
list_for_each_entry(rq, &engine->active.requests, sched.link)
mark_eio(rq);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
@ -2587,9 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
mark_eio(rq);
__i915_request_submit(rq);
dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
@ -2605,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.active.lock);
if (ve->request) {
ve->request->engine = engine;
__i915_request_submit(ve->request);
dma_fence_set_error(&ve->request->fence, -EIO);
i915_request_mark_complete(ve->request);
rq = fetch_and_zero(&ve->request);
if (rq) {
mark_eio(rq);
rq->engine = engine;
__i915_request_submit(rq);
i915_request_put(rq);
ve->base.execlists.queue_priority_hint = INT_MIN;
ve->request = NULL;
}
spin_unlock(&ve->base.active.lock);
}
@ -3615,6 +3620,8 @@ static void virtual_submission_tasklet(unsigned long data)
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
struct i915_request *old;
unsigned long flags;
GEM_TRACE("%s: rq=%llx:%lld\n",
ve->base.name,
@ -3623,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
GEM_BUG_ON(ve->request);
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
spin_lock_irqsave(&ve->base.active.lock, flags);
ve->base.execlists.queue_priority_hint = rq_prio(rq);
WRITE_ONCE(ve->request, rq);
old = ve->request;
if (old) { /* background completion event from preempt-to-busy */
GEM_BUG_ON(!i915_request_completed(old));
__i915_request_submit(old);
i915_request_put(old);
}
list_move_tail(&rq->sched.link, virtual_queue(ve));
if (i915_request_completed(rq)) {
__i915_request_submit(rq);
tasklet_schedule(&ve->base.execlists.tasklet);
ve->base.execlists.queue_priority_hint = INT_MIN;
ve->request = NULL;
} else {
ve->base.execlists.queue_priority_hint = rq_prio(rq);
ve->request = i915_request_get(rq);
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
tasklet_schedule(&ve->base.execlists.tasklet);
}
spin_unlock_irqrestore(&ve->base.active.lock, flags);
}
static struct ve_bond *


@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (i915_gem_object_never_bind_ggtt(obj))
return ERR_PTR(-ENODEV);
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available


@ -26,6 +26,8 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
#define DSI_RESET_TOGGLE_DELAY_MS 20
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */
msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */
msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);


@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));


@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
mutex_lock(&pfdev->reset_lock);
if (!mutex_trylock(&pfdev->reset_lock))
return;
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
if (sched_job)
drm_sched_increase_karma(sched_job);
drm_sched_stop(sched, sched_job);
if (js != i)
/* Ensure any timeouts on other slots have finished */
cancel_delayed_work_sync(&sched->work_tdr);
}
drm_sched_increase_karma(sched_job);
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {


@ -379,19 +379,11 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
struct drm_device *ddev = pci_get_drvdata(pdev);
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
/* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
*/
radeon_suspend_kms(ddev, true, true, false);
}
static int radeon_pmops_suspend(struct device *dev)


@ -63,7 +63,6 @@ config TINYDRM_REPAPER
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
depends on THERMAL || !THERMAL
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)


@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
if (bo->ttm && !(bo->ttm->page_flags &
(TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
ttm_bo_get(busy_bo);
kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
ttm_bo_put(busy_bo);
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret;
}


@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
else
ret = vmf_insert_pfn(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
*/
if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
break;
else if (unlikely(ret & VM_FAULT_ERROR))
goto out_io_unlock;
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
if (i == 0)
goto out_io_unlock;
else
break;
}
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))


@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
onkey->input->phys = onkey->phys;
onkey->input->dev.parent = &pdev->dev;
if (onkey->key_power)
input_set_capability(onkey->input, EV_KEY, KEY_POWER);
input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
input_set_capability(onkey->input, EV_KEY, KEY_POWER);
INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);


@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
if (gpio < 0 && gpio != -ENOENT) {
error = gpio;
goto err_free_mem;
} else if (!gpio_is_valid(gpio)) {
/* Skip GPIO if not present */
if (!gpio_is_valid(gpio)) {
/*
* Skip GPIO if not present. Note we deliberately
* ignore -EPROBE_DEFER errors here. On some devices
* Intel is using so-called virtual GPIOs, which are not
* GPIOs at all but a way for AML code to check some
* random status bits without needing a custom opregion.
* In some cases the resource table we parse points to
* such a virtual GPIO; since these are not real GPIOs,
* we do not have a driver for them and they will never
* show up, therefore we ignore -EPROBE_DEFER.
*/
continue;
}


@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
leave_breadcrumbs);
}
static bool elantech_use_host_notify(struct psmouse *psmouse,
struct elantech_device_info *info)
{
if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
return true;
switch (info->bus) {
case ETP_BUS_PS2_ONLY:
/* expected case */
break;
case ETP_BUS_SMB_HST_NTFY_ONLY:
case ETP_BUS_PS2_SMB_HST_NTFY:
/* SMbus implementation is stable since 2018 */
if (dmi_get_bios_year() >= 2018)
return true;
/* fall through */
default:
psmouse_dbg(psmouse,
"Ignoring SMBus bus provider %d\n", info->bus);
break;
}
return false;
}
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate a SMBus InterTouch device.
@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
if (!elantech_use_host_notify(psmouse, info) ||
if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
return 0;
}
static bool elantech_use_host_notify(struct psmouse *psmouse,
struct elantech_device_info *info)
{
if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
return true;
switch (info->bus) {
case ETP_BUS_PS2_ONLY:
/* expected case */
break;
case ETP_BUS_SMB_ALERT_ONLY:
/* fall-through */
case ETP_BUS_PS2_SMB_ALERT:
psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
break;
case ETP_BUS_SMB_HST_NTFY_ONLY:
/* fall-through */
case ETP_BUS_PS2_SMB_HST_NTFY:
return true;
default:
psmouse_dbg(psmouse,
"Ignoring SMBus bus provider %d.\n",
info->bus);
}
return false;
}
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;


@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
}
mutex_lock(&data->irq_mutex);
bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
data->irq_count);
/*
* At this point, irq_status has all bits that are set in the
@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
bitmap_copy(data->current_irq_mask, data->new_irq_mask,
data->num_of_irq_regs);
bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
error_unlock:
mutex_unlock(&data->irq_mutex);
return error;
@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
struct device *dev = &rmi_dev->dev;
mutex_lock(&data->irq_mutex);
bitmap_andnot(data->fn_irq_bits,
data->fn_irq_bits, mask, data->irq_count);
bitmap_andnot(data->new_irq_mask,
data->current_irq_mask, mask, data->irq_count);


@ -53,6 +53,7 @@ struct goodix_ts_data {
const char *cfg_name;
struct completion firmware_loading_complete;
unsigned long irq_flags;
unsigned int contact_size;
};
#define GOODIX_GPIO_INT_NAME "irq"
@ -62,6 +63,7 @@ struct goodix_ts_data {
#define GOODIX_MAX_WIDTH 4096
#define GOODIX_INT_TRIGGER 1
#define GOODIX_CONTACT_SIZE 8
#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
#define GOODIX_CONFIG_MAX_LENGTH 240
@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = {
{}
};
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
.ident = "Lenovo YogaBook",
/* YB1-X91L/F and YB1-X90L/F */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
}
},
#endif
{}
};
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
do {
error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
data, GOODIX_CONTACT_SIZE + 1);
data, ts->contact_size + 1);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n",
error);
@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -EPROTO;
if (touch_num > 1) {
data += 1 + GOODIX_CONTACT_SIZE;
data += 1 + ts->contact_size;
error = goodix_i2c_read(ts->client,
GOODIX_READ_COOR_ADDR +
1 + GOODIX_CONTACT_SIZE,
1 + ts->contact_size,
data,
GOODIX_CONTACT_SIZE *
ts->contact_size *
(touch_num - 1));
if (error)
return error;
@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return 0;
}
static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[1]);
@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[1] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[3]);
int input_y = get_unaligned_le16(&coor_data[5]);
int input_w = get_unaligned_le16(&coor_data[7]);
input_mt_slot(ts->input_dev, id);
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
touchscreen_report_pos(ts->input_dev, &ts->prop,
input_x, input_y, true);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
/**
* goodix_process_events - Process incoming events
*
@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts)
input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
for (i = 0; i < touch_num; i++)
goodix_ts_report_touch(ts,
&point_data[1 + GOODIX_CONTACT_SIZE * i]);
if (ts->contact_size == 9)
goodix_ts_report_touch_9b(ts,
&point_data[1 + ts->contact_size * i]);
else
goodix_ts_report_touch_8b(ts,
&point_data[1 + ts->contact_size * i]);
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
"Applying '180 degrees rotated screen' quirk\n");
}
if (dmi_check_system(nine_bytes_report)) {
ts->contact_size = 9;
dev_dbg(&ts->client->dev,
"Non-standard 9-bytes report format quirk\n");
}
error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ts->client = client;
i2c_set_clientdata(client, ts);
init_completion(&ts->firmware_loading_complete);
ts->contact_size = GOODIX_CONTACT_SIZE;
error = goodix_get_gpio_config(ts);
if (error)


@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
pasid = PPR_PASID(*(u64 *)&event[0]);
pasid = (event[0] & EVENT_DOMID_MASK_HI) |
(event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
@ -616,7 +617,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
* to 64 bits.
*/
static bool increase_address_space(struct protection_domain *domain,
unsigned long address,
gfp_t gfp)
{
unsigned long flags;
@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
if (address <= PM_LEVEL_SIZE(domain->mode) ||
WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode))
*updated = increase_address_space(domain, gfp) || *updated;
*updated = increase_address_space(domain, address, gfp) || *updated;
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];


@ -130,8 +130,8 @@
#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff
#define EVENT_DOMID_SHIFT 0
#define EVENT_DOMID_MASK_LO 0xffff
#define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10


@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return 0;
out_clear_smmu:
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
smmu_domain->smmu = NULL;
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);


@ -166,6 +166,9 @@
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct io_pgtable *iop;
struct arm_lpae_io_pgtable *data;
if (cfg->ias != 48 || cfg->oas > 40)
/* No quirks for Mali (hopefully) */
if (cfg->quirks)
return NULL;
if (cfg->ias > 48 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
if (iop) {
u64 mair, ttbr;
/* Copy values as union fields overlap */
mair = cfg->arm_lpae_s1_cfg.mair[0];
ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
cfg->arm_mali_lpae_cfg.memattr = mair;
cfg->arm_mali_lpae_cfg.transtab = ttbr |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
/* Mali seems to need a full 4-level table regardless of IAS */
if (data->levels < ARM_LPAE_MAX_LEVELS) {
data->levels = ARM_LPAE_MAX_LEVELS;
data->pgd_size = sizeof(arm_lpae_iopte);
}
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
* best we can do is mimic the out-of-tree driver and hope that the
* "implementation-defined caching policy" is good enough. Similarly,
* we'll use it for the sake of a valid attribute for our 'device'
* index, although callers should never request that in practice.
*/
cfg->arm_mali_lpae_cfg.memattr =
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
return iop;
data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
/* Ensure the empty pgd is visible before TRANSTAB can be written */
wmb();
cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
return &data->iop;
out_free_data:
kfree(data);
return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {


@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
/*
* Determine if this IPMMU instance is a root device by checking for
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
@ -1106,6 +1104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
/* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
return irq;


@ -100,6 +100,7 @@ struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
int num_irq;
struct clk_bulk_data *clocks;
int num_clocks;
bool reset_disabled;
@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
int err, i, irq;
int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
iommu->num_irq = platform_irq_count(pdev);
if (iommu->num_irq < 0)
return iommu->num_irq;
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
i = 0;
while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
for (i = 0; i < iommu->num_irq; i++) {
int irq = platform_get_irq(pdev, i);
if (irq < 0)
return irq;
@ -1245,10 +1251,13 @@ static int rk_iommu_probe(struct platform_device *pdev)
static void rk_iommu_shutdown(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
int i = 0, irq;
int i;
for (i = 0; i < iommu->num_irq; i++) {
int irq = platform_get_irq(pdev, i);
while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
}
pm_runtime_force_suspend(&pdev->dev);
}


@ -15,6 +15,7 @@
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
static int al_fic_irq_retrigger(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
return 1;
}
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;


@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
{ .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
};
@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
#define NR_SAM9X60_IRQS 50
static int __init sam9x60_aic5_of_init(struct device_node *node,
struct device_node *parent)
{
return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
}
IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);


@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*


@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}
static void plic_irq_enable(struct irq_data *d)
static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
static void plic_irq_disable(struct irq_data *d)
static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
if (!irqd_irq_disabled(d)) {
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
}
#endif
static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
/*
* There is no need to mask/unmask PLIC interrupts. They are "masked"
* by reading claim and "unmasked" when writing it back.
*/
.irq_enable = plic_irq_enable,
.irq_disable = plic_irq_disable,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
.irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
irq_set_chip_data(irq, NULL);
irq_set_noprobe(irq);
return 0;
@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
hwirq);
else
generic_handle_irq(irq);
writel(hwirq, claim);
}
csr_set(sie, SIE_SEIE);
}


@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
struct dm_cache_migration *mg;
mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
if (!mg)
return NULL;
mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
memset(mg, 0, sizeof(*mg));
@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
if (!cell_prealloc) {
defer_bio(cache, bio);
return false;
}
build_key(oblock, end, &key);
r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
if (!prealloc) {
DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
mg_complete(mg, false);
return -ENOMEM;
}
/*
* Prevent writes to the block, but allow reads to continue.
@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
}
mg = alloc_migration(cache);
if (!mg) {
policy_complete_background_work(cache->policy, op, false);
background_work_end(cache);
return -ENOMEM;
}
mg->op = op;
mg->overwrite_bio = bio;
@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
if (!prealloc) {
invalidate_complete(mg, false);
return -ENOMEM;
}
build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
r = dm_cell_lock_v2(cache->prison, &key,
@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -EPERM;
mg = alloc_migration(cache);
if (!mg) {
background_work_end(cache);
return -ENOMEM;
}
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;


@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
*
* NOTE: Must be called with the bucket lock held
*/
struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
unsigned long region_nr)
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
unsigned long region_nr)
{
struct dm_clone_region_hydration *hd;


@ -18,7 +18,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>
#include "dm.h"
@ -107,8 +106,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
/* Maximum number of in-flight COW jobs. */
struct semaphore cow_count;
unsigned in_progress;
struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@ -162,8 +161,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti)
dm_put_device(ti, s->origin);
WARN_ON(s->in_progress);
kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
s->in_progress++;
spin_unlock(&s->in_progress_wait.lock);
}
static void account_end_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
BUG_ON(!s->in_progress);
s->in_progress--;
if (likely(s->in_progress <= cow_threshold) &&
unlikely(waitqueue_active(&s->in_progress_wait)))
wake_up_locked(&s->in_progress_wait);
spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
if (unlikely(s->in_progress > cow_threshold)) {
spin_lock(&s->in_progress_wait.lock);
if (likely(s->in_progress > cow_threshold)) {
/*
* NOTE: this throttle doesn't account for whether
* the caller is servicing an IO that will trigger a COW
* so excess throttling may result for chunks not required
* to be COW'd. But if cow_threshold was reached, extra
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
if (unlock_origins)
up_read(&_origins_lock);
io_schedule();
remove_wait_queue(&s->in_progress_wait, &wait);
return false;
}
spin_unlock(&s->in_progress_wait.lock);
}
return true;
}
/*
* Flush a list of buffers.
*/
@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio)
}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
r = do_origin(s->origin, bio);
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
up(&s->cow_count);
account_end_copy(s);
}
/*
@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
down(&s->cow_count);
account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
down(&s->cow_count);
account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
struct bio *bio = context;
struct dm_snapshot *s = bio->bi_private;
up(&s->cow_count);
account_end_copy(s);
bio->bi_status = write_err ? BLK_STS_IOERR : 0;
bio_endio(bio);
}
@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
dest.sector = bio->bi_iter.bi_sector;
dest.count = s->store->chunk_size;
down(&s->cow_count);
account_start_copy(s);
WARN_ON_ONCE(bio->bi_private);
bio->bi_private = s;
dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return DM_MAPIO_KILL;
if (bio_data_dir(bio) == WRITE) {
while (unlikely(!wait_for_in_progress(s, false)))
; /* wait_for_in_progress() has slept */
}
down_read(&s->lock);
dm_exception_table_lock(&lock);
@ -2112,7 +2163,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
return do_origin(s->origin, bio);
return do_origin(s->origin, bio, false);
}
out_unlock:
@ -2487,15 +2538,24 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
/*
* Called on a write from the origin driver.
*/
static int do_origin(struct dm_dev *origin, struct bio *bio)
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
if (o) {
if (limit) {
struct dm_snapshot *s;
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
}
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
}
up_read(&_origins_lock);
return r;
@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
return do_origin(o->dev, bio);
return do_origin(o->dev, bio, true);
}
/*

View File

@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}


@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
if (!cnt) {
rc = -ENODEV;
pci_dev_busy = 1;
goto err_out;
goto err_out_int;
}
jm = kzalloc(sizeof(struct jmb38x_ms)

Some files were not shown because too many files have changed in this diff.