Linux 6.0-rc3
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmML5qkeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG/zcH/3YsuPDZ58MmmAdI
zcoabNKvy6yV3KHSyJHRAkaZrd7uv8SuRimBmqTTg/frirBVwkR0sBLM+e++SUQP
I62kioXERMIUMIGbhowQCnCVzaQyrmVW0iU12Ejw1xHYtbbEe3AnhmfrcRC0PJj5
X7rwMsvOTXifDrFk3y3Mpiud4nc5v5a2QgFC0JiQHtSy7gqHOzbuEjTvNExlbwv4
In+SzpN3eKfEYXW2LuGilSVdVYwri7kyrw2h4iJfxtj5LUjw0ktPfQgg0Z67onN4
s9O7cXByae6gFk8Bl45Mr8TVmcCiAlEeCdizjrI8fwQzrmmj36lhZQgV0jg0ELOs
fEDIjN4=
=NP9s
-----END PGP SIGNATURE-----

Merge tag 'v6.0-rc3' into android-mainline

Linux 6.0-rc3

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6f6369eea24565c1be9de9afded6a689cee0d6d6
commit d8ecc05191

6 .mailmap
@ -98,8 +98,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
@ -150,6 +149,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
@ -253,6 +254,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>

@ -523,6 +523,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
/sys/devices/system/cpu/vulnerabilities/retbleed
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities

@ -230,6 +230,20 @@ The possible values in this file are:
* - 'Mitigation: Clear CPU buffers'
- The processor is vulnerable and the CPU buffer clearing mitigation is
enabled.
* - 'Unknown: No mitigations'
- The processor vulnerability status is unknown because it is
out of Servicing period. Mitigation is not attempted.

Definitions:
------------

Servicing period: The process of providing functional and security updates to
Intel processors or platforms, utilizing the Intel Platform Update (IPU)
process or other similar mechanisms.

End of Servicing Updates (ESU): ESU is the date at which Intel will no
longer provide Servicing, such as through IPU or other similar update
processes. ESU dates will typically be aligned to end of quarter.

If the processor is vulnerable then the following information is appended to
the above information:

@ -5339,6 +5339,8 @@
rodata= [KNL]
on Mark read-only kernel memory as read-only (default).
off Leave read-only kernel memory writable for debugging.
full Mark read-only kernel memory and aliases as read-only
[arm64]

rockchip.usb_uart
Enable the uart passthrough on the designated usb port

@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
netdev_max_backlog
------------------

Maximum number of packets, queued on the INPUT side, when the interface
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.

netdev_rss_key

@ -242,44 +242,34 @@ HWCAP2_MTE3
by Documentation/arm64/memory-tagging-extension.rst.

HWCAP2_SME

Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
by Documentation/arm64/sme.rst.

HWCAP2_SME_I16I64

Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.

HWCAP2_SME_F64F64

Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.

HWCAP2_SME_I8I32

Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.

HWCAP2_SME_F16F32

Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.

HWCAP2_SME_B16F32

Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.

HWCAP2_SME_F32F32

Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.

HWCAP2_SME_FA64

Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.

HWCAP2_WFXT

Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.

HWCAP2_EBF16

Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.

4. Unused AT_HWCAP bits

@ -52,6 +52,8 @@ stable kernels.
| Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2038923        | ARM64_ERRATUM_2038923       |

@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:

- RMW operations that have a return value are fully ordered.

- RMW operations that are conditional are unordered on FAILURE,
otherwise the above rules apply. In the case of test_and_set_bit_lock(),
if the bit in memory is unchanged by the operation then it is deemed to have
failed.
- RMW operations that are conditional are fully ordered.

Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
clear_bit_unlock() which has RELEASE semantics.
Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
clear_bit_unlock() which has RELEASE semantics and test_bit_acquire which has
ACQUIRE semantics.

Since a platform only has a single means of achieving atomic operations
the same barriers as for atomic_t are used, see atomic_t.txt.

@ -214,6 +214,7 @@ patternProperties:
- polling-delay
- polling-delay-passive
- thermal-sensors
- trips

additionalProperties: false

@ -3612,6 +3612,7 @@ F: include/linux/find.h
F: include/linux/nodemask.h
F: lib/bitmap.c
F: lib/cpumask.c
F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
@ -3679,6 +3680,7 @@ F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
F: include/net/bond*
F: include/uapi/linux/if_bonding.h
F: tools/testing/selftests/drivers/net/bonding/

BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
M: Dan Robertson <dan@dlrobertson.com>
@ -10665,6 +10667,7 @@ T: git git://git.kernel.dk/linux-block
T: git git://git.kernel.dk/liburing
F: io_uring/
F: include/linux/io_uring.h
F: include/linux/io_uring_types.h
F: include/uapi/linux/io_uring.h
F: tools/io_uring/

2 Makefile

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*

@ -283,11 +283,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
return (old & mask) != 0;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/*
* ffz = Find First Zero in word. Undefined if no zero exists,

@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691

If unsure, say Y.

config ARM64_ERRATUM_2457168
bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
depends on ARM64_AMU_EXTN
default y
help
This option adds the workaround for ARM Cortex-A510 erratum 2457168.

The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
incorrectly giving a significantly higher output value.

Work around this problem by returning 0 when reading the affected counter in
key locations that results in disabling all users of this counter. This effect
is the same to firmware disabling affected counters.

If unsure, say Y.

config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y

@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)

static inline u32 cache_type_cwg(void)
{
return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
}

#define __read_mostly __section(".data..read_mostly")

@ -153,7 +153,7 @@ struct vl_info {

#ifdef CONFIG_ARM64_SVE

extern void sve_alloc(struct task_struct *task);
extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task);

#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task) { }
static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

@ -3,6 +3,8 @@
#ifndef __ARM64_ASM_SETUP_H
#define __ARM64_ASM_SETUP_H

#include <linux/string.h>

#include <uapi/asm/setup.h>

void *get_early_fdt_ptr(void);
@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys);
extern phys_addr_t __fdt_pointer __initdata;
extern u64 __cacheline_aligned boot_args[4];

static inline bool arch_parse_debug_rodata(char *arg)
{
extern bool rodata_enabled;
extern bool rodata_full;

if (arg && !strcmp(arg, "full")) {
rodata_enabled = true;
rodata_full = true;
return true;
}

return false;
}
#define arch_parse_debug_rodata arch_parse_debug_rodata

#endif

@ -1116,6 +1116,7 @@

#else

#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/types.h>
#include <asm/alternative.h>
@ -1209,8 +1210,6 @@
par; \
})

#endif

#define SYS_FIELD_GET(reg, field, val) \
FIELD_GET(reg##_##field##_MASK, val)

@ -1220,4 +1219,6 @@
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)

#endif

#endif /* __ASM_SYSREG_H */

@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,

int init_cache_level(unsigned int cpu)
{
unsigned int ctype, level, leaves, fw_level;
unsigned int ctype, level, leaves;
int fw_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
else
fw_level = acpi_find_last_cache_level(cpu);

if (fw_level < 0)
return fw_level;

if (level < fw_level) {
/*
* some external caches not specified in CLIDR_EL1

@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
},
{
/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
@ -654,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
{
.desc = "ARM erratum 2457168",
.capability = ARM64_WORKAROUND_2457168,
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

/* Cortex-A510 r0p0-r1p1 */
CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
{
.desc = "ARM erratum 2038923",

@ -1870,7 +1870,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
smp_processor_id());
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
update_freq_counters_refs();

/* 0 reference values signal broken/disabled counters */
if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
update_freq_counters_refs();
}
}

@ -502,7 +502,7 @@ tsk .req x28 // current thread_info
SYM_CODE_START(vectors)
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
kernel_ventry 1, t, 64, fiq // FIQ EL1h
kernel_ventry 1, t, 64, fiq // FIQ EL1t
kernel_ventry 1, t, 64, error // Error EL1t

kernel_ventry 1, h, 64, sync // Synchronous EL1h

@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task)
|
||||
* do_sve_acc() case, there is no ABI requirement to hide stale data
|
||||
* written previously be task.
|
||||
*/
|
||||
void sve_alloc(struct task_struct *task)
|
||||
void sve_alloc(struct task_struct *task, bool flush)
|
||||
{
|
||||
if (task->thread.sve_state) {
|
||||
memset(task->thread.sve_state, 0, sve_state_size(task));
|
||||
if (flush)
|
||||
memset(task->thread.sve_state, 0,
|
||||
sve_state_size(task));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
|
||||
return;
|
||||
}
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, true);
|
||||
if (!current->thread.sve_state) {
|
||||
force_sig(SIGKILL);
|
||||
return;
|
||||
@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
|
||||
return;
|
||||
}
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, false);
|
||||
sme_alloc(current);
|
||||
if (!current->thread.sve_state || !current->thread.za_state) {
|
||||
force_sig(SIGKILL);
|
||||
@ -1460,17 +1462,6 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
|
||||
fpsimd_bind_task_to_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
* If SVE was not already active initialise the SVE registers,
|
||||
* any non-shared state between the streaming and regular SVE
|
||||
* registers is architecturally guaranteed to be zeroed when
|
||||
* we enter streaming mode. We do not need to initialize ZA
|
||||
* since ZA must be disabled at this point and enabling ZA is
|
||||
* architecturally defined to zero ZA.
|
||||
*/
|
||||
if (system_supports_sve() && !test_thread_flag(TIF_SVE))
|
||||
sve_init_regs();
|
||||
|
||||
put_cpu_fpsimd_context();
|
||||
}
|
||||
|
||||
|
@ -94,11 +94,9 @@ asmlinkage u64 kaslr_early_init(void *fdt)
|
||||
|
||||
seed = get_kaslr_seed(fdt);
|
||||
if (!seed) {
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
if (!__early_cpu_has_rndr() ||
|
||||
!__arm64_rndr((unsigned long *)&seed))
|
||||
#endif
|
||||
return 0;
|
||||
if (!__early_cpu_has_rndr() ||
|
||||
!__arm64_rndr((unsigned long *)&seed))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target,
|
||||
* state and ensure there's storage.
|
||||
*/
|
||||
if (target->thread.svcr != old_svcr)
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, true);
|
||||
}
|
||||
|
||||
/* Registers: FPSIMD-only case */
|
||||
@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target,
|
||||
goto out;
|
||||
}
|
||||
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, true);
|
||||
if (!target->thread.sve_state) {
|
||||
ret = -ENOMEM;
|
||||
clear_tsk_thread_flag(target, TIF_SVE);
|
||||
@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target,
|
||||
|
||||
/* Ensure there is some SVE storage for streaming mode */
|
||||
if (!target->thread.sve_state) {
|
||||
sve_alloc(target);
|
||||
sve_alloc(target, false);
|
||||
if (!target->thread.sve_state) {
|
||||
clear_thread_flag(TIF_SME);
|
||||
ret = -ENOMEM;
|
||||
|
@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
|
||||
* not taken into account. This limit is not a guarantee and is
|
||||
* NOT ABI.
|
||||
*/
|
||||
#define SIGFRAME_MAXSZ SZ_64K
|
||||
#define SIGFRAME_MAXSZ SZ_256K
|
||||
|
||||
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
|
||||
unsigned long *offset, size_t size, bool extend)
|
||||
@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
|
||||
fpsimd_flush_task_state(current);
|
||||
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
|
||||
|
||||
sve_alloc(current);
|
||||
sve_alloc(current, true);
|
||||
if (!current->thread.sve_state) {
|
||||
clear_thread_flag(TIF_SVE);
|
||||
return -ENOMEM;
|
||||
@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
|
||||
|
||||
/* Signal handlers are invoked with ZA and streaming mode disabled */
|
||||
if (system_supports_sme()) {
|
||||
/*
|
||||
* If we were in streaming mode the saved register
|
||||
* state was SVE but we will exit SM and use the
|
||||
* FPSIMD register state - flush the saved FPSIMD
|
||||
* register state in case it gets loaded.
|
||||
*/
|
||||
if (current->thread.svcr & SVCR_SM_MASK)
|
||||
memset(¤t->thread.uw.fpsimd_state, 0,
|
||||
sizeof(current->thread.uw.fpsimd_state));
|
||||
|
||||
current->thread.svcr &= ~(SVCR_ZA_MASK |
|
||||
SVCR_SM_MASK);
|
||||
sme_smstop();
|
||||
|
@ -296,12 +296,25 @@ core_initcall(init_amu_fie);
|
||||
|
||||
static void cpu_read_corecnt(void *val)
|
||||
{
|
||||
/*
|
||||
* A value of 0 can be returned if the current CPU does not support AMUs
|
||||
* or if the counter is disabled for this CPU. A return value of 0 at
|
||||
* counter read is properly handled as an error case by the users of the
|
||||
* counter.
|
||||
*/
|
||||
*(u64 *)val = read_corecnt();
|
||||
}
|
||||
|
||||
static void cpu_read_constcnt(void *val)
|
||||
{
|
||||
*(u64 *)val = read_constcnt();
|
||||
/*
|
||||
* Return 0 if the current CPU is affected by erratum 2457168. A value
|
||||
* of 0 is also returned if the current CPU does not support AMUs or if
|
||||
* the counter is disabled. A return value of 0 at counter read is
|
||||
* properly handled as an error case by the users of the counter.
|
||||
*/
|
||||
*(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
|
||||
0UL : read_constcnt();
|
||||
}
|
||||
|
||||
static inline
|
||||
@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
|
||||
*/
|
||||
bool cpc_ffh_supported(void)
|
||||
{
|
||||
return freq_counters_valid(get_cpu_with_amu_feat());
|
||||
int cpu = get_cpu_with_amu_feat();
|
||||
|
||||
/*
|
||||
* FFH is considered supported if there is at least one present CPU that
|
||||
* supports AMUs. Using FFH to read core and reference counters for CPUs
|
||||
* that do not support AMUs, have counters disabled or that are affected
|
||||
* by errata, will result in a return value of 0.
|
||||
*
|
||||
* This is done to allow any enabled and valid counters to be read
|
||||
* through FFH, knowing that potentially returning 0 as counter value is
|
||||
* properly handled by the users of these counters.
|
||||
*/
|
||||
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
|
||||
|
@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
|
||||
vm_area_add_early(vma);
|
||||
}
|
||||
|
||||
static int __init parse_rodata(char *arg)
|
||||
{
|
||||
int ret = strtobool(arg, &rodata_enabled);
|
||||
if (!ret) {
|
||||
rodata_full = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* permit 'full' in addition to boolean options */
|
||||
if (strcmp(arg, "full"))
|
||||
return -EINVAL;
|
||||
|
||||
rodata_enabled = true;
|
||||
rodata_full = true;
|
||||
return 0;
|
||||
}
|
||||
early_param("rodata", parse_rodata);
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
static int __init map_entry_trampoline(void)
|
||||
{
|
||||
|
@ -67,6 +67,7 @@ WORKAROUND_1902691
|
||||
WORKAROUND_2038923
|
||||
WORKAROUND_2064142
|
||||
WORKAROUND_2077057
|
||||
WORKAROUND_2457168
|
||||
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
|
||||
WORKAROUND_TSB_FLUSH_FAILURE
|
||||
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
|
||||
|
@ -179,6 +179,21 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
int retval;
|
||||
|
||||
asm volatile(
|
||||
"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
|
||||
: "=&r" (retval)
|
||||
: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
|
||||
: "p0", "memory"
|
||||
);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
/*
|
||||
* ffz - find first zero in word.
|
||||
* @word: The word to search
|
||||
|
@ -331,11 +331,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
return (old & bit) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
/**
|
||||
* ffz - find the first zero bit in a long word
|
||||
|
@ -111,6 +111,7 @@ config LOONGARCH
|
||||
select PCI_ECAM if ACPI
|
||||
select PCI_LOONGSON
|
||||
select PCI_MSI_ARCH_FALLBACKS
|
||||
select PCI_QUIRKS
|
||||
select PERF_USE_VMALLOC
|
||||
select RTC_LIB
|
||||
select SMP
|
||||
|
@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
|
||||
*/
|
||||
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
|
||||
|
||||
/*
|
||||
* On LoongArch, I/O ports mappring is following:
|
||||
*
|
||||
* | .... |
|
||||
* |-----------------------|
|
||||
* | pci io ports(16K~32M) |
|
||||
* |-----------------------|
|
||||
* | isa io ports(0 ~16K) |
|
||||
* PCI_IOBASE ->|-----------------------|
|
||||
* | .... |
|
||||
*/
|
||||
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
|
||||
#define PCI_IOSIZE SZ_32M
|
||||
#define ISA_IOSIZE SZ_16K
|
||||
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
|
||||
|
||||
#endif /* _ASM_ADDRSPACE_H */
|
||||
|
@ -5,8 +5,9 @@
|
||||
#ifndef __ASM_CMPXCHG_H
|
||||
#define __ASM_CMPXCHG_H
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <linux/bits.h>
|
||||
#include <linux/build_bug.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#define __xchg_asm(amswap_db, m, val) \
|
||||
({ \
|
||||
@ -21,10 +22,53 @@
|
||||
__ret; \
|
||||
})
|
||||
|
||||
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
|
||||
unsigned int size)
|
||||
{
|
||||
unsigned int shift;
|
||||
u32 old32, mask, temp;
|
||||
volatile u32 *ptr32;
|
||||
|
||||
/* Mask value to the correct size. */
|
||||
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
|
||||
val &= mask;
|
||||
|
||||
/*
|
||||
* Calculate a shift & mask that correspond to the value we wish to
|
||||
* exchange within the naturally aligned 4 byte integerthat includes
|
||||
* it.
|
||||
*/
|
||||
shift = (unsigned long)ptr & 0x3;
|
||||
shift *= BITS_PER_BYTE;
|
||||
mask <<= shift;
|
||||
|
||||
/*
|
||||
* Calculate a pointer to the naturally aligned 4 byte integer that
|
||||
* includes our byte of interest, and load its value.
|
||||
*/
|
||||
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
|
||||
|
||||
asm volatile (
|
||||
"1: ll.w %0, %3 \n"
|
||||
" andn %1, %0, %z4 \n"
|
||||
" or %1, %1, %z5 \n"
|
||||
" sc.w %1, %2 \n"
|
||||
" beqz %1, 1b \n"
|
||||
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
|
||||
: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
|
||||
: "memory");
|
||||
|
||||
return (old32 & mask) >> shift;
|
||||
}
|
||||
|
||||
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
|
||||
int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __xchg_small(ptr, x, size);
|
||||
|
||||
case 4:
|
||||
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
|
||||
|
||||
@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
|
||||
__ret; \
|
||||
})
|
||||
|
||||
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
|
||||
unsigned int new, unsigned int size)
|
||||
{
|
||||
unsigned int shift;
|
||||
u32 old32, mask, temp;
|
||||
volatile u32 *ptr32;
|
||||
|
||||
/* Mask inputs to the correct size. */
|
||||
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
|
||||
old &= mask;
|
||||
new &= mask;
|
||||
|
||||
/*
|
||||
* Calculate a shift & mask that correspond to the value we wish to
|
||||
* compare & exchange within the naturally aligned 4 byte integer
|
||||
* that includes it.
|
||||
*/
|
||||
shift = (unsigned long)ptr & 0x3;
|
||||
shift *= BITS_PER_BYTE;
|
||||
old <<= shift;
|
||||
new <<= shift;
|
||||
mask <<= shift;
|
||||
|
||||
/*
|
||||
* Calculate a pointer to the naturally aligned 4 byte integer that
|
||||
* includes our byte of interest, and load its value.
|
||||
*/
|
||||
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
|
||||
|
||||
asm volatile (
|
||||
"1: ll.w %0, %3 \n"
|
||||
" and %1, %0, %z4 \n"
|
||||
" bne %1, %z5, 2f \n"
|
||||
" andn %1, %0, %z4 \n"
|
||||
" or %1, %1, %z6 \n"
|
||||
" sc.w %1, %2 \n"
|
||||
" beqz %1, 1b \n"
|
||||
" b 3f \n"
|
||||
"2: \n"
|
||||
__WEAK_LLSC_MB
|
||||
"3: \n"
|
||||
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
|
||||
: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
|
||||
: "memory");
|
||||
|
||||
return (old32 & mask) >> shift;
|
||||
}
|
||||
|
||||
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, unsigned int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __cmpxchg_small(ptr, old, new, size);
|
||||
|
||||
case 4:
|
||||
return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
|
||||
(u32)old, new);
|
||||
|
@ -7,34 +7,15 @@
|
||||
|
||||
#define ARCH_HAS_IOREMAP_WC
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable-bits.h>
|
||||
#include <asm/string.h>
|
||||
|
||||
/*
|
||||
* On LoongArch, I/O ports mappring is following:
|
||||
*
|
||||
* | .... |
|
||||
* |-----------------------|
|
||||
* | pci io ports(64K~32M) |
|
||||
* |-----------------------|
|
||||
* | isa io ports(0 ~16K) |
|
||||
* PCI_IOBASE ->|-----------------------|
|
||||
* | .... |
|
||||
*/
|
||||
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
|
||||
#define PCI_IOSIZE SZ_32M
|
||||
#define ISA_IOSIZE SZ_16K
|
||||
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
|
||||
|
||||
/*
|
||||
* Change "struct page" to physical address.
|
||||
*/
|
||||
|
@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
|
||||
|
||||
#endif
|
||||
|
||||
#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
|
||||
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
|
||||
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
|
||||
|
||||
extern int __virt_addr_valid(volatile void *kaddr);
|
||||
|
@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
|
||||
int size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1:
|
||||
case 2:
|
||||
return __xchg_small((volatile void *)ptr, val, size);
|
||||
|
||||
case 4:
|
||||
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
|
||||
|
||||
@ -204,9 +208,13 @@ do { \
|
||||
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
|
||||
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
|
||||
|
||||
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
|
||||
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
|
||||
|
||||
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
|
||||
|
||||
|
@ -59,7 +59,6 @@
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
struct mm_struct;
|
||||
struct vm_area_struct;
|
||||
@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
|
||||
*p4d = p4dval;
|
||||
}
|
||||
|
||||
#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
|
||||
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
|
||||
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
|
||||
|
||||
#endif
|
||||
@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
|
||||
|
||||
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
|
||||
|
||||
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
|
||||
#define pud_phys(pud) PHYSADDR(pud_val(pud))
|
||||
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
|
||||
|
||||
#endif
|
||||
@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
|
||||
|
||||
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
|
||||
|
||||
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
|
||||
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
|
||||
|
||||
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
|
||||
|
@ -1,10 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#ifndef _ASM_REBOOT_H
|
||||
#define _ASM_REBOOT_H
|
||||
|
||||
extern void (*pm_restart)(void);
|
||||
|
||||
#endif /* _ASM_REBOOT_H */
|
@ -15,10 +15,16 @@
|
||||
#include <acpi/reboot.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/loongarch.h>
|
||||
#include <asm/reboot.h>
|
||||
|
||||
static void default_halt(void)
|
||||
void (*pm_power_off)(void);
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
void machine_halt(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
local_irq_disable();
|
||||
clear_csr_ecfg(ECFG0_IM);
|
||||
|
||||
@ -30,18 +36,29 @@ static void default_halt(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void default_poweroff(void)
|
||||
void machine_power_off(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_power_off();
|
||||
#ifdef CONFIG_EFI
|
||||
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
|
||||
#endif
|
||||
|
||||
while (true) {
|
||||
__arch_cpu_idle();
|
||||
}
|
||||
}
|
||||
|
||||
static void default_restart(void)
|
||||
void machine_restart(char *command)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_restart(command);
|
||||
#ifdef CONFIG_EFI
|
||||
if (efi_capsule_pending(NULL))
|
||||
efi_reboot(REBOOT_WARM, NULL);
|
||||
@ -55,47 +72,3 @@ static void default_restart(void)
|
||||
__arch_cpu_idle();
|
||||
}
|
||||
}
|
||||
|
||||
void (*pm_restart)(void);
|
||||
EXPORT_SYMBOL(pm_restart);
|
||||
|
||||
void (*pm_power_off)(void);
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
void machine_halt(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
default_halt();
|
||||
}
|
||||
|
||||
void machine_power_off(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
pm_power_off();
|
||||
}
|
||||
|
||||
void machine_restart(char *command)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
smp_send_stop();
|
||||
#endif
|
||||
do_kernel_restart(command);
|
||||
pm_restart();
|
||||
}
|
||||
|
||||
static int __init loongarch_reboot_setup(void)
|
||||
{
|
||||
pm_restart = default_restart;
|
||||
pm_power_off = default_poweroff;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
arch_initcall(loongarch_reboot_setup);
|
||||
|
@ -216,6 +216,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
|
||||
return;
|
||||
}
|
||||
|
||||
/* The fault is fully completed (including releasing mmap lock) */
|
||||
if (fault & VM_FAULT_COMPLETED)
|
||||
return;
|
||||
|
||||
if (unlikely(fault & VM_FAULT_RETRY)) {
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
|
@ -2,16 +2,9 @@
|
||||
/*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/elf-randomize.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/sched/mm.h>
|
||||
|
||||
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
|
||||
EXPORT_SYMBOL(shm_align_mask);
|
||||
@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr)
|
||||
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
|
||||
return 0;
|
||||
|
||||
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
|
||||
return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__virt_addr_valid);
|
||||
|
@ -24,6 +24,8 @@ static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
|
||||
return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
|
||||
}
|
||||
|
||||
extern
|
||||
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
|
||||
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
|
||||
{
|
||||
int cpu_id;
|
||||
|
@ -6,20 +6,23 @@
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
|
||||
int __vdso_clock_gettime(clockid_t clock,
|
||||
struct __kernel_timespec *ts)
|
||||
extern
|
||||
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
|
||||
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
|
||||
{
|
||||
return __cvdso_clock_gettime(clock, ts);
|
||||
}
|
||||
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
|
||||
struct timezone *tz)
|
||||
extern
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
|
||||
{
|
||||
return __cvdso_gettimeofday(tv, tz);
|
||||
}
|
||||
|
||||
int __vdso_clock_getres(clockid_t clock_id,
|
||||
struct __kernel_timespec *res)
|
||||
extern
|
||||
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
|
||||
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
|
||||
{
|
||||
return __cvdso_clock_getres(clock_id, res);
|
||||
}
|
||||
|
@ -157,11 +157,8 @@ arch___change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
change_bit(nr, addr);
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
static inline int bset_reg_test_and_set_bit(int nr,
|
||||
volatile unsigned long *vaddr)
|
||||
|
@ -84,12 +84,10 @@ &mac1 {
|
||||
|
||||
phy1: ethernet-phy@9 {
|
||||
reg = <9>;
|
||||
ti,fifo-depth = <0x1>;
|
||||
};
|
||||
|
||||
phy0: ethernet-phy@8 {
|
||||
reg = <8>;
|
||||
ti,fifo-depth = <0x1>;
|
||||
};
|
||||
};
|
||||
|
||||
@ -102,7 +100,6 @@ &mmc {
|
||||
disable-wp;
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
mmc-ddr-1_8v;
|
||||
mmc-hs200-1_8v;
|
||||
sd-uhs-sdr12;
|
||||
|
@ -54,12 +54,10 @@ &mac1 {
|
||||
|
||||
phy1: ethernet-phy@5 {
|
||||
reg = <5>;
|
||||
ti,fifo-depth = <0x01>;
|
||||
};
|
||||
|
||||
phy0: ethernet-phy@4 {
|
||||
reg = <4>;
|
||||
ti,fifo-depth = <0x01>;
|
||||
};
|
||||
};
|
||||
|
||||
@ -72,7 +70,6 @@ &mmc {
|
||||
disable-wp;
|
||||
cap-sd-highspeed;
|
||||
cap-mmc-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
mmc-ddr-1_8v;
|
||||
mmc-hs200-1_8v;
|
||||
sd-uhs-sdr12;
|
||||
|
@ -193,7 +193,7 @@ cctrllr: cache-controller@2010000 {
|
||||
cache-size = <2097152>;
|
||||
cache-unified;
|
||||
interrupt-parent = <&plic>;
|
||||
interrupts = <1>, <2>, <3>;
|
||||
interrupts = <1>, <3>, <4>, <2>;
|
||||
};
|
||||
|
||||
clint: clint@2000000 {
|
||||
@ -485,9 +485,8 @@ pcie: pcie@2000000000 {
|
||||
ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
|
||||
msi-parent = <&pcie>;
|
||||
msi-controller;
|
||||
microchip,axi-m-atr0 = <0x10 0x0>;
|
||||
status = "disabled";
|
||||
pcie_intc: legacy-interrupt-controller {
|
||||
pcie_intc: interrupt-controller {
|
||||
#address-cells = <0>;
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-controller;
|
||||
|
12
arch/riscv/include/asm/signal.h
Normal file
12
arch/riscv/include/asm/signal.h
Normal file
@ -0,0 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ASM_SIGNAL_H
|
||||
#define __ASM_SIGNAL_H
|
||||
|
||||
#include <uapi/asm/signal.h>
|
||||
#include <uapi/asm/ptrace.h>
|
||||
|
||||
asmlinkage __visible
|
||||
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
|
||||
|
||||
#endif
|
@ -42,6 +42,8 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/csr.h>
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/vdso.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/signal32.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/csr.h>
|
||||
|
@ -20,9 +20,10 @@
|
||||
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/csr.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/csr.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
int show_unhandled_signals = 1;
|
||||
|
||||
|
@ -176,14 +176,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
return old & mask;
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
const volatile unsigned long *p = __bitops_word(nr, addr);
|
||||
unsigned long mask = __bitops_mask(nr);
|
||||
|
||||
return *p & mask;
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
|
@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
||||
|
||||
memcpy(dst, src, arch_task_struct_size);
|
||||
dst->thread.fpu.regs = dst->thread.fpu.fprs;
|
||||
|
||||
/*
|
||||
* Don't transfer over the runtime instrumentation or the guarded
|
||||
* storage control block pointers. These fields are cleared here instead
|
||||
* of in copy_thread() to avoid premature freeing of associated memory
|
||||
* on fork() failure. Wait to clear the RI flag because ->stack still
|
||||
* refers to the source thread.
|
||||
*/
|
||||
dst->thread.ri_cb = NULL;
|
||||
dst->thread.gs_cb = NULL;
|
||||
dst->thread.gs_bc_cb = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -150,13 +162,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
|
||||
frame->childregs.flags = 0;
|
||||
if (new_stackp)
|
||||
frame->childregs.gprs[15] = new_stackp;
|
||||
|
||||
/* Don't copy runtime instrumentation info */
|
||||
p->thread.ri_cb = NULL;
|
||||
/*
|
||||
* Clear the runtime instrumentation flag after the above childregs
|
||||
* copy. The CB pointer was already cleared in arch_dup_task_struct().
|
||||
*/
|
||||
frame->childregs.psw.mask &= ~PSW_MASK_RI;
|
||||
/* Don't copy guarded storage control block */
|
||||
p->thread.gs_cb = NULL;
|
||||
p->thread.gs_bc_cb = NULL;
|
||||
|
||||
/* Set a new TLS ? */
|
||||
if (clone_flags & CLONE_SETTLS) {
|
||||
|
@ -379,7 +379,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
flags = FAULT_FLAG_DEFAULT;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
if (access == VM_WRITE || is_write)
|
||||
if (is_write)
|
||||
access = VM_WRITE;
|
||||
if (access == VM_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
mmap_read_lock(mm);
|
||||
|
||||
|
@ -135,16 +135,8 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* arch_test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static __always_inline bool
|
||||
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
#define arch_test_bit generic_test_bit
|
||||
#define arch_test_bit_acquire generic_test_bit_acquire
|
||||
|
||||
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
|
||||
|
||||
|
@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr);
|
||||
void snp_set_page_shared(unsigned long paddr);
|
||||
void sev_prep_identity_maps(unsigned long top_level_pgt);
|
||||
#else
|
||||
static inline void sev_enable(struct boot_params *bp) { }
|
||||
static inline void sev_enable(struct boot_params *bp)
|
||||
{
|
||||
/*
|
||||
* bp->cc_blob_address should only be set by boot/compressed kernel.
|
||||
* Initialize it to 0 unconditionally (thus here in this stub too) to
|
||||
* ensure that uninitialized values from buggy bootloaders aren't
|
||||
* propagated.
|
||||
*/
|
||||
if (bp)
|
||||
bp->cc_blob_address = 0;
|
||||
}
|
||||
static inline void sev_es_shutdown_ghcb(void) { }
|
||||
static inline bool sev_es_check_ghcb_fault(unsigned long address)
|
||||
{
|
||||
|
@ -276,6 +276,14 @@ void sev_enable(struct boot_params *bp)
|
||||
struct msr m;
|
||||
bool snp;
|
||||
|
||||
/*
|
||||
* bp->cc_blob_address should only be set by boot/compressed kernel.
|
||||
* Initialize it to 0 to ensure that uninitialized values from
|
||||
* buggy bootloaders aren't propagated.
|
||||
*/
|
||||
if (bp)
|
||||
bp->cc_blob_address = 0;
|
||||
|
||||
/*
|
||||
* Setup/preliminary detection of SNP. This will be sanity-checked
|
||||
* against CPUID/MSR values later.
|
||||
|
@ -14,7 +14,6 @@ CONFIG_CPU_FREQ=y
|
||||
|
||||
# x86 xen specific config options
|
||||
CONFIG_XEN_PVH=y
|
||||
CONFIG_XEN_MAX_DOMAIN_MEMORY=500
|
||||
CONFIG_XEN_SAVE_RESTORE=y
|
||||
# CONFIG_XEN_DEBUG_FS is not set
|
||||
CONFIG_XEN_MCE_LOG=y
|
||||
|
@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat)
|
||||
* Interrupts are off on entry.
|
||||
*/
|
||||
ASM_CLAC /* Do this early to minimize exposure */
|
||||
SWAPGS
|
||||
ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
|
||||
|
||||
/*
|
||||
* User tracing code (ptrace or signal handlers) might assume that
|
||||
|
@ -6291,10 +6291,8 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.pebs_aliases = NULL;
|
||||
x86_pmu.pebs_prec_dist = true;
|
||||
x86_pmu.pebs_block = true;
|
||||
x86_pmu.pebs_capable = ~0ULL;
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
|
||||
x86_pmu.flags |= PMU_FL_PEBS_ALL;
|
||||
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
|
||||
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
|
||||
|
||||
@ -6337,10 +6335,8 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.pebs_aliases = NULL;
|
||||
x86_pmu.pebs_prec_dist = true;
|
||||
x86_pmu.pebs_block = true;
|
||||
x86_pmu.pebs_capable = ~0ULL;
|
||||
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
|
||||
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
|
||||
x86_pmu.flags |= PMU_FL_PEBS_ALL;
|
||||
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
|
||||
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
|
||||
x86_pmu.lbr_pt_coexist = true;
|
||||
|
@ -291,6 +291,7 @@ static u64 load_latency_data(struct perf_event *event, u64 status)
|
||||
static u64 store_latency_data(struct perf_event *event, u64 status)
|
||||
{
|
||||
union intel_x86_pebs_dse dse;
|
||||
union perf_mem_data_src src;
|
||||
u64 val;
|
||||
|
||||
dse.val = status;
|
||||
@ -304,7 +305,14 @@ static u64 store_latency_data(struct perf_event *event, u64 status)
|
||||
|
||||
val |= P(BLK, NA);
|
||||
|
||||
return val;
|
||||
/*
|
||||
* the pebs_data_source table is only for loads
|
||||
* so override the mem_op to say STORE instead
|
||||
*/
|
||||
src.val = val;
|
||||
src.mem_op = P(OP,STORE);
|
||||
|
||||
return src.val;
|
||||
}
|
||||
|
||||
struct pebs_record_core {
|
||||
@ -822,7 +830,7 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
|
||||
|
||||
struct event_constraint intel_grt_pebs_event_constraints[] = {
|
||||
/* Allow all events as PEBS with no flags */
|
||||
INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xf),
|
||||
INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
|
||||
INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
@ -2262,6 +2270,7 @@ void __init intel_ds_init(void)
|
||||
PERF_SAMPLE_BRANCH_STACK |
|
||||
PERF_SAMPLE_TIME;
|
||||
x86_pmu.flags |= PMU_FL_PEBS_ALL;
|
||||
x86_pmu.pebs_capable = ~0ULL;
|
||||
pebs_qual = "-baseline";
|
||||
x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
|
||||
} else {
|
||||
|
@ -1097,6 +1097,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
|
||||
|
||||
if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
|
||||
reg->config = mask;
|
||||
|
||||
/*
|
||||
* The Arch LBR HW can retrieve the common branch types
|
||||
* from the LBR_INFO. It doesn't require the high overhead
|
||||
* SW disassemble.
|
||||
* Enable the branch type by default for the Arch LBR.
|
||||
*/
|
||||
reg->reg |= X86_BR_TYPE_SAVE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -841,6 +841,22 @@ int snb_pci2phy_map_init(int devid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
/*
|
||||
* SNB IMC counters are 32-bit and are laid out back to back
|
||||
* in MMIO space. Therefore we must use a 32-bit accessor function
|
||||
* using readq() from uncore_mmio_read_counter() causes problems
|
||||
* because it is reading 64-bit at a time. This is okay for the
|
||||
* uncore_perf_event_update() function because it drops the upper
|
||||
* 32-bits but not okay for plain uncore_read_counter() as invoked
|
||||
* in uncore_pmu_event_start().
|
||||
*/
|
||||
return (u64)readl(box->io_addr + hwc->event_base);
|
||||
}
|
||||
|
||||
static struct pmu snb_uncore_imc_pmu = {
|
||||
.task_ctx_nr = perf_invalid_context,
|
||||
.event_init = snb_uncore_imc_event_init,
|
||||
@ -860,7 +876,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
|
||||
.disable_event = snb_uncore_imc_disable_event,
|
||||
.enable_event = snb_uncore_imc_enable_event,
|
||||
.hw_config = snb_uncore_imc_hw_config,
|
||||
.read_counter = uncore_mmio_read_counter,
|
||||
.read_counter = snb_uncore_imc_read_counter,
|
||||
};
|
||||
|
||||
static struct intel_uncore_type snb_uncore_imc = {
|
||||
|
@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
|
||||
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
bool oldbit;
|
||||
|
||||
asm volatile("testb %2,%1"
|
||||
CC_SET(nz)
|
||||
: CC_OUT(nz) (oldbit)
|
||||
: "m" (((unsigned char *)addr)[nr >> 3]),
|
||||
"i" (1 << (nr & 7))
|
||||
:"memory");
|
||||
|
||||
return oldbit;
|
||||
}
|
||||
|
||||
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
|
||||
{
|
||||
bool oldbit;
|
||||
@ -226,6 +240,13 @@ arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
|
||||
variable_test_bit(nr, addr);
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
|
||||
variable_test_bit(nr, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* __ffs - find first set bit in word
|
||||
* @word: The word to search
|
||||
|
@ -457,7 +457,8 @@
|
||||
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
|
||||
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
|
||||
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
|
||||
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
|
||||
#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
|
||||
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
|
||||
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
|
||||
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
@ -27,6 +27,7 @@
|
||||
* _X - regular server parts
|
||||
* _D - micro server parts
|
||||
* _N,_P - other mobile parts
|
||||
* _S - other client parts
|
||||
*
|
||||
* Historical OPTDIFFs:
|
||||
*
|
||||
@ -112,6 +113,7 @@
|
||||
|
||||
#define INTEL_FAM6_RAPTORLAKE 0xB7
|
||||
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
|
||||
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
|
||||
|
||||
/* "Small Core" Processors (Atom) */
|
||||
|
||||
|
@ -35,33 +35,56 @@
|
||||
#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
|
||||
|
||||
/*
|
||||
* Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
|
||||
*/
|
||||
#define __FILL_RETURN_SLOT \
|
||||
ANNOTATE_INTRA_FUNCTION_CALL; \
|
||||
call 772f; \
|
||||
int3; \
|
||||
772:
|
||||
|
||||
/*
|
||||
* Stuff the entire RSB.
|
||||
*
|
||||
* Google experimented with loop-unrolling and this turned out to be
|
||||
* the optimal version - two calls, each with their own speculation
|
||||
* trap should their return address end up getting used, in a loop.
|
||||
*/
|
||||
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
|
||||
mov $(nr/2), reg; \
|
||||
771: \
|
||||
ANNOTATE_INTRA_FUNCTION_CALL; \
|
||||
call 772f; \
|
||||
773: /* speculation trap */ \
|
||||
UNWIND_HINT_EMPTY; \
|
||||
pause; \
|
||||
lfence; \
|
||||
jmp 773b; \
|
||||
772: \
|
||||
ANNOTATE_INTRA_FUNCTION_CALL; \
|
||||
call 774f; \
|
||||
775: /* speculation trap */ \
|
||||
UNWIND_HINT_EMPTY; \
|
||||
pause; \
|
||||
lfence; \
|
||||
jmp 775b; \
|
||||
774: \
|
||||
add $(BITS_PER_LONG/8) * 2, sp; \
|
||||
dec reg; \
|
||||
jnz 771b; \
|
||||
/* barrier for jnz misprediction */ \
|
||||
#ifdef CONFIG_X86_64
|
||||
#define __FILL_RETURN_BUFFER(reg, nr) \
|
||||
mov $(nr/2), reg; \
|
||||
771: \
|
||||
__FILL_RETURN_SLOT \
|
||||
__FILL_RETURN_SLOT \
|
||||
add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
|
||||
dec reg; \
|
||||
jnz 771b; \
|
||||
/* barrier for jnz misprediction */ \
|
||||
lfence;
|
||||
#else
|
||||
/*
|
||||
* i386 doesn't unconditionally have LFENCE, as such it can't
|
||||
* do a loop.
|
||||
*/
|
||||
#define __FILL_RETURN_BUFFER(reg, nr) \
|
||||
.rept nr; \
|
||||
__FILL_RETURN_SLOT; \
|
||||
.endr; \
|
||||
add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Stuff a single RSB slot.
|
||||
*
|
||||
* To mitigate Post-Barrier RSB speculation, one CALL instruction must be
|
||||
* forced to retire before letting a RET instruction execute.
|
||||
*
|
||||
* On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
|
||||
* before this point.
|
||||
*/
|
||||
#define __FILL_ONE_RETURN \
|
||||
__FILL_RETURN_SLOT \
|
||||
add $(BITS_PER_LONG/8), %_ASM_SP; \
|
||||
lfence;
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
@ -132,28 +155,15 @@
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro ISSUE_UNBALANCED_RET_GUARD
|
||||
ANNOTATE_INTRA_FUNCTION_CALL
|
||||
call .Lunbalanced_ret_guard_\@
|
||||
int3
|
||||
.Lunbalanced_ret_guard_\@:
|
||||
add $(BITS_PER_LONG/8), %_ASM_SP
|
||||
lfence
|
||||
.endm
|
||||
|
||||
/*
|
||||
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
|
||||
* monstrosity above, manually.
|
||||
*/
|
||||
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
|
||||
.ifb \ftr2
|
||||
ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
|
||||
.else
|
||||
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
|
||||
.endif
|
||||
__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
|
||||
.Lunbalanced_\@:
|
||||
ISSUE_UNBALANCED_RET_GUARD
|
||||
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
|
||||
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
|
||||
__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
|
||||
__stringify(__FILL_ONE_RETURN), \ftr2
|
||||
|
||||
.Lskip_rsb_\@:
|
||||
.endm
|
||||
|
||||
|
@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void snp_abort(void);
void __init __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
u64 ia32_cap;

if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
cpu_mitigations_off()) {
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
@@ -538,6 +539,8 @@ static void __init md_clear_update_mitigation(void)
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
@@ -2275,6 +2278,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)

static ssize_t mmio_stale_data_show_state(char *buf)
{
if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
return sysfs_emit(buf, "Unknown: No mitigations\n");

if (mmio_mitigation == MMIO_MITIGATION_OFF)
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

@@ -2421,6 +2427,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return srbds_show_state(buf);

case X86_BUG_MMIO_STALE_DATA:
case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);

case X86_BUG_RETBLEED:
@@ -2480,7 +2487,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
else
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
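The new "Unknown: No mitigations" state is reported through the same sysfs attribute as the other MMIO Stale Data strings, so existing tooling keeps working as long as it treats unrecognized prefixes gracefully. As a hedged illustration, not part of the patch, a userspace check might look like this; only the sysfs path is taken from the kernel ABI, the rest is hypothetical:

#include <stdio.h>
#include <string.h>

/* Hypothetical userspace check for the MMIO Stale Data sysfs state. */
int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		if (f)
			fclose(f);
		return 1; /* attribute missing: older kernel or unaffected architecture */
	}
	fclose(f);

	if (!strncmp(buf, "Unknown", 7))
		printf("CPU is past its servicing period; vulnerability state not known\n");
	else
		printf("mmio_stale_data: %s", buf);
	return 0;
}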
@@ -1135,7 +1135,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_SPECTRE_V2 BIT(8)
#define NO_EIBRS_PBRSB BIT(9)
#define NO_MMIO BIT(9)
#define NO_EIBRS_PBRSB BIT(10)

#define VULNWL(vendor, family, model, whitelist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1158,6 +1159,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION),

/* Intel Family 6 */
VULNWL_INTEL(TIGERLAKE, NO_MMIO),
VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
VULNWL_INTEL(ALDERLAKE, NO_MMIO),
VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),

VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -1176,9 +1182,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),

/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1193,18 +1199,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

/* AMD Family 0xf - 0x12 */
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),

/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),

/* Zhaoxin Family 7 */
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
{}
};

@@ -1358,10 +1364,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Affected CPU list is generally enough to enumerate the vulnerability,
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
* not want the guest to enumerate the bug.
*
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
!arch_cap_mmio_immune(ia32_cap))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
if (!arch_cap_mmio_immune(ia32_cap)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
}

if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
@@ -701,7 +701,13 @@ static void __init early_set_pages_state(unsigned long paddr, unsigned int npage
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned int npages)
{
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
/*
* This can be invoked in early boot while running identity mapped, so
* use an open coded check for SNP instead of using cc_platform_has().
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;

/*
@@ -717,7 +723,13 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned int npages)
{
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
/*
* This can be invoked in early boot while running identity mapped, so
* use an open coded check for SNP instead of using cc_platform_has().
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;

/* Invalidate the memory pages before they are marked shared in the RMP table. */
@@ -2100,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp)
return true;
}

void __init snp_abort(void)
void __init __noreturn snp_abort(void)
{
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
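Annotating snp_abort() as __noreturn matters because the caller never resumes once sev_es_terminate() has run, and the annotation lets the compiler and objtool treat everything after the call as unreachable. A minimal standalone sketch of the attribute's effect, with hypothetical helper names rather than kernel code:

#include <stdlib.h>

/* With noreturn, the compiler knows code after a call to fatal_abort() is
 * unreachable, so the caller gets no "missing return" or fall-through
 * warnings and dead code after the call can be dropped. */
__attribute__((noreturn)) static void fatal_abort(void)
{
	abort();
}

static int parse_or_die(int value)
{
	if (value < 0)
		fatal_abort();	/* never returns */
	return value * 2;	/* compiler may assume value >= 0 here */
}

int main(void)
{
	return parse_or_die(21) == 42 ? 0 : 1;
}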
@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip);
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
unsigned long caller;
unsigned long tramp_addr, offset;

ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;

/* Set tramp_addr to the start of the code copied by the trampoline */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
caller = (unsigned long)ftrace_regs_call;
tramp_addr = (unsigned long)ftrace_regs_caller;
else
caller = (unsigned long)ftrace_call;
tramp_addr = (unsigned long)ftrace_caller;

/* Now place tramp_addr to the location within the trampoline ip is at */
offset = ip - ops->trampoline;
tramp_addr += offset;

/* Prevent unlikely recursion */
if (ip == caller)
if (ip == tramp_addr)
return NULL;

return orc_find(caller);
return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
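The fix above maps an instruction pointer that lies inside a dynamically allocated ftrace trampoline back to the same offset inside the code the trampoline was copied from (ftrace_caller or ftrace_regs_caller), so the ORC entry for that exact spot can be looked up instead of always using one fixed call site. A rough standalone sketch of that address arithmetic, using made-up addresses:

#include <stdio.h>

/* Hypothetical addresses: the original ftrace_regs_caller text and a
 * trampoline that contains a byte-for-byte copy of it. */
#define FTRACE_REGS_CALLER	0x1000UL
#define TRAMPOLINE_BASE		0x9000UL

static unsigned long tramp_to_source(unsigned long ip)
{
	unsigned long offset = ip - TRAMPOLINE_BASE;	/* offset inside the copy */

	return FTRACE_REGS_CALLER + offset;		/* same offset in the source */
}

int main(void)
{
	/* An ip 0x24 bytes into the trampoline maps to 0x1024 in the source,
	 * which is where the matching unwind data lives. */
	printf("0x%lx\n", tramp_to_source(TRAMPOLINE_BASE + 0x24));
	return 0;
}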
@@ -62,6 +62,7 @@

static bool __read_mostly pat_bp_initialized;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_bp_enabled;
static bool __read_mostly pat_cm_initialized;

@@ -86,6 +87,7 @@ void pat_disable(const char *msg_reason)
static int __init nopat(char *str)
{
pat_disable("PAT support disabled via boot option.");
pat_force_disabled = true;
return 0;
}
early_param("nopat", nopat);
@@ -272,7 +274,7 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
void __init init_cache_modes(void)
{
u64 pat = 0;

@@ -313,6 +315,12 @@ void init_cache_modes(void)
*/
pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
} else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
/*
* Clearly PAT is enabled underneath. Allow pat_enabled() to
* reflect this.
*/
pat_bp_enabled = true;
}

__init_cache_modes(pat);
@@ -1931,7 +1931,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
/* If we didn't flush the entire list, we could have told the driver
* there was more coming, but that turned out to be a lie.
*/
if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
if ((!list_empty(list) || errors || needs_resource ||
ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
q->mq_ops->commit_rqs(hctx);
/*
* Any items that need requeuing? Stuff them into hctx->dispatch,
@@ -2660,6 +2661,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
list_del_init(&rq->queuelist);
ret = blk_mq_request_issue_directly(rq, list_empty(list));
if (ret != BLK_STS_OK) {
errors++;
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
blk_mq_request_bypass_insert(rq, false,
@@ -2667,7 +2669,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
break;
}
blk_mq_end_request(rq, ret);
errors++;
} else
queued++;
}
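The widened condition above makes sure ->commit_rqs() still runs when dispatch stops early after the driver was told more requests were coming, so batched doorbell writes are not left hanging. A hedged sketch of that contract from a driver's point of view; every name here is hypothetical, only the queue_rq/commit_rqs split mirrors the real callbacks:

/* Sketch of a driver that batches doorbell writes. Requests queued with
 * last == 0 are only written to the submission queue; the hardware is
 * kicked either when a request arrives with last == 1 or when the block
 * layer calls the commit_rqs-style hook after an early stop. */
struct toy_queue {
	int pending;		/* requests written but not yet doorbelled */
};

static void toy_ring_doorbell(struct toy_queue *q)
{
	q->pending = 0;		/* tell the hardware to fetch everything queued */
}

static void toy_queue_rq(struct toy_queue *q, int last)
{
	q->pending++;
	if (last)
		toy_ring_doorbell(q);
}

static void toy_commit_rqs(struct toy_queue *q)
{
	if (q->pending)
		toy_ring_doorbell(q);
}

int main(void)
{
	struct toy_queue q = { 0 };

	toy_queue_rq(&q, 0);	/* queued, doorbell deferred */
	toy_queue_rq(&q, 0);	/* queued, doorbell deferred */
	toy_commit_rqs(&q);	/* dispatch stopped early: kick the hardware now */
	return q.pending;
}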
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;

for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, policy->cpu);
struct acpi_processor *pr = per_cpu(processors, cpu);

if (pr)
freq_qos_remove_request(&pr->thermal_req);
@@ -370,7 +370,7 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
bool ret;

status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status)) {
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
return false;
}
@@ -1043,11 +1043,10 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
break; \
} \
if (__items[i].integer.value > _Generic(__val, \
u8: U8_MAX, \
u16: U16_MAX, \
u32: U32_MAX, \
u64: U64_MAX, \
default: 0U)) { \
u8 *: U8_MAX, \
u16 *: U16_MAX, \
u32 *: U32_MAX, \
u64 *: U64_MAX)) { \
ret = -EOVERFLOW; \
break; \
} \
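In the macro above __val is a pointer to the destination variable, so the _Generic selectors have to be pointer types; a pointer never matches a value-type association, which means the old version fell through to the default branch and the range check compared against 0. A standalone illustration of the difference in plain C, not the kernel macro itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t out = 0;
	uint16_t *val = &out;

	/* Selecting on value types never matches a pointer expression,
	 * so the default association (0) is picked. */
	unsigned long wrong = _Generic(val,
				       uint16_t: UINT16_MAX,
				       default: 0U);

	/* Selecting on pointer types picks the intended limit. */
	unsigned long right = _Generic(val,
				       uint8_t *: UINT8_MAX,
				       uint16_t *: UINT16_MAX,
				       uint32_t *: UINT32_MAX,
				       uint64_t *: UINT64_MAX);

	printf("wrong=%lu right=%lu\n", wrong, right);	/* wrong=0 right=65535 */
	return 0;
}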
@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;

mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
mmap_read_unlock(alloc->vma_vm_mm);

data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
if (binder_alloc_get_vma(alloc) != NULL) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}

mmap_read_lock(alloc->vma_vm_mm);
if (binder_alloc_get_vma(alloc) == NULL) {
mmap_read_unlock(alloc->vma_vm_mm);
goto uninitialized;
}

mmap_read_unlock(alloc->vma_vm_mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}

uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
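Both binder hunks move the binder_alloc_get_vma() check under mmap_read_lock(), since inspecting another task's VMA state without the mmap lock is racy. A minimal sketch of that general pattern; the locking helpers are real kernel APIs, the lookup function and its use are hypothetical:

#include <linux/mm.h>

/* Sketch only: take the mmap read lock around any VMA lookup and drop it
 * once the answer has been extracted, before doing unrelated work. */
static bool toy_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);		/* first VMA with vm_end > addr */
	mapped = vma && vma->vm_start <= addr;	/* does it actually cover addr? */
	mmap_read_unlock(mm);

	return mapped;
}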
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,

lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;

/* loff_t vars have been assigned __u64 */
if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
return -EOVERFLOW;

memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
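lo_offset and lo_sizelimit are signed loff_t fields, but the values copied in come from unsigned 64-bit userspace fields, so anything above LLONG_MAX ends up negative after the assignment on the usual two's-complement ABI; that is what the new "< 0" check rejects. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t from_user = UINT64_MAX - 4095;	/* bogus offset from userspace */
	int64_t offset = (int64_t)from_user;	/* what the loff_t assignment does
						 * on a two's-complement target */

	if (offset < 0)
		printf("rejected with EOVERFLOW (offset=%lld)\n", (long long)offset);
	return 0;
}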
@@ -536,7 +536,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,

target_freq = clamp_val(target_freq, policy->min, policy->max);

if (!cpufreq_driver->target_index)
if (!policy->freq_table)
return target_freq;

idx = cpufreq_frequency_table_target(policy, target_freq, relation);
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
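The SMBIOS 3 entry point keeps its version in the three bytes at offsets 7-9, so a 24-bit big-endian read at buf + 7 extracts exactly the bytes the old 32-bit read-and-mask did, without involving the unrelated length byte at offset 6. A quick standalone check of that equivalence, open-coded rather than using the kernel's get_unaligned_be*() helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint32_t be24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	/* Sketch of an _SM3_ entry point: byte 6 is the length, bytes 7..9
	 * are major, minor and docrev of the version. */
	uint8_t buf[24] = { '_', 'S', 'M', '3', '_', 0, 0x18, 3, 5, 0 };

	printf("%06x %06x\n", be32(buf + 6) & 0xFFFFFF, be24(buf + 7)); /* both 030500 */
	return 0;
}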
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
amdgpu_put_xgmi_hive(hive);
goto init_failed;
}

/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
amdgpu_put_xgmi_hive(hive);
}
}

@@ -4413,8 +4415,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);

amdgpu_amdkfd_pre_reset(adev);

if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -2641,6 +2641,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);

if (adev->gmc.xgmi.num_physical_nodes > 1)
psp_xgmi_terminate(psp);
}

psp_asd_terminate(psp);
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}

return psp_xgmi_terminate(&adev->psp);
return 0;
}

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
@@ -131,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
bool enable);

static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -1139,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5182,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
/* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
if (adev->sdma.num_instances > 1) {
data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
}
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5213,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
/* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
if (adev->sdma.num_instances > 1) {
data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
}
}
}

@@ -5328,8 +5337,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
break;
case IP_VERSION(11, 0, 1):
gfx_v11_cntl_pg(adev, enable);
/* TODO: Enable this when GFXOFF is ready */
// amdgpu_gfx_off_ctrl(adev, enable);
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)

gfx_v9_0_tiling_mode_table_init(adev);

gfx_v9_0_setup_rb(adev);
if (adev->gfx.num_gfx_rings)
gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);

@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
unsigned int num_level, block_size;
uint32_t tmp;
int i;

num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
if (adev->gmc.translate_further)
num_level -= 1;
else
block_size -= 9;

for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
adev->vm_manager.num_level);
num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
@@ -247,6 +247,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)

}

static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;

if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
return;

def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
if (enable) {
data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
} else {
data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
}

if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
}

static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;

if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
return;

def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
if (enable)
data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
else
data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;

if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);

def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
if (enable) {
data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
} else {
data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
}

if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
}

static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
uint32_t data;

/* AMD_CG_SUPPORT_BIF_MGCG */
data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_BIF_MGCG;

/* AMD_CG_SUPPORT_BIF_LS */
data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}

const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +337,9 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
.update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
};
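The new clock-gating helpers all follow the same read-modify-write shape: snapshot the register, toggle the gate bits, and write back only if something changed, so re-enabling an already-enabled gate costs no MMIO write. A stripped-down sketch of that pattern with hypothetical register accessors standing in for the SOC15 read/write macros:

#include <stdbool.h>
#include <stdint.h>

#define GATE_ENABLE_MASK	0x3f	/* hypothetical gate bits */

/* Hypothetical MMIO accessors standing in for RREG32_SOC15/WREG32_SOC15. */
static uint32_t reg_read(volatile uint32_t *reg) { return *reg; }
static void reg_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }

static void update_gating(volatile uint32_t *reg, bool enable)
{
	uint32_t def, data;

	def = data = reg_read(reg);
	if (enable)
		data |= GATE_ENABLE_MASK;
	else
		data &= ~GATE_ENABLE_MASK;

	if (def != data)		/* skip the write when nothing changed */
		reg_write(reg, data);
}

int main(void)
{
	uint32_t fake_reg = 0;

	update_gating(&fake_reg, true);
	return fake_reg == GATE_ENABLE_MASK ? 0 : 1;
}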
@@ -494,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}

static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
bool enter)
{
if (enter)
amdgpu_gfx_rlc_enter_safe_mode(adev);
else
amdgpu_gfx_rlc_exit_safe_mode(adev);

if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

return 0;
}

static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};

static int soc21_common_early_init(void *handle)
@@ -603,6 +618,8 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
@@ -702,6 +719,7 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
case IP_VERSION(4, 3, 1):
case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@@ -709,10 +727,6 @@ static int soc21_common_set_clockgating_state(void *handle,
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
case IP_VERSION(7, 7, 0):
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
@@ -382,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 7):
gfx_target_version = 100307;
gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}

bool modifier_has_dcc(uint64_t modifier)
static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);

void get_min_max_dc_plane_scaling(struct drm_device *dev,
struct drm_framebuffer *fb,
int *min_downscale, int *max_upscale);

int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);

bool modifier_has_dcc(uint64_t modifier);

unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);

int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
@@ -1750,6 +1750,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
//BIF0_PCIE_TX_POWER_CTRL_1
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H

// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
#define SMU13_DRIVER_IF_VERSION 0x23

//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x1D
#define PPTABLE_VERSION 0x22

#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;

// Section: Total Board Power idle vs active coefficients
uint8_t TotalBoardPowerSupport;
uint8_t TotalBoardPowerPadding[3];

int16_t TotalIdleBoardPowerM;
int16_t TotalIdleBoardPowerB;
int16_t TotalBoardPowerM;
int16_t TotalBoardPowerB;

// SECTION: Sku Reserved
uint32_t Spare[64];
uint32_t Spare[61];

// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum

// UCLK Spread Spectrum
uint16_t UclkSpreadPadding;
uint8_t UclkTrainingModeSpreadPercent;
uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz

// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {

// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
uint8_t PaddingMem1[3];

// Section: Total Board Power
uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
uint16_t BoardPowerPadding;
uint8_t PaddingMem1[7];

// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;

uint32_t EnergyAccumulator;
uint16_t AverageSocketPower ;
uint16_t AverageSocketPower;
uint16_t AverageTotalBoardPower;

uint16_t AvgTemperature[TEMP_COUNT];
uint16_t TempPadding;

uint8_t PcieRate ;
uint8_t PcieWidth ;
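The SkuTable hunk above appears to add 12 bytes of new total-board-power fields (one u8 flag, three padding bytes, four int16_t coefficients) while shrinking the Sku Reserved spare array from 64 to 61 uint32_t entries, which is exactly 12 bytes; the structure shared with the PMFW keeps its size and only the layout, and hence PPTABLE_VERSION, changes. A standalone sketch of how that kind of invariant can be checked at build time, with hypothetical struct names:

#include <stdint.h>

struct sku_v1 {
	uint32_t debug_overrides;
	uint32_t spare[64];
};

struct sku_v2 {
	uint32_t debug_overrides;

	uint8_t  total_board_power_support;
	uint8_t  total_board_power_padding[3];
	int16_t  total_idle_board_power_m;
	int16_t  total_idle_board_power_b;
	int16_t  total_board_power_m;
	int16_t  total_board_power_b;

	uint32_t spare[61];	/* 3 fewer u32 = the 12 bytes added above */
};

/* The firmware interface must not change size, only layout. */
_Static_assert(sizeof(struct sku_v1) == sizeof(struct sku_v2),
	       "SkuTable size must stay stable across versions");

int main(void) { return 0; }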
@@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C

#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
/*
* Note: obj->dma_buf can't disappear as long as we still hold a
* handle reference in obj->handle_count.
*/
mutex_lock(&filp->prime.lock);
if (obj->dma_buf) {
drm_prime_remove_buf_handle_locked(&filp->prime,
obj->dma_buf);
}
mutex_unlock(&filp->prime.lock);
}

/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);

drm_gem_remove_prime_handles(obj, file_priv);
drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);

drm_gem_object_handle_put_unlocked(obj);
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);

/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle)
{
struct rb_node *rb;

rb = prime_fpriv->dmabufs.rb_node;
mutex_lock(&prime_fpriv->lock);

rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;

member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
if (member->dma_buf == dma_buf) {
member = rb_entry(rb, struct drm_prime_member, handle_rb);
if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

dma_buf_put(dma_buf);
dma_buf_put(member->dma_buf);
kfree(member);
return;
} else if (member->dma_buf < dma_buf) {
break;
} else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}

mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)