Snap for 9806914 from d0f0c26984 to android14-6.1-keystone-qcom-release

Change-Id: I7330d1d0c92d2ade8dec7107bce4a3149ac34eae
Commit 2be6a1cfdd by Android Build Coastguard Worker, 2023-03-24 07:10:51 +00:00
398 changed files with 6528 additions and 3431 deletions


@ -52,7 +52,7 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Default output terminal descriptors
All attributes read only:
All attributes read only except bSourceID:
============== =============================================
iTerminal index of string descriptor


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 17
SUBLEVEL = 20
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

File diff suppressed because it is too large.


@ -70,7 +70,6 @@
debugfs_create_file
debugfs_create_u32
debugfs_create_x32
debugfs_lookup
debugfs_remove
default_llseek
delayed_work_timer_fn
@ -1006,6 +1005,7 @@
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_connector_register
drm_connector_unregister
__drm_crtc_commit_free
@ -1747,6 +1747,9 @@
rproc_get_by_child
try_wait_for_completion
# required by qcom_tsens.ko
debugfs_lookup
# required by qcom_usb_vbus-regulator.ko
regulator_get_current_limit_regmap
regulator_set_current_limit_regmap
@ -2002,6 +2005,7 @@
ufshcd_uic_hibern8_exit
# required by ulpi.ko
debugfs_lookup_and_remove
of_clk_set_defaults
of_device_request_module
__request_module


@ -137,10 +137,10 @@
bpf_prog_put
bpf_prog_sub
bpf_stats_enabled_key
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -1177,6 +1177,7 @@
__fdget
fd_install
fget
_find_first_and_bit
_find_first_bit
_find_first_zero_bit
find_get_pid
@ -1633,8 +1634,8 @@
ieee80211_txq_may_transmit
ieee80211_txq_schedule_start
ieee80211_tx_rate_update
ieee80211_tx_status
ieee80211_tx_status_8023
ieee80211_tx_status
ieee80211_tx_status_ext
ieee80211_tx_status_irqsafe
ieee80211_unregister_hw
@ -2087,8 +2088,8 @@
memremap
memscan
mem_section
memset
memset64
memset
__memset_io
memstart_addr
memunmap
@ -2226,10 +2227,10 @@
nla_find
nla_memcpy
__nla_parse
nla_put
nla_put_64bit
nla_reserve
nla_put
nla_reserve_64bit
nla_reserve
nla_strscpy
__nla_validate
__nlmsg_put
@ -3189,6 +3190,11 @@
snd_soc_unregister_card
snd_soc_unregister_component
snd_timer_interrupt
snd_usb_autoresume
snd_usb_autosuspend
snd_usb_endpoint_close
snd_usb_endpoint_open
snd_usb_endpoint_prepare
snprintf
soc_device_register
soc_device_unregister
@ -3408,6 +3414,7 @@
__traceiter_android_rvh_account_irq
__traceiter_android_rvh_after_dequeue_task
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_audio_usb_offload_disconnect
__traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_tick
@ -3431,6 +3438,9 @@
__traceiter_android_rvh_flush_task
__traceiter_android_rvh_get_nohz_timer_target
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_iommu_alloc_insert_iova
__traceiter_android_rvh_iommu_iovad_init_alloc_algo
__traceiter_android_rvh_iommu_limit_align_shift
__traceiter_android_rvh_iommu_setup_dma_ops
__traceiter_android_rvh_is_cpu_allowed
__traceiter_android_rvh_migrate_queued_task
@ -3473,6 +3483,7 @@
__traceiter_android_rvh_update_thermal_stats
__traceiter_android_rvh_util_est_update
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_audio_usb_offload_connect
__traceiter_android_vh_binder_restore_priority
__traceiter_android_vh_binder_set_priority
__traceiter_android_vh_binder_wakeup_ilocked
@ -3519,6 +3530,7 @@
__traceiter_gpu_mem_total
__traceiter_ipi_entry
__traceiter_ipi_raise
__traceiter_irq_handler_entry
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
@ -3530,6 +3542,7 @@
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_dequeue_task
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_audio_usb_offload_disconnect
__tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_tick
@ -3553,6 +3566,9 @@
__tracepoint_android_rvh_flush_task
__tracepoint_android_rvh_get_nohz_timer_target
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_iommu_alloc_insert_iova
__tracepoint_android_rvh_iommu_iovad_init_alloc_algo
__tracepoint_android_rvh_iommu_limit_align_shift
__tracepoint_android_rvh_iommu_setup_dma_ops
__tracepoint_android_rvh_is_cpu_allowed
__tracepoint_android_rvh_migrate_queued_task
@ -3595,6 +3611,7 @@
__tracepoint_android_rvh_update_thermal_stats
__tracepoint_android_rvh_util_est_update
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_audio_usb_offload_connect
__tracepoint_android_vh_binder_restore_priority
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_binder_wakeup_ilocked
@ -3641,6 +3658,7 @@
__tracepoint_gpu_mem_total
__tracepoint_ipi_entry
__tracepoint_ipi_raise
__tracepoint_irq_handler_entry
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
@ -4097,7 +4115,15 @@
xdp_rxq_info_unreg_mem_model
xdp_warn
xfrm_lookup
xhci_alloc_command
xhci_alloc_erst
xhci_free_command
xhci_get_endpoint_index
xhci_queue_stop_endpoint
xhci_ring_alloc
xhci_ring_cmd_db
xhci_ring_free
xhci_trb_virt_to_dma
xp_alloc
xp_dma_map
xp_dma_sync_for_cpu_slow


@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
/* The small sections were sorted to the end of the segment.
The following should definitely cover them. */
gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
got = sechdrs[me->arch.gotsecindex].sh_addr;
gp = got + 0x8000;
for (i = 0; i < n; i++) {
unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);


@ -241,7 +241,7 @@ stmpe811@41 {
irq-trigger = <0x1>;
stmpegpio: stmpe-gpio {
compatible = "stmpe,gpio";
compatible = "st,stmpe-gpio";
reg = <0>;
gpio-controller;
#gpio-cells = <2>;


@ -243,7 +243,6 @@ CONFIG_NET_SCH_FQ_CODEL=y
CONFIG_NET_SCH_FQ=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_BASIC=y
CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
@ -508,6 +507,7 @@ CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
CONFIG_TYPEC_TCPCI=y
CONFIG_TYPEC_UCSI=y
CONFIG_TYPEC_DP_ALTMODE=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set


@ -33,7 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
({ \
efi_virtmap_load(); \
__efi_fpsimd_begin(); \
spin_lock(&efi_rt_lock); \
raw_spin_lock(&efi_rt_lock); \
})
#undef arch_efi_call_virt
@ -42,12 +42,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_teardown() \
({ \
spin_unlock(&efi_rt_lock); \
raw_spin_unlock(&efi_rt_lock); \
__efi_fpsimd_end(); \
efi_virtmap_unload(); \
})
extern spinlock_t efi_rt_lock;
extern raw_spinlock_t efi_rt_lock;
extern u64 *efi_rt_stack_top;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);


@ -37,6 +37,29 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
static inline void set_page_mte_tagged(struct page *page)
{
/*
* Ensure that the tags written prior to this function are visible
* before the page flags update.
*/
smp_wmb();
set_bit(PG_mte_tagged, &page->flags);
}
static inline bool page_mte_tagged(struct page *page)
{
bool ret = test_bit(PG_mte_tagged, &page->flags);
/*
* If the page is tagged, ensure ordering with a likely subsequent
* read of the tags.
*/
if (ret)
smp_rmb();
return ret;
}
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
@ -56,6 +79,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size);
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
static inline void set_page_mte_tagged(struct page *page)
{
}
static inline bool page_mte_tagged(struct page *page)
{
return false;
}
static inline void mte_zero_clear_page_tags(void *addr)
{
}
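The helper pair above is a classic publication protocol: the writer orders the tag stores before the flag set with smp_wmb(), and the reader orders the flag test before any subsequent tag loads with smp_rmb(). A minimal userspace sketch of the same ordering, using GCC release/acquire builtins in place of the kernel primitives (the names tags and tagged are illustrative, not kernel API):

/* Userspace model of the PG_mte_tagged publication protocol.
 * Compile: gcc -O2 -pthread mte_model.c -o mte_model
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static char tags[16];   /* stands in for the page's MTE tags */
static int tagged;      /* stands in for the PG_mte_tagged bit */

static void *writer(void *arg)
{
	memset(tags, 0x5a, sizeof(tags));        /* write the "tags" */
	/* release store: orders the tag writes before the flag,
	 * like smp_wmb() + set_bit() in set_page_mte_tagged() */
	__atomic_store_n(&tagged, 1, __ATOMIC_RELEASE);
	return NULL;
}

static void *reader(void *arg)
{
	/* acquire load: orders the flag read before the tag reads,
	 * like test_bit() + smp_rmb() in page_mte_tagged() */
	while (!__atomic_load_n(&tagged, __ATOMIC_ACQUIRE))
		;
	printf("tag[0] = %#x (guaranteed 0x5a, never 0)\n", tags[0]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}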


@ -1050,7 +1050,7 @@ static inline void arch_swap_invalidate_area(int type)
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
set_bit(PG_mte_tagged, &folio->flags);
set_page_mte_tagged(&folio->page);
}
#endif /* CONFIG_ARM64_MTE */


@ -2075,8 +2075,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
if (!page_mte_tagged(ZERO_PAGE(0))) {
mte_clear_page_tags(lm_alias(empty_zero_page));
set_page_mte_tagged(ZERO_PAGE(0));
}
kasan_init_hw_tags_cpu();
}


@ -146,7 +146,7 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
return s;
}
DEFINE_SPINLOCK(efi_rt_lock);
DEFINE_RAW_SPINLOCK(efi_rt_lock);
asmlinkage u64 *efi_rt_stack_top __ro_after_init;


@ -46,7 +46,7 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
* Pages mapped in user space as !pte_access_permitted() (e.g.
* PROT_EXEC only) may not have the PG_mte_tagged flag set.
*/
if (!test_bit(PG_mte_tagged, &page->flags)) {
if (!page_mte_tagged(page)) {
put_page(page);
dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
continue;


@ -271,7 +271,7 @@ static int swsusp_mte_save_tags(void)
if (!page)
continue;
if (!test_bit(PG_mte_tagged, &page->flags))
if (!page_mte_tagged(page))
continue;
ret = save_tags(page, pfn);


@ -41,8 +41,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
set_page_mte_tagged(page);
return;
}
}
if (!pte_is_tagged)
@ -52,8 +54,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
* Test PG_mte_tagged again in case it was racing with another
* set_pte_at().
*/
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
if (!page_mte_tagged(page)) {
mte_clear_page_tags(page_address(page));
set_page_mte_tagged(page);
}
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
@ -69,9 +73,11 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
if (!test_bit(PG_mte_tagged, &page->flags))
if (!page_mte_tagged(page)) {
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
set_page_mte_tagged(page);
}
}
/* ensure the tags are visible before the PTE is set */
@ -96,8 +102,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
* pages is tagged, set_pte_at() may zero or change the tags of the
* other page via mte_sync_tags().
*/
if (test_bit(PG_mte_tagged, &page1->flags) ||
test_bit(PG_mte_tagged, &page2->flags))
if (page_mte_tagged(page1) || page_mte_tagged(page2))
return addr1 != addr2;
return ret;
@ -454,7 +459,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
put_page(page);
break;
}
WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
WARN_ON_ONCE(!page_mte_tagged(page));
/* limit access to the end of the page */
offset = offset_in_page(addr);


@ -1061,7 +1061,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
maddr = page_address(page);
if (!write) {
if (test_bit(PG_mte_tagged, &page->flags))
if (page_mte_tagged(page))
num_tags = mte_copy_tags_to_user(tags, maddr,
MTE_GRANULES_PER_PAGE);
else
@ -1078,7 +1078,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
* completed fully
*/
if (num_tags == MTE_GRANULES_PER_PAGE)
set_bit(PG_mte_tagged, &page->flags);
set_page_mte_tagged(page);
kvm_release_pfn_dirty(pfn);
}


@ -1271,9 +1271,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
return -EFAULT;
for (i = 0; i < nr_pages; i++, page++) {
if (!test_bit(PG_mte_tagged, &page->flags)) {
if (!page_mte_tagged(page)) {
mte_clear_page_tags(page_address(page));
set_bit(PG_mte_tagged, &page->flags);
set_page_mte_tagged(page);
}
}


@ -21,9 +21,11 @@ void copy_highpage(struct page *to, struct page *from)
copy_page(kto, kfrom);
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
set_bit(PG_mte_tagged, &to->flags);
if (system_supports_mte() && page_mte_tagged(from)) {
if (kasan_hw_tags_enabled())
page_kasan_tag_reset(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);
}
}
EXPORT_SYMBOL(copy_highpage);


@ -972,5 +972,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
void tag_clear_highpage(struct page *page)
{
mte_zero_clear_page_tags(page_address(page));
set_bit(PG_mte_tagged, &page->flags);
set_page_mte_tagged(page);
}


@ -24,7 +24,7 @@ int mte_save_tags(struct page *page)
{
void *tag_storage, *ret;
if (!test_bit(PG_mte_tagged, &page->flags))
if (!page_mte_tagged(page))
return 0;
tag_storage = mte_allocate_tag_storage();


@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p)
panic("No configuration setup");
}
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
paging_init();
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#endif
paging_init();
#ifdef CONFIG_NATFEAT
nf_init();


@ -374,7 +374,7 @@ struct pci_msu {
PCI_CFG04_STAT_SSE | \
PCI_CFG04_STAT_PE)
#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD)
#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
#define KORINA_REVID 0
#define KORINA_CLASS_CODE 0


@ -10,7 +10,6 @@
/ {
model = "fsl,T1040RDB-REV-A";
compatible = "fsl,T1040RDB-REV-A";
};
&seville_port0 {


@ -36,15 +36,17 @@
#define PACA_IRQ_DEC 0x08 /* Or FIT */
#define PACA_IRQ_HMI 0x10
#define PACA_IRQ_PMI 0x20
#define PACA_IRQ_REPLAYING 0x40
/*
* Some soft-masked interrupts must be hard masked until they are replayed
* (e.g., because the soft-masked handler does not clear the exception).
* Interrupt replay itself must remain hard masked too.
*/
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
#else
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_REPLAYING)
#endif
#endif /* CONFIG_PPC64 */


@ -295,7 +295,6 @@ extern void free_unused_pacas(void);
#else /* CONFIG_PPC64 */
static inline void allocate_paca_ptrs(void) { }
static inline void allocate_paca(int cpu) { }
static inline void free_unused_pacas(void) { }


@ -26,6 +26,7 @@
#include <asm/percpu.h>
extern int boot_cpuid;
extern int boot_cpu_hwid; /* PPC64 only */
extern int spinning_secondaries;
extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;


@ -67,11 +67,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl)
static void iommu_debugfs_del(struct iommu_table *tbl)
{
char name[10];
struct dentry *liobn_entry;
sprintf(name, "%08lx", tbl->it_index);
liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
debugfs_remove(liobn_entry);
debugfs_lookup_and_remove(name, iommu_debugfs_dir);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}


@ -70,22 +70,19 @@ int distribute_irqs = 1;
static inline void next_interrupt(struct pt_regs *regs)
{
/*
* Softirq processing can enable/disable irqs, which will leave
* MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix
* this up.
*/
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
else
irq_soft_mask_set(IRQS_ALL_DISABLED);
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
}
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.
*/
lockdep_hardirq_exit();
trace_hardirqs_on();
trace_hardirqs_off();
lockdep_hardirq_enter();
}
static inline bool irq_happened_test_and_clear(u8 irq)
@ -97,22 +94,11 @@ static inline bool irq_happened_test_and_clear(u8 irq)
return false;
}
void replay_soft_interrupts(void)
static void __replay_soft_interrupts(void)
{
struct pt_regs regs;
/*
* Be careful here, calling these interrupt handlers can cause
* softirqs to be raised, which they may run when calling irq_exit,
* which will cause local_irq_enable() to be run, which can then
* recurse into this function. Don't keep any state across
* interrupt handler calls which may change underneath us.
*
* Softirqs can not be disabled over replay to stop this recursion
* because interrupts taken in idle code may require RCU softirq
* to run in the irq RCU tracking context. This is a hard problem
* to fix without changes to the softirq or idle layer.
*
* We use local_paca rather than get_paca() to avoid all the
* debug_smp_processor_id() business in this low level function.
*/
@ -120,13 +106,20 @@ void replay_soft_interrupts(void)
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON_ONCE(mfmsr() & MSR_EE);
WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
}
/*
* PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
* MSR[EE] to get PMIs, which can result in more IRQs becoming
* pending.
*/
local_paca->irq_happened |= PACA_IRQ_REPLAYING;
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
regs.msr |= MSR_EE;
again:
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
* Any HV call will have this side effect.
@ -175,13 +168,14 @@ void replay_soft_interrupts(void)
next_interrupt(&regs);
}
/*
* Softirq processing can enable and disable interrupts, which can
* result in new irqs becoming pending. Must keep looping until we
* have cleared out all pending interrupts.
*/
if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
goto again;
local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
}
void replay_soft_interrupts(void)
{
irq_enter(); /* See comment in arch_local_irq_restore */
__replay_soft_interrupts();
irq_exit();
}
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
@ -200,13 +194,13 @@ static inline void replay_soft_interrupts_irqrestore(void)
if (kuap_state != AMR_KUAP_BLOCKED)
set_kuap(AMR_KUAP_BLOCKED);
replay_soft_interrupts();
__replay_soft_interrupts();
if (kuap_state != AMR_KUAP_BLOCKED)
set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
#endif
notrace void arch_local_irq_restore(unsigned long mask)
@ -219,9 +213,13 @@ notrace void arch_local_irq_restore(unsigned long mask)
return;
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(in_nmi() || in_hardirq());
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON_ONCE(in_nmi());
WARN_ON_ONCE(in_hardirq());
WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
}
again:
/*
* After the stb, interrupts are unmasked and there are no interrupts
* pending replay. The restart sequence makes this atomic with
@ -248,6 +246,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
/*
* If we came here from the replay below, we might have a preempt
* pending (due to preempt_enable_no_resched()). Have to check now.
*/
preempt_check_resched();
return;
happened:
@ -261,6 +265,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
irq_soft_mask_set(IRQS_ENABLED);
local_paca->irq_happened = 0;
__hard_irq_enable();
preempt_check_resched();
return;
}
@ -296,12 +301,38 @@ notrace void arch_local_irq_restore(unsigned long mask)
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
/*
* Now enter interrupt context. The interrupt handlers themselves
* also call irq_enter/exit (which is okay, they can nest). But call
* it here now to hold off softirqs until the below irq_exit(). If
* we allowed replayed handlers to run softirqs, that enables irqs,
* which must replay interrupts, which recurses in here and makes
* things more complicated. The recursion is limited to 2, and it can
* be made to work, but it's complicated.
*
* local_bh_disable can not be used here because interrupts taken in
* idle are not in the right context (RCU, tick, etc) to run softirqs
* so irq_enter must be called.
*/
irq_enter();
replay_soft_interrupts_irqrestore();
irq_exit();
if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
/*
* The softirq processing in irq_exit() may enable interrupts
* temporarily, which can result in MSR[EE] being enabled and
* more irqs becoming pending. Go around again if that happens.
*/
trace_hardirqs_on();
preempt_enable_no_resched();
goto again;
}
trace_hardirqs_on();
irq_soft_mask_set(IRQS_ENABLED);
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
local_paca->irq_happened = 0;
__hard_irq_enable();
preempt_enable();
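The reworked arch_local_irq_restore() above is a drain-until-quiescent loop: replay whatever is pending, then re-check, because softirq processing may have made new interrupts pending in the meantime. A hedged standalone sketch of that control flow (the pending mask and handle() are stand-ins, not the real PACA bookkeeping):

/* Drain-until-quiescent sketch: handlers may raise new work while we
 * process, so keep looping until the pending mask stays empty.
 * Compile: gcc -O2 drain.c -o drain
 */
#include <stdio.h>

static unsigned int pending = 0x7;   /* bits = pending "interrupt" sources */
static int raised;

static void handle(int bit)
{
	printf("handling source %d\n", bit);
	if (bit == 2 && !raised) {
		raised = 1;
		pending |= 0x2;   /* handler raises new work, like a softirq */
	}
}

static void replay(void)
{
again:
	for (int bit = 0; bit < 8; bit++) {
		if (pending & (1u << bit)) {
			pending &= ~(1u << bit);
			handle(bit);
		}
	}
	/* a handler may have made more work pending: go around again,
	 * mirroring the `goto again` in arch_local_irq_restore() */
	if (pending)
		goto again;
}

int main(void)
{
	replay();
	puts("quiescent");
	return 0;
}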


@ -366,8 +366,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
// Pass the boot CPU's hard CPU id back to our caller
*((u32 *)data) = be32_to_cpu(intserv[found_thread]);
if (IS_ENABLED(CONFIG_PPC64))
boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
/*
* PAPR defines "logical" PVR values for cpus that
@ -751,7 +751,6 @@ static inline void save_fscr_to_task(void) {}
void __init early_init_devtree(void *params)
{
u32 boot_cpu_hwid;
phys_addr_t limit;
DBG(" -> early_init_devtree(%px)\n", params);
@ -847,7 +846,7 @@ void __init early_init_devtree(void *params)
/* Retrieve CPU related informations from the flat tree
* (altivec support, boot CPU ID, ...)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
if (boot_cpuid < 0) {
printk("Failed to identify boot CPU !\n");
BUG();
@ -864,11 +863,6 @@ void __init early_init_devtree(void *params)
mmu_early_init_devtree();
// NB. paca is not installed until later in early_setup()
allocate_paca_ptrs();
allocate_paca(boot_cpuid);
set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);


@ -86,6 +86,10 @@ EXPORT_SYMBOL(machine_id);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
#endif
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.


@ -385,17 +385,21 @@ void __init early_setup(unsigned long dt_ptr)
/*
* Do early initialization using the flattened device
* tree, such as retrieving the physical memory map or
* calculating/retrieving the hash table size.
* calculating/retrieving the hash table size, discover
* boot_cpuid and boot_cpu_hwid.
*/
early_init_devtree(__va(dt_ptr));
/* Now we know the logical id of our boot cpu, setup the paca. */
if (boot_cpuid != 0) {
/* Poison paca_ptrs[0] again if it's not the boot cpu */
memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
}
allocate_paca_ptrs();
allocate_paca(boot_cpuid);
set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
fixup_boot_paca(paca_ptrs[boot_cpuid]);
setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
// smp_processor_id() now reports boot_cpuid
#ifdef CONFIG_SMP
task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
#endif
/*
* Configure exception handlers. This include setting up trampolines


@ -374,7 +374,7 @@ void vtime_flush(struct task_struct *tsk)
#define calc_cputime_factors()
#endif
void __delay(unsigned long loops)
void __no_kcsan __delay(unsigned long loops)
{
unsigned long start;
@ -395,7 +395,7 @@ void __delay(unsigned long loops)
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
void __no_kcsan udelay(unsigned long usecs)
{
__delay(tb_ticks_per_usec * usecs);
}


@ -79,6 +79,20 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
* We only need a stack frame if:
* - we call other functions (kernel helpers), or
* - we use non volatile registers, or
* - we use the tail call counter, or
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
unsigned int nvreg_mask;
@ -118,7 +132,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
#define BPF_TAILCALL_PROLOGUE_SIZE 4
EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
if (bpf_has_stack_frame(ctx))
EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_TAILCALL)
EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
@ -171,7 +186,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
/* Tear down our stack frame */
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (bpf_has_stack_frame(ctx))
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_MTLR(_R0));


@ -87,6 +87,13 @@ endif
# Avoid generating .eh_frame sections.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
# The RISC-V attributes frequently cause compatibility issues and provide no
# information, so just turn them off.
KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)


@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* _ASM_RISCV_FTRACE_H */


@ -3,6 +3,9 @@
* Copyright (C) 2020 SiFive
*/
#ifndef _ASM_RISCV_INSN_H
#define _ASM_RISCV_INSN_H
#include <linux/bits.h>
/* The bit field of immediate value in I-type instruction */
@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \
(RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
(RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
(RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
#endif /* _ASM_RISCV_INSN_H */


@ -9,4 +9,6 @@
int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text(void *addr, u32 insn);
extern int riscv_patch_in_stop_machine;
#endif /* _ASM_RISCV_PATCH_H */


@ -14,6 +14,10 @@ COMPAT_LD := $(LD)
COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
COMPAT_LD_FLAGS := -melf32lriscv
# Disable attributes, as they're useless and break the build.
COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute)
COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
# Files to link into the compat_vdso
obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o


@ -15,10 +15,19 @@
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
mutex_lock(&text_mutex);
/*
* The code sequences we use for ftrace can't be patched while the
* kernel is running, so we need to use stop_machine() to modify them
* for now. This doesn't play nice with text_mutex, so we use this flag
* to elide the check.
*/
riscv_patch_in_stop_machine = true;
}
void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
riscv_patch_in_stop_machine = false;
mutex_unlock(&text_mutex);
}
@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
int out;
ftrace_arch_code_modify_prepare();
mutex_lock(&text_mutex);
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
ftrace_arch_code_modify_post_process();
mutex_unlock(&text_mutex);
return out;
}


@ -11,6 +11,7 @@
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
struct patch_insn {
@ -19,6 +20,8 @@ struct patch_insn {
atomic_t cpu_count;
};
int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU
/*
* The fix_to_virt(, idx) needs a const value (not a dynamic variable of
@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
* Before reaching here, the caller is expected to hold text_mutex
* already, so we don't need to take another lock here and can
* ensure that patching is safe across all cores.
*
* We're currently using stop_machine() for ftrace & kprobes, and while
* that ensures text_mutex is held before installing the mappings it
* does not ensure text_mutex is held by the calling thread. That's
* safe but triggers a lockdep failure, so just elide it for that
* specific case.
*/
lockdep_assert_held(&text_mutex);
if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1);
@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb);
int patch_text(void *addr, u32 insn)
{
int ret;
struct patch_insn patch = {
.addr = addr,
.insn = insn,
.cpu_count = ATOMIC_INIT(0),
};
return stop_machine_cpuslocked(patch_text_cb,
&patch, cpu_online_mask);
/*
* kprobes takes text_mutex before calling patch_text(), but as we then
* call stop_machine(), the lockdep assertion in patch_insn_write()
* gets confused by the context in which the lock is taken.
* Instead, ensure the lock is held before calling stop_machine(), and
* set riscv_patch_in_stop_machine to skip the check in
* patch_insn_write().
*/
lockdep_assert_held(&text_mutex);
riscv_patch_in_stop_machine = true;
ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
riscv_patch_in_stop_machine = false;
return ret;
}
NOKPROBE_SYMBOL(patch_text);
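The riscv_patch_in_stop_machine flag exists because the lock-held assertion fires in the stop_machine() worker, which is not the thread that took text_mutex. A userspace model of an owner-checked assertion with such an elide flag, under the assumption that the worker runs while the main thread holds the lock (all names here are illustrative, not kernel API):

/* Owner-checked lock assertion with an elide flag, modeled on
 * riscv_patch_in_stop_machine. Compile: gcc -O2 -pthread elide.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t text_lock_owner;
static bool patch_in_stop_machine;

static void assert_text_lock_held(void)
{
	/* like lockdep_assert_held(), skipped while the stop_machine
	 * worker (which is not the lock owner) does the write */
	if (!patch_in_stop_machine)
		assert(pthread_equal(text_lock_owner, pthread_self()));
}

static void insn_write(void)
{
	assert_text_lock_held();
	puts("patched");
}

static void *stop_machine_worker(void *arg)
{
	insn_write();          /* runs without owning text_lock */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&text_lock);
	text_lock_owner = pthread_self();

	patch_in_stop_machine = true;   /* set before "stop_machine" */
	pthread_create(&t, NULL, stop_machine_worker, NULL);
	pthread_join(t, NULL);
	patch_in_stop_machine = false;

	pthread_mutex_unlock(&text_lock);
	return 0;
}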


@ -92,7 +92,7 @@ void notrace walk_stackframe(struct task_struct *task,
while (!kstack_end(ksp)) {
if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
pc = (*ksp++) - 0x4;
pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
}
}


@ -767,6 +767,7 @@ static int vector_config(char *str, char **error_out)
if (parsed == NULL) {
*error_out = "vector_config failed to parse parameters";
kfree(params);
return -EINVAL;
}


@ -132,8 +132,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
out ? 1 : 0,
posted ? cmd : HANDLE_NO_FREE(cmd),
GFP_ATOMIC);
if (ret)
if (ret) {
if (posted)
kfree(cmd);
goto out;
}
if (posted) {
virtqueue_kick(dev->cmd_vq);
@ -623,22 +626,33 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
struct um_pci_device *dev = vdev->priv;
int i;
/* Stop all virtqueues */
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
device_set_wakeup_enable(&vdev->dev, false);
mutex_lock(&um_pci_mtx);
for (i = 0; i < MAX_DEVICES; i++) {
if (um_pci_devices[i].dev != dev)
continue;
um_pci_devices[i].dev = NULL;
irq_free_desc(dev->irq);
break;
}
mutex_unlock(&um_pci_mtx);
um_pci_rescan();
if (i < MAX_DEVICES) {
struct pci_dev *pci_dev;
pci_dev = pci_get_slot(bridge->bus, i);
if (pci_dev)
pci_stop_and_remove_bus_device_locked(pci_dev);
}
/* Stop all virtqueues */
virtio_reset_device(vdev);
dev->cmd_vq = NULL;
dev->irq_vq = NULL;
vdev->config->del_vqs(vdev);
kfree(dev);
}


@ -168,7 +168,8 @@ static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
if (!vu_dev->registered)
return;
virtio_break_device(&vu_dev->vdev);
vu_dev->registered = 0;
schedule_work(&pdata->conn_broken_wk);
}
@ -1136,6 +1137,15 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
static void vu_of_conn_broken(struct work_struct *wk)
{
struct virtio_uml_platform_data *pdata;
struct virtio_uml_device *vu_dev;
pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
vu_dev = platform_get_drvdata(pdata->pdev);
virtio_break_device(&vu_dev->vdev);
/*
* We can't remove the device from the devicetree so the only thing we
* can do is warn.
@ -1266,8 +1276,14 @@ static int vu_unregister_cmdline_device(struct device *dev, void *data)
static void vu_conn_broken(struct work_struct *wk)
{
struct virtio_uml_platform_data *pdata;
struct virtio_uml_device *vu_dev;
pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
vu_dev = platform_get_drvdata(pdata->pdev);
virtio_break_device(&vu_dev->vdev);
vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}


@ -1,4 +1,4 @@
#define RUNTIME_DISCARD_EXIT
KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
#ifdef CONFIG_LD_SCRIPT_STATIC


@ -240,7 +240,6 @@ CONFIG_NET_SCH_FQ_CODEL=y
CONFIG_NET_SCH_FQ=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_BASIC=y
CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
@ -474,6 +473,7 @@ CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
CONFIG_TYPEC_TCPCI=y
CONFIG_TYPEC_UCSI=y
CONFIG_TYPEC_DP_ALTMODE=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set


@ -1695,6 +1695,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
void kvm_x86_vendor_exit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{


@ -51,7 +51,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
static void __resctrl_sched_in(void)
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
@ -63,13 +63,13 @@ static void __resctrl_sched_in(void)
* Else use the closid/rmid assigned to this cpu.
*/
if (static_branch_likely(&rdt_alloc_enable_key)) {
tmp = READ_ONCE(current->closid);
tmp = READ_ONCE(tsk->closid);
if (tmp)
closid = tmp;
}
if (static_branch_likely(&rdt_mon_enable_key)) {
tmp = READ_ONCE(current->rmid);
tmp = READ_ONCE(tsk->rmid);
if (tmp)
rmid = tmp;
}
@ -90,17 +90,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
return val * scale;
}
static inline void resctrl_sched_in(void)
static inline void resctrl_sched_in(struct task_struct *tsk)
{
if (static_branch_likely(&rdt_enable_key))
__resctrl_sched_in();
__resctrl_sched_in(tsk);
}
void resctrl_cpu_detect(struct cpuinfo_x86 *c);
#else
static inline void resctrl_sched_in(void) {}
static inline void resctrl_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
#endif /* CONFIG_X86_CPU_RESCTRL */


@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
}
}
#endif
/*
* Work around Erratum 1386. The XSAVES instruction malfunctions in
* certain circumstances on Zen1/2 uarch, and not all parts have had
* updated microcode at the time of writing (March 2023).
*
* Affected parts all have no supervisor XSAVE states, meaning that
* the XSAVEC instruction (which works fine) is equivalent.
*/
clear_cpu_cap(c, X86_FEATURE_XSAVES);
}
static void init_amd_zn(struct cpuinfo_x86 *c)


@ -314,7 +314,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
resctrl_sched_in();
resctrl_sched_in(current);
}
/*
@ -535,7 +535,7 @@ static void _update_task_closid_rmid(void *task)
* Otherwise, the MSR is updated when the task is scheduled in.
*/
if (task == current)
resctrl_sched_in();
resctrl_sched_in(task);
}
static void update_task_closid_rmid(struct task_struct *t)


@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
switch_fpu_finish();
/* Load the Intel cache allocation PQR MSR. */
resctrl_sched_in();
resctrl_sched_in(next_p);
return prev_p;
}


@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
resctrl_sched_in();
resctrl_sched_in(next_p);
return prev_p;
}


@ -5080,15 +5080,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
static int __init svm_init(void)
{
int r;
__unused_size_checks();
return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
r = kvm_x86_vendor_init(&svm_init_ops);
if (r)
return r;
/*
* Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace!
*/
r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
if (r)
goto err_kvm_init;
return 0;
err_kvm_init:
kvm_x86_vendor_exit();
return r;
}
static void __exit svm_exit(void)
{
kvm_exit();
kvm_x86_vendor_exit();
}
module_init(svm_init)
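The ordering constraint driving this change is that kvm_init() exposes /dev/kvm, so every vendor-specific step must be complete before it runs, and failures after it must unwind in reverse. A generic sketch of this staged-init/goto-unwind idiom (init_vendor() and init_user_visible() are placeholders, not KVM functions):

/* Staged init: user-visible registration last, teardown in reverse.
 * Compile: gcc -O2 staged.c -o staged
 */
#include <stdio.h>

static int init_vendor(void)       { puts("vendor up");   return 0; }
static void exit_vendor(void)      { puts("vendor down"); }
static int init_user_visible(void) { puts("visible");     return 0; }

static int module_init_fn(void)
{
	int r;

	r = init_vendor();
	if (r)
		return r;

	/* must come last: after this, users can reach us */
	r = init_user_visible();
	if (r)
		goto err_user_visible;

	return 0;

err_user_visible:
	exit_vendor();     /* unwind in reverse order */
	return r;
}

int main(void)
{
	return module_init_fn();
}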


@ -551,6 +551,33 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
return 0;
}
static void hv_reset_evmcs(void)
{
struct hv_vp_assist_page *vp_ap;
if (!static_branch_unlikely(&enable_evmcs))
return;
/*
* KVM should enable eVMCS if and only if all CPUs have a VP assist
* page, and should reject CPU onlining if eVMCS is enabled but the CPU
* doesn't have a VP assist page allocated.
*/
vp_ap = hv_get_vp_assist_page(smp_processor_id());
if (WARN_ON_ONCE(!vp_ap))
return;
/*
* Reset everything to support using non-enlightened VMCS access later
* (e.g. when we reload the module with enlightened_vmcs=0)
*/
vp_ap->nested_control.features.directhypercall = 0;
vp_ap->current_nested_vmcs = 0;
vp_ap->enlighten_vmentry = 0;
}
#else /* IS_ENABLED(CONFIG_HYPERV) */
static void hv_reset_evmcs(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
/*
@ -2501,6 +2528,8 @@ static void vmx_hardware_disable(void)
if (cpu_vmxoff())
kvm_spurious_fault();
hv_reset_evmcs();
intel_pt_handle_vmx(0);
}
@ -8427,41 +8456,23 @@ static void vmx_cleanup_l1d_flush(void)
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
static void vmx_exit(void)
static void __vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
#ifdef CONFIG_KEXEC_CORE
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
#endif
kvm_exit();
#if IS_ENABLED(CONFIG_HYPERV)
if (static_branch_unlikely(&enable_evmcs)) {
int cpu;
struct hv_vp_assist_page *vp_ap;
/*
* Reset everything to support using non-enlightened VMCS
* access later (e.g. when we reload the module with
* enlightened_vmcs=0)
*/
for_each_online_cpu(cpu) {
vp_ap = hv_get_vp_assist_page(cpu);
if (!vp_ap)
continue;
vp_ap->nested_control.features.directhypercall = 0;
vp_ap->current_nested_vmcs = 0;
vp_ap->enlighten_vmentry = 0;
}
static_branch_disable(&enable_evmcs);
}
#endif
vmx_cleanup_l1d_flush();
}
allow_smaller_maxphyaddr = false;
static void vmx_exit(void)
{
kvm_exit();
kvm_x86_vendor_exit();
__vmx_exit();
}
module_exit(vmx_exit);
@ -8502,23 +8513,20 @@ static int __init vmx_init(void)
}
#endif
r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
r = kvm_x86_vendor_init(&vmx_init_ops);
if (r)
return r;
/*
* Must be called after kvm_init() so enable_ept is properly set
* Must be called after common x86 init so enable_ept is properly set
* up. Hand in the mitigation parameter value that was stored by
* the pre-module-init parser. If no parameter was given, it will
* contain 'auto' which will be turned into the default 'cond'
* mitigation mode.
*/
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
if (r) {
vmx_exit();
return r;
}
if (r)
goto err_l1d_flush;
vmx_setup_fb_clear_ctrl();
@ -8542,6 +8550,21 @@ static int __init vmx_init(void)
if (!enable_ept)
allow_smaller_maxphyaddr = true;
/*
* Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace!
*/
r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
goto err_kvm_init;
return 0;
err_kvm_init:
__vmx_exit();
err_l1d_flush:
kvm_x86_vendor_exit();
return r;
}
module_init(vmx_init);


@ -9351,7 +9351,16 @@ static struct notifier_block pvclock_gtod_notifier = {
int kvm_arch_init(void *opaque)
{
struct kvm_x86_init_ops *ops = opaque;
return 0;
}
void kvm_arch_exit(void)
{
}
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
u64 host_pat;
int r;
@ -9441,8 +9450,9 @@ int kvm_arch_init(void *opaque)
kmem_cache_destroy(x86_emulator_cache);
return r;
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
void kvm_arch_exit(void)
void kvm_x86_vendor_exit(void)
{
#ifdef CONFIG_X86_64
if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
@ -9468,6 +9478,7 @@ void kvm_arch_exit(void)
WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
#endif
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{


@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
long ret;
asm("syscall" : "=a" (ret) :
"0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
asm("syscall"
: "=a" (ret)
: "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
: "rcx", "r11", "memory");
return ret;
}
@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
long ret;
asm("syscall" : "=a" (ret) :
"0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
asm("syscall"
: "=a" (ret)
: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
: "rcx", "r11", "memory");
return ret;
}
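The added clobbers matter because the x86-64 syscall instruction itself overwrites RCX (with the return RIP) and R11 (with the saved RFLAGS); without listing them, the compiler may keep live values in those registers across the asm. A standalone example with the corrected constraint set (raw_getpid() is illustrative):

/* Raw x86-64 Linux syscall with correct clobbers: the syscall
 * instruction destroys rcx and r11, so both must be listed.
 * Compile: gcc -O2 raw_syscall.c -o raw_syscall (x86-64 Linux only)
 */
#include <stdio.h>
#include <sys/syscall.h>

static long raw_getpid(void)
{
	long ret;

	asm volatile("syscall"
		     : "=a" (ret)
		     : "0" ((long)SYS_getpid)
		     : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	printf("pid = %ld\n", raw_getpid());
	return 0;
}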


@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);


@ -356,9 +356,10 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
}
EXPORT_SYMBOL_GPL(disk_uevent);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
{
struct block_device *bdev;
int ret = 0;
if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
return -EINVAL;
@ -366,16 +367,29 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
return -EINVAL;
if (disk->open_partitions)
return -EBUSY;
/* Someone else has bdev exclusively open? */
if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
return -EBUSY;
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
/*
* If the device is already opened exclusively by the current thread,
* it's safe to scan partitions; otherwise, use bd_prepare_to_claim() to
* synchronize with other exclusive openers and other partition
* scanners.
*/
if (!(mode & FMODE_EXCL)) {
ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions);
if (ret)
return ret;
}
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
blkdev_put(bdev, mode);
return 0;
ret = PTR_ERR(bdev);
else
blkdev_put(bdev, mode & ~FMODE_EXCL);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(disk->part0, disk_scan_partitions);
return ret;
}
/**
@ -501,9 +515,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
if (ret)
goto out_unregister_bdi;
/* Make sure the first partition scan proceeds */
if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev_add(disk->part0, ddev->devt);
if (get_capacity(disk))
disk_scan_partitions(disk, FMODE_READ, NULL);
disk_scan_partitions(disk, FMODE_READ);
/*
* Announce the disk and partitions after all partitions are


@ -467,10 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
* user space. Note the separate arg/argp parameters that are needed
* to deal with the compat_ptr() conversion.
*/
static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
unsigned long arg, void __user *argp)
static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg,
void __user *argp)
{
struct block_device *bdev = I_BDEV(file->f_mapping->host);
unsigned int max_sectors;
switch (cmd) {
@ -528,8 +528,7 @@ static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
file);
return disk_scan_partitions(bdev->bd_disk, mode);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
@ -607,7 +606,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
break;
}
ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
if (ret != -ENOIOCTLCMD)
return ret;
@ -676,7 +675,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
break;
}
ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);


@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
/**
* acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
* @adev: ACPI companion of the target device.
*
* Evaluate _S0W for @adev and return the value produced by it or return
* ACPI_STATE_UNKNOWN on errors (including _S0W not present).
*/
u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
{
unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
if (ACPI_FAILURE(status))
return ACPI_STATE_UNKNOWN;
return state;
}
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);


@ -49,13 +49,13 @@
#include <trace/hooks/timer.h>
#include <trace/hooks/topology.h>
#include <trace/hooks/hung_task.h>
#include <trace/hooks/audio_usboffload.h>
#include <trace/hooks/bug.h>
#include <trace/hooks/softlockup.h>
#include <trace/hooks/power.h>
#include <trace/hooks/fault.h>
#include <trace/hooks/traps.h>
#include <trace/hooks/thermal.h>
#include <trace/hooks/audio_usboffload.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -161,11 +161,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_set_affinity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterrupt_tasks);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterrupt_tasks_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_vendor_set);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_ep_action);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_synctype);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_report_bug);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_watchdog_timer_softlockup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_freeze_todo);
@ -179,3 +174,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_el1_fpac);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_panic_unhandled);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect);


@ -322,8 +322,10 @@ static int hd44780_probe(struct platform_device *pdev)
static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
struct hd44780_common *hdc = lcd->drvdata;
charlcd_unregister(lcd);
kfree(hdc->hd44780);
kfree(lcd->drvdata);
kfree(lcd);


@ -251,7 +251,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
unsigned int index, sib_index;
int ret = 0;
if (this_cpu_ci->cpu_map_populated)
@ -279,11 +279,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
sib_leaf = per_cpu_cacheinfo_idx(i, index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
break;
}
}
}
/* record the maximum cache line size */
@ -297,7 +299,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index;
unsigned int sibling, index, sib_index;
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
@ -308,9 +310,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
break;
}
}
}
if (of_have_populated_dt())
of_node_put(this_leaf->fw_token);
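Both hunks replace same-index pairing with a search of the sibling CPU's whole leaf list, since CPUs may report different numbers of cache leaves and leaf index N on one CPU need not describe the same cache as leaf N on another. A small userspace model of the corrected matching loop (the id field stands in for cache_leaves_are_shared()):

/* Model of matching cache leaves across CPUs with different leaf
 * counts: search the sibling's whole list rather than indexing by
 * position. Compile: gcc -O2 leaves.c -o leaves
 */
#include <stdio.h>

struct leaf { int id; };

/* CPU 0 has 3 leaves, CPU 1 has 4 -- same-index pairing would pair
 * cpu0's leaf 2 (id 2) with cpu1's leaf 2 (id 3), which is wrong */
static struct leaf cpu0[] = { {0}, {1}, {2} };
static struct leaf cpu1[] = { {0}, {1}, {3}, {2} };

static int shared(const struct leaf *a, const struct leaf *b)
{
	return a->id == b->id;   /* stands in for cache_leaves_are_shared() */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 4; j++) {   /* search, don't index */
			if (shared(&cpu0[i], &cpu1[j])) {
				printf("cpu0 leaf %d <-> cpu1 leaf %d\n", i, j);
				break;
			}
		}
	}
	return 0;
}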


@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m)
static void component_debugfs_del(struct aggregate_device *m)
{
debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
}
#else


@ -372,7 +372,7 @@ late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
debugfs_lookup_and_remove("devices_deferred", NULL);
}
__exitcall(deferred_probe_exit);


@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo,
return -EINVAL;
}
/* Avoid assigning overflow values */
if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
return -EOVERFLOW;
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
/* loff_t vars have been assigned __u64 */
if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
return -EOVERFLOW;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
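The reordered check rejects the __u64 inputs before they are stored, rather than assigning to the signed loff_t fields first and then testing for a negative result, which relies on implementation-defined conversion. A minimal standalone illustration of the check-before-store pattern:

/* Check an unsigned 64-bit value against LLONG_MAX before storing
 * it in a signed 64-bit field, instead of assigning and then testing
 * for < 0. Compile: gcc -O2 ovf.c -o ovf
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int set_offset(int64_t *out, uint64_t in)
{
	if (in > LLONG_MAX)       /* reject before the value is stored */
		return -1;        /* -EOVERFLOW in the kernel */
	*out = (int64_t)in;
	return 0;
}

int main(void)
{
	int64_t off;

	printf("%d\n", set_offset(&off, 42));           /* 0  */
	printf("%d\n", set_offset(&off, UINT64_MAX));   /* -1 */
	return 0;
}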
@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}
if (cmd->blkcg_css)
kthread_associate_blkcg(cmd->blkcg_css);
if (cmd->memcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
old_memcg = set_active_memcg(
mem_cgroup_from_css(cmd->memcg_css));
mem_cgroup_from_css(cmd_memcg_css));
/*
* do_req_filebacked() may call blk_mq_complete_request() synchronously
* or asynchronously if using aio. Hence, do not touch 'cmd' after
* do_req_filebacked() has returned unless we are sure that 'cmd' has
* not yet been completed.
*/
ret = do_req_filebacked(lo, rq);
if (cmd->blkcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);
if (cmd->memcg_css) {
if (cmd_memcg_css) {
set_active_memcg(old_memcg);
css_put(cmd->memcg_css);
css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else


@ -219,7 +219,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mutex_unlock(&mhi_chan->lock);
break;
case MHI_PKT_TYPE_RESET_CHAN_CMD:
dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
if (!ch_ring->started) {
dev_err(dev, "Channel (%u) not opened\n", ch_id);
return -ENODEV;
@ -990,44 +990,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
static void mhi_ep_reset_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state cur_state;
int ret;
mhi_ep_abort_transfer(mhi_cntrl);
mhi_ep_power_down(mhi_cntrl);
mutex_lock(&mhi_cntrl->state_lock);
spin_lock_bh(&mhi_cntrl->state_lock);
/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
mhi_ep_mmio_reset(mhi_cntrl);
cur_state = mhi_cntrl->mhi_state;
spin_unlock_bh(&mhi_cntrl->state_lock);
/*
* Only proceed further if the reset is due to SYS_ERR. The host will
* also issue a reset during shutdown, and we don't need to re-init in
* that case.
*/
if (cur_state == MHI_STATE_SYS_ERR) {
mhi_ep_mmio_init(mhi_cntrl);
if (cur_state == MHI_STATE_SYS_ERR)
mhi_ep_power_up(mhi_cntrl);
/* Set AMSS EE before signaling ready state */
mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
/* All set, notify the host that we are ready */
ret = mhi_ep_set_ready_state(mhi_cntrl);
if (ret)
return;
dev_dbg(dev, "READY state notification sent to the host\n");
ret = mhi_ep_enable(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
return;
}
enable_irq(mhi_cntrl->irq);
}
mutex_unlock(&mhi_cntrl->state_lock);
}
/*
@ -1106,11 +1087,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
if (mhi_cntrl->enabled)
if (mhi_cntrl->enabled) {
mhi_ep_abort_transfer(mhi_cntrl);
kfree(mhi_cntrl->mhi_event);
disable_irq(mhi_cntrl->irq);
kfree(mhi_cntrl->mhi_event);
disable_irq(mhi_cntrl->irq);
}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
@ -1400,8 +1381,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
spin_lock_init(&mhi_cntrl->state_lock);
spin_lock_init(&mhi_cntrl->list_lock);
mutex_init(&mhi_cntrl->state_lock);
mutex_init(&mhi_cntrl->event_lock);
/* Set MHI version and AMSS EE before enumeration */


@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
int ret;
/* If MHI is in M3, resume suspended channels */
spin_lock_bh(&mhi_cntrl->state_lock);
mutex_lock(&mhi_cntrl->state_lock);
old_state = mhi_cntrl->mhi_state;
if (old_state == MHI_STATE_M3)
mhi_ep_resume_channels(mhi_cntrl);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
spin_unlock_bh(&mhi_cntrl->state_lock);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
return ret;
goto err_unlock;
}
/* Signal host that the device moved to M0 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
if (ret) {
dev_err(dev, "Failed sending M0 state change event\n");
return ret;
goto err_unlock;
}
if (old_state == MHI_STATE_READY) {
@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
if (ret) {
dev_err(dev, "Failed sending AMSS EE event\n");
return ret;
goto err_unlock;
}
}
return 0;
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
spin_lock_bh(&mhi_cntrl->state_lock);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
spin_unlock_bh(&mhi_cntrl->state_lock);
mutex_lock(&mhi_cntrl->state_lock);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
return ret;
goto err_unlock;
}
mhi_ep_suspend_channels(mhi_cntrl);
@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
if (ret) {
dev_err(dev, "Failed sending M3 state change event\n");
return ret;
goto err_unlock;
}
return 0;
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
enum mhi_state mhi_state;
int ret, is_ready;
spin_lock_bh(&mhi_cntrl->state_lock);
mutex_lock(&mhi_cntrl->state_lock);
/* Ensure that the MHISTATUS is set to RESET by host */
mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
if (mhi_state != MHI_STATE_RESET || is_ready) {
dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
spin_unlock_bh(&mhi_cntrl->state_lock);
return -EIO;
ret = -EIO;
goto err_unlock;
}
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
spin_unlock_bh(&mhi_cntrl->state_lock);
if (ret)
mhi_ep_handle_syserr(mhi_cntrl);
err_unlock:
mutex_unlock(&mhi_cntrl->state_lock);
return ret;
}

View File

@ -74,7 +74,8 @@
/*
* Timer values
*/
#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
/* How many times do we retry sending/receiving the message. */
@ -82,7 +83,9 @@
#define SSIF_RECV_RETRIES 250
#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
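
For reference, the *_JIFFIES macros above scale a microsecond interval to nanoseconds and divide by TICK_NSEC, the tick length in nanoseconds. A standalone sketch of the same arithmetic, assuming HZ=250 (so TICK_NSEC = 4,000,000 ns) purely for illustration:

#include <stdio.h>

/* Illustrative only: TICK_NSEC depends on the kernel's HZ;
 * HZ=250 (TICK_NSEC = 4000000 ns) is assumed here. */
#define TICK_NSEC 4000000UL

static unsigned long usec_to_jiffies(unsigned long usec)
{
	return (usec * 1000UL) / TICK_NSEC;
}

int main(void)
{
	/* 60000 us (T3/T6) -> 15 ticks; 5000 us -> 1 tick at HZ=250 */
	printf("SSIF_MSG_JIFFIES      = %lu\n", usec_to_jiffies(60000));
	printf("SSIF_MSG_PART_JIFFIES = %lu\n", usec_to_jiffies(5000));
	return 0;
}
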
/*
@ -229,6 +232,9 @@ struct ssif_info {
bool got_alert;
bool waiting_alert;
/* Used to inform the timeout that it should do a resend. */
bool do_resend;
/*
* If set to true, this will request events the next time the
* state machine is idle.
@ -241,12 +247,6 @@ struct ssif_info {
*/
bool req_flags;
/*
* Used to perform timer operations when run-to-completion
* mode is on. This is a countdown timer.
*/
int rtc_us_timer;
/* Used for sending/receiving data. +1 for the length. */
unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
unsigned int data_len;
@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
static void start_get(struct ssif_info *ssif_info)
{
ssif_info->rtc_us_timer = 0;
ssif_info->multi_pos = 0;
ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
@ -538,22 +537,28 @@ static void start_get(struct ssif_info *ssif_info)
ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
}
static void start_resend(struct ssif_info *ssif_info);
static void retry_timeout(struct timer_list *t)
{
struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
bool waiting;
bool waiting, resend;
if (ssif_info->stopping)
return;
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
resend = ssif_info->do_resend;
ssif_info->do_resend = false;
waiting = ssif_info->waiting_alert;
ssif_info->waiting_alert = false;
ipmi_ssif_unlock_cond(ssif_info, flags);
if (waiting)
start_get(ssif_info);
if (resend)
start_resend(ssif_info);
}
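
The hunks around retry_timeout() implement a deferred-resend pattern: on a failed write the handler only sets do_resend and arms the retry timer, and the timer callback consumes the flag. A toy standalone model of that flag-plus-timer flow, with direct calls standing in for the timer and the driver's IPMI lock, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mini-model: the write completion only records intent;
 * the (simulated) timer callback decides whether to resend or poll. */
struct ssif_model {
	bool do_resend;
	bool waiting_alert;
};

static void on_write_failed(struct ssif_model *m)
{
	/* Per the spec, wait T6 before retrying the send. */
	m->do_resend = true;
	/* ...mod_timer(retry_timer, jiffies + SSIF_REQ_RETRY_JIFFIES)... */
}

static void retry_timeout_model(struct ssif_model *m)
{
	bool resend = m->do_resend, waiting = m->waiting_alert;

	m->do_resend = false;
	m->waiting_alert = false;
	if (waiting)
		printf("start_get()\n");
	if (resend)
		printf("start_resend()\n");
}

int main(void)
{
	struct ssif_model m = { 0 };

	on_write_failed(&m);
	retry_timeout_model(&m);	/* prints start_resend() */
	return 0;
}
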
static void watch_timeout(struct timer_list *t)
@ -602,8 +607,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
start_get(ssif_info);
}
static void start_resend(struct ssif_info *ssif_info);
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len)
{
@ -622,7 +625,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
ssif_info->rtc_us_timer = SSIF_MSG_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
@ -909,7 +911,13 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
if (result < 0) {
ssif_info->retries_left--;
if (ssif_info->retries_left > 0) {
start_resend(ssif_info);
/*
* Wait the retry timeout per the spec,
* then redo the send.
*/
ssif_info->do_resend = true;
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_REQ_RETRY_JIFFIES);
return;
}
@ -973,7 +981,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_PART_JIFFIES);
@ -1320,8 +1327,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
if (ret) {
retry_cnt--;
if (retry_cnt > 0)
if (retry_cnt > 0) {
msleep(SSIF_REQ_RETRY_MSEC);
goto retry1;
}
return -ENODEV;
}
@ -1462,8 +1471,10 @@ static int start_multipart_test(struct i2c_client *client,
32, msg);
if (ret) {
retry_cnt--;
if (retry_cnt > 0)
if (retry_cnt > 0) {
msleep(SSIF_REQ_RETRY_MSEC);
goto retry_write;
}
dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
return ret;
}

View File

@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
ret = -EIO;
virt = acpi_os_map_iomem(start, len);
if (!virt)
if (!virt) {
dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
/* try EFI log next */
ret = -ENODEV;
goto err;
}
memcpy_fromio(log->bios_event_log, virt, len);

View File

@ -507,6 +507,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
/*
* Some AMD fTPM versions may cause stutter
* https://www.amd.com/en/support/kb/faq/pa-410
*
* Fixes are available in two series of fTPM firmware:
* 6.x.y.z series: 6.0.18.6 +
* 3.x.y.z series: 3.57.y.5 +
*/
static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
u32 val1, val2;
u64 version;
int ret;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
return false;
ret = tpm_request_locality(chip);
if (ret)
return false;
ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
if (ret)
goto release;
if (val1 != 0x414D4400U /* AMD */) {
ret = -ENODEV;
goto release;
}
ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
if (ret)
goto release;
ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
release:
tpm_relinquish_locality(chip);
if (ret)
return false;
version = ((u64)val1 << 32) | val2;
if ((version >> 48) == 6) {
if (version >= 0x0006000000180006ULL)
return false;
} else if ((version >> 48) == 3) {
if (version >= 0x0003005700000005ULL)
return false;
} else {
return false;
}
dev_warn(&chip->dev,
"AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
version);
return true;
}
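
The two 32-bit firmware properties read above are packed into one 64-bit version, and each component of the human-readable version appears as a 16-bit hex group in the thresholds; the field interpretation below is inferred from those literals, not from an AMD spec. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_version(uint32_t val1, uint32_t val2)
{
	return ((uint64_t)val1 << 32) | val2;
}

int main(void)
{
	/* The 6.x fixed-firmware threshold from the code above. */
	uint64_t v = pack_version(0x00060000, 0x00180006);

	printf("version = 0x%016llx\n", (unsigned long long)v);
	printf("series  = %u\n", (unsigned)(v >> 48));		/* 6 */
	printf("group 2 = 0x%04x\n", (unsigned)((v >> 32) & 0xffff));
	printf("group 3 = 0x%04x\n", (unsigned)((v >> 16) & 0xffff));
	printf("group 4 = 0x%04x\n", (unsigned)(v & 0xffff));
	return 0;
}
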
static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
@ -516,7 +573,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int tpm_add_hwrng(struct tpm_chip *chip)
{
if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
tpm_amd_is_rng_defective(chip))
return 0;
snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),

View File

@ -150,6 +150,79 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
enum tpm2_pt_props {
TPM2_PT_NONE = 0x00000000,
TPM2_PT_GROUP = 0x00000100,
TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
TPM2_PT_MODES = TPM2_PT_FIXED + 45,
TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
TPM2_PT_VAR = TPM2_PT_GROUP * 2,
TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
};
/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
* bytes, but 128 is still a relatively large number of random bytes and

View File

@ -22,7 +22,7 @@ config CLK_RENESAS
select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
select CLK_R8A7795 if ARCH_R8A77951
select CLK_R8A77960 if ARCH_R8A77960
select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965

View File

@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
};
static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1),
DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1),
DEF_MOD("tmu4", 121, R8A7795_CLK_S0D6),
@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac30", 326, R8A7795_CLK_S3D1),
DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */
DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac31", 329, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7795_CLK_CP),
DEF_MOD("pwm", 523, R8A7795_CLK_S0D12),
DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2),
DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2),
DEF_MOD("fcpvd0", 603, R8A7795_CLK_S0D2),
DEF_MOD("fcpvb1", 606, R8A7795_CLK_S0D1),
DEF_MOD("fcpvb0", 607, R8A7795_CLK_S0D1),
DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpvi1", 610, R8A7795_CLK_S0D1),
DEF_MOD("fcpvi0", 611, R8A7795_CLK_S0D1),
DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpf1", 614, R8A7795_CLK_S0D1),
DEF_MOD("fcpf0", 615, R8A7795_CLK_S0D1),
DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpcs", 619, R8A7795_CLK_S0D1),
DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspd2", 621, R8A7795_CLK_S0D2),
DEF_MOD("vspd1", 622, R8A7795_CLK_S0D2),
DEF_MOD("vspd0", 623, R8A7795_CLK_S0D2),
DEF_MOD("vspbc", 624, R8A7795_CLK_S0D1),
DEF_MOD("vspbd", 626, R8A7795_CLK_S0D1),
DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2),
@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("cmm2", 709, R8A7795_CLK_S2D1),
DEF_MOD("cmm1", 710, R8A7795_CLK_S2D1),
DEF_MOD("cmm0", 711, R8A7795_CLK_S2D1),
DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
DEF_MOD("csi40", 716, R8A7795_CLK_CSI0),
@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
{ 2, 192, 1, 192, 1, 32, },
};
static const struct soc_device_attribute r8a7795es1[] __initconst = {
static const struct soc_device_attribute r8a7795_denylist[] __initconst = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
};
/*
* Fixups for R-Car H3 ES1.x
*/
static const unsigned int r8a7795es1_mod_nullify[] __initconst = {
MOD_CLK_ID(326), /* USB-DMAC3-0 */
MOD_CLK_ID(329), /* USB-DMAC3-1 */
MOD_CLK_ID(700), /* EHCI/OHCI3 */
MOD_CLK_ID(705), /* HS-USB-IF3 */
};
static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
{ MOD_CLK_ID(118), R8A7795_CLK_S2D1 }, /* FDP1-1 */
{ MOD_CLK_ID(119), R8A7795_CLK_S2D1 }, /* FDP1-0 */
{ MOD_CLK_ID(121), R8A7795_CLK_S3D2 }, /* TMU4 */
{ MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */
{ MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */
{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
{ MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */
{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
{ MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
{ MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */
{ MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */
{ MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */
{ MOD_CLK_ID(606), R8A7795_CLK_S2D1 }, /* FCPVB1 */
{ MOD_CLK_ID(607), R8A7795_CLK_S2D1 }, /* FCPVB0 */
{ MOD_CLK_ID(610), R8A7795_CLK_S2D1 }, /* FCPVI1 */
{ MOD_CLK_ID(611), R8A7795_CLK_S2D1 }, /* FCPVI0 */
{ MOD_CLK_ID(614), R8A7795_CLK_S2D1 }, /* FCPF1 */
{ MOD_CLK_ID(615), R8A7795_CLK_S2D1 }, /* FCPF0 */
{ MOD_CLK_ID(619), R8A7795_CLK_S2D1 }, /* FCPCS */
{ MOD_CLK_ID(621), R8A7795_CLK_S2D1 }, /* VSPD2 */
{ MOD_CLK_ID(622), R8A7795_CLK_S2D1 }, /* VSPD1 */
{ MOD_CLK_ID(623), R8A7795_CLK_S2D1 }, /* VSPD0 */
{ MOD_CLK_ID(624), R8A7795_CLK_S2D1 }, /* VSPBC */
{ MOD_CLK_ID(626), R8A7795_CLK_S2D1 }, /* VSPBD */
{ MOD_CLK_ID(630), R8A7795_CLK_S2D1 }, /* VSPI1 */
{ MOD_CLK_ID(631), R8A7795_CLK_S2D1 }, /* VSPI0 */
{ MOD_CLK_ID(804), R8A7795_CLK_S2D1 }, /* VIN7 */
{ MOD_CLK_ID(805), R8A7795_CLK_S2D1 }, /* VIN6 */
{ MOD_CLK_ID(806), R8A7795_CLK_S2D1 }, /* VIN5 */
{ MOD_CLK_ID(807), R8A7795_CLK_S2D1 }, /* VIN4 */
{ MOD_CLK_ID(808), R8A7795_CLK_S2D1 }, /* VIN3 */
{ MOD_CLK_ID(809), R8A7795_CLK_S2D1 }, /* VIN2 */
{ MOD_CLK_ID(810), R8A7795_CLK_S2D1 }, /* VIN1 */
{ MOD_CLK_ID(811), R8A7795_CLK_S2D1 }, /* VIN0 */
{ MOD_CLK_ID(812), R8A7795_CLK_S3D2 }, /* EAVB-IF */
{ MOD_CLK_ID(820), R8A7795_CLK_S2D1 }, /* IMR3 */
{ MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */
{ MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */
{ MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */
{ MOD_CLK_ID(905), R8A7795_CLK_CP }, /* GPIO7 */
{ MOD_CLK_ID(906), R8A7795_CLK_CP }, /* GPIO6 */
{ MOD_CLK_ID(907), R8A7795_CLK_CP }, /* GPIO5 */
{ MOD_CLK_ID(908), R8A7795_CLK_CP }, /* GPIO4 */
{ MOD_CLK_ID(909), R8A7795_CLK_CP }, /* GPIO3 */
{ MOD_CLK_ID(910), R8A7795_CLK_CP }, /* GPIO2 */
{ MOD_CLK_ID(911), R8A7795_CLK_CP }, /* GPIO1 */
{ MOD_CLK_ID(912), R8A7795_CLK_CP }, /* GPIO0 */
{ MOD_CLK_ID(918), R8A7795_CLK_S3D2 }, /* I2C6 */
{ MOD_CLK_ID(919), R8A7795_CLK_S3D2 }, /* I2C5 */
{ MOD_CLK_ID(927), R8A7795_CLK_S3D2 }, /* I2C4 */
{ MOD_CLK_ID(928), R8A7795_CLK_S3D2 }, /* I2C3 */
};
/*
* Fixups for R-Car H3 ES2.x
*/
static const unsigned int r8a7795es2_mod_nullify[] __initconst = {
MOD_CLK_ID(117), /* FDP1-2 */
MOD_CLK_ID(327), /* USB3-IF1 */
MOD_CLK_ID(600), /* FCPVD3 */
MOD_CLK_ID(609), /* FCPVI2 */
MOD_CLK_ID(613), /* FCPF2 */
MOD_CLK_ID(616), /* FCPCI1 */
MOD_CLK_ID(617), /* FCPCI0 */
MOD_CLK_ID(620), /* VSPD3 */
MOD_CLK_ID(629), /* VSPI2 */
MOD_CLK_ID(713), /* CSI21 */
};
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
u32 cpg_mode;
int error;
/*
* We panic here to ensure removed SoCs and clk updates are always in
* sync, to avoid overclocking damage. The panic can only be seen with
* the command-line args 'earlycon keep_bootcon'. But these SoCs were
* for developers only anyhow.
*/
if (soc_device_match(r8a7795_denylist))
panic("SoC not supported anymore!\n");
error = rcar_rst_read_mode_pins(&cpg_mode);
if (error)
return error;
@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
return -EINVAL;
}
if (soc_device_match(r8a7795es1)) {
cpg_core_nullify_range(r8a7795_core_clks,
ARRAY_SIZE(r8a7795_core_clks),
R8A7795_CLK_S0D2, R8A7795_CLK_S0D12);
mssr_mod_nullify(r8a7795_mod_clks,
ARRAY_SIZE(r8a7795_mod_clks),
r8a7795es1_mod_nullify,
ARRAY_SIZE(r8a7795es1_mod_nullify));
mssr_mod_reparent(r8a7795_mod_clks,
ARRAY_SIZE(r8a7795_mod_clks),
r8a7795es1_mod_reparent,
ARRAY_SIZE(r8a7795es1_mod_reparent));
} else {
mssr_mod_nullify(r8a7795_mod_clks,
ARRAY_SIZE(r8a7795_mod_clks),
r8a7795es2_mod_nullify,
ARRAY_SIZE(r8a7795es2_mod_nullify));
}
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}

View File

@ -310,19 +310,10 @@ static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
static u32 cpg_quirks __initdata;
#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7795", .revision = "ES1.0",
.data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
},
{
.soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)(RCKCR_CKSEL),
},
{
.soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)(RCKCR_CKSEL),
@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
* multiplier when cpufreq changes between normal and boost
* modes.
*/
mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
return cpg_pll_clk_register(core->name, __clk_get_name(parent),
base, mult, CPG_PLL0CR, 0);
base, 2, CPG_PLL0CR, 0);
case CLK_TYPE_GEN3_PLL1:
mult = cpg_pll_config->pll1_mult;
@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
* multiplier when cpufreq changes between normal and boost
* modes.
*/
mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
return cpg_pll_clk_register(core->name, __clk_get_name(parent),
base, mult, CPG_PLL2CR, 2);
base, 2, CPG_PLL2CR, 2);
case CLK_TYPE_GEN3_PLL3:
mult = cpg_pll_config->pll3_mult;
@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
*/
value = readl(base + CPG_PLL4CR);
mult = (((value >> 24) & 0x7f) + 1) * 2;
if (cpg_quirks & PLL_ERRATA)
mult *= 2;
break;
case CLK_TYPE_GEN3_SDH:

View File

@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void)
subsys_initcall(cpg_mssr_init);
void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
unsigned int num_core_clks,
unsigned int first_clk,
unsigned int last_clk)
{
unsigned int i;
for (i = 0; i < num_core_clks; i++)
if (core_clks[i].id >= first_clk &&
core_clks[i].id <= last_clk)
core_clks[i].name = NULL;
}
void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const unsigned int *clks, unsigned int n)
@ -1139,19 +1126,5 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
}
}
void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const struct mssr_mod_reparent *clks,
unsigned int n)
{
unsigned int i, j;
for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
if (mod_clks[i].id == clks[j].clk) {
mod_clks[i].parent = clks[j].parent;
j++;
}
}
MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");

View File

@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np,
/*
* Helpers for fixing up clock tables depending on SoC revision
*/
struct mssr_mod_reparent {
unsigned int clk, parent;
};
extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks,
unsigned int num_core_clks,
unsigned int first_clk,
unsigned int last_clk);
extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const unsigned int *clks, unsigned int n);
extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
unsigned int num_mod_clks,
const struct mssr_mod_reparent *clks,
unsigned int n);
#endif

View File

@ -43,6 +43,39 @@ struct dma_buf_list {
static struct dma_buf_list db_list;
/**
* dma_buf_get_each - Iterate over db_list and invoke a callback for
* each dmabuf so the caller can extract the information it needs.
* db_list is held locked for the whole walk so that it cannot be
* modified while the traversal is in progress.
*
* @callback: [in] Called once for each dmabuf in db_list.
* @private: [in] User-defined data passed through to each callback
* invocation.
*
* Returns 0 on success, otherwise the non-zero value returned by
* mutex_lock_interruptible() or by the callback.
*/
int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf,
void *private), void *private)
{
struct dma_buf *buf;
int ret = mutex_lock_interruptible(&db_list.lock);
if (ret)
return ret;
list_for_each_entry(buf, &db_list.head, list_node) {
ret = callback(buf, private);
if (ret)
break;
}
mutex_unlock(&db_list.lock);
return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get_each, MINIDUMP);
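
A kernel-context usage sketch for the new iterator, not part of the patch: the tally struct and callback below are hypothetical; only dma_buf's size field and the dma_buf_get_each() signature above are assumed:

#include <linux/dma-buf.h>

/* Hypothetical tally type and callback, for illustration only. */
struct dmabuf_tally {
	unsigned long count;
	unsigned long long bytes;
};

static int tally_cb(const struct dma_buf *dmabuf, void *private)
{
	struct dmabuf_tally *t = private;

	t->count++;
	t->bytes += dmabuf->size;
	return 0;	/* a non-zero return stops the walk and is returned */
}

/* Caller side:
 *	struct dmabuf_tally t = { 0 };
 *	int ret = dma_buf_get_each(tally_cb, &t);
 */
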
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
struct dma_buf *dmabuf;

View File

@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
{
/* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
"IdeaPad Duet 3 10IGL5"),
},
},
{},
};

View File

@ -393,9 +393,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
en = &nv_allowed_read_registers[i];
if (adev->reg_offset[en->hwip][en->inst] &&
reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
if (!adev->reg_offset[en->hwip][en->inst])
continue;
else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
*value = nv_get_register_value(adev,

View File

@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i];
if (adev->reg_offset[en->hwip][en->inst] &&
reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
if (!adev->reg_offset[en->hwip][en->inst])
continue;
else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;

View File

@ -47,19 +47,31 @@
static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
.codec_array = vcn_4_0_0_video_codecs_encode_array,
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
@ -68,23 +80,47 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
.codec_array = vcn_4_0_0_video_codecs_decode_array,
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
{
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
switch (adev->ip_versions[UVD_HWIP][0]) {
if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
return -EINVAL;
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
if (encode)
*codecs = &vcn_4_0_0_video_codecs_encode;
else
*codecs = &vcn_4_0_0_video_codecs_decode;
case IP_VERSION(4, 0, 4):
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
else
*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
} else {
if (encode)
*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
else
*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
}
return 0;
default:
return -EINVAL;
@ -254,9 +290,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
en = &soc21_allowed_read_registers[i];
if (adev->reg_offset[en->hwip][en->inst] &&
reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
if (!adev->reg_offset[en->hwip][en->inst])
continue;
else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
*value = soc21_get_register_value(adev,

View File

@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
if (!pdd->doorbell_index) {
int r = kfd_alloc_process_doorbells(pdd->dev,
&pdd->doorbell_index);
if (r)
if (r < 0)
return 0;
}

View File

@ -2109,13 +2109,19 @@ static bool dcn32_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
/* Calculate the available MALL space */
dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
dc, dc->ctx->dc_bios->vram_info.num_chans) *
dc->caps.mall_size_per_mem_channel * 1024 * 1024;
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
dc->caps.subvp_swath_height_margin_lines = 16;
@ -2545,3 +2551,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
return idle_pipe;
}
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
{
/*
* DCN32 and DCN321 SKUs may have different sizes for MALL
* but we may not be able to access all the MALL space.
* If num_chans is a power of 2, then we can access all
* of the available MALL space. Otherwise, we can only
* access:
*
* max_cab_size_in_bytes = total_cache_size_in_bytes *
* ((2^floor(log2(num_chans)))/num_chans)
*
* Calculating the MALL sizes for all available SKUs, we
* have come up with the following simplified check.
* - we have max_chans which provides the max MALL size.
* Each channel supports 4 MB of MALL, so:
*
* total_cache_size_in_bytes = max_chans * 4 MB
*
* - we have avail_chans which shows the number of channels
* we can use if we can't access the entire MALL space.
* It is generally half of max_chans
* - so we use the following checks:
*
* if (num_chans == max_chans), return max_chans
* if (num_chans < max_chans), return avail_chans
*
* - exception is GC_11_0_0 where we can't access max_chans,
* so we define max_avail_chans as the maximum available
* MALL space
*
*/
int gc_11_0_0_max_chans = 48;
int gc_11_0_0_max_avail_chans = 32;
int gc_11_0_0_avail_chans = 16;
int gc_11_0_3_max_chans = 16;
int gc_11_0_3_avail_chans = 8;
int gc_11_0_2_max_chans = 8;
int gc_11_0_2_avail_chans = 4;
if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
return (num_chans == gc_11_0_0_max_chans) ?
gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
return (num_chans == gc_11_0_2_max_chans) ?
gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
} else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
return (num_chans == gc_11_0_3_max_chans) ?
gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
}
}
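
A worked example of the power-of-two rule in the comment above, as standalone C: with num_chans = 48, 2^floor(log2(48)) = 32, so only 32 channels' worth of MALL is addressable, matching gc_11_0_0_max_avail_chans:

#include <stdio.h>

/* The generic rule from the comment above: if num_chans is not a power
 * of two, only (2^floor(log2(num_chans)))/num_chans of the cache is
 * addressable. Standalone sketch, not the driver code. */
static unsigned int pow2_floor(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int num_chans = 48;	/* GC_11_0_0 fully populated */
	unsigned int usable = pow2_floor(num_chans);

	/* 4 MB of MALL per channel, as in mall_size_per_mem_channel */
	printf("usable channels: %u -> %u MB of MALL\n", usable, usable * 4);
	return 0;
}
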

View File

@ -142,6 +142,10 @@ void dcn32_restore_mall_state(struct dc *dc,
struct dc_state *context,
struct mall_temp_config *temp_config);
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
/* definitions for run time init of reg offsets */
/* CLK SRC */

View File

@ -1697,11 +1697,18 @@ static bool dcn321_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
/* Calculate the available MALL space */
dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
dc, dc->ctx->dc_bios->vram_info.num_chans) *
dc->caps.mall_size_per_mem_channel * 1024 * 1024;
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;

View File

@ -676,7 +676,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
*/
if (pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
dcn32_allow_subvp_with_active_margin(pipe)))) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@ -2379,8 +2381,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
}
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans)
if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
@ -2558,3 +2563,30 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
{
bool allow = false;
uint32_t refresh_rate = 0;
/* Allow SubVP on displays that have active margin, but only for
* 2560x1440@60Hz for now. There must also be no scaling.
*
* Restricting to 2560x1440@60Hz enables 4K60 + 1440p60 configs
* for p-state switching.
*/
if (pipe->stream && pipe->plane_state) {
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
if (pipe->stream->timing.v_addressable == 1440 &&
pipe->stream->timing.h_addressable == 2560 &&
refresh_rate >= 55 && refresh_rate <= 65 &&
pipe->plane_state->src_rect.height == 1440 &&
pipe->plane_state->src_rect.width == 2560 &&
pipe->plane_state->dst_rect.height == 1440 &&
pipe->plane_state->dst_rect.width == 2560)
allow = true;
}
return allow;
}
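
The refresh_rate expression above is a ceiling division of the pixel clock by the frame size in pixels. A standalone sketch with hypothetical timing numbers chosen to land exactly on 60 Hz:

#include <stdio.h>

/* ceil(pix_clk / (htotal * vtotal)) via the (a + b - 1) / b idiom.
 * Timing numbers are illustrative only. */
int main(void)
{
	unsigned long long pix_clk_100hz = 2416992;	/* 241.6992 MHz */
	unsigned long long htotal = 2720, vtotal = 1481;
	unsigned long long frame = htotal * vtotal;
	unsigned long long refresh =
		(pix_clk_100hz * 100 + frame - 1) / frame;

	printf("refresh = %llu Hz\n", refresh);	/* 60 */
	return 0;
}
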

View File

@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
}
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans)
if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

View File

@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
if (!port)
if (!port) {
drm_dbg_kms(mgr->dev,
"VCPI %d for port %p not in topology, not creating a payload\n",
payload->vcpi, payload->port);
payload->vc_start_slot = -1;
return 0;
}
if (mgr->payload_count == 0)
mgr->next_start_slot = mst_state->start_slot;
@ -3644,6 +3649,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
}
out_unlock:
@ -3856,7 +3864,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
goto out;
goto out_clear_reply;
/* Multi-packet message transmission, don't clear the reply */
if (!msg->have_eomt)
@ -5355,27 +5363,52 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
* drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
* This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
* This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
* The MST topology state, or NULL if there's no topology state for this MST mgr
* The old MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_private_state *old_priv_state =
drm_atomic_get_old_private_obj_state(state, &mgr->base);
return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
/**
* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
* This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
* The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_private_state *priv_state =
struct drm_private_state *new_priv_state =
drm_atomic_get_new_private_obj_state(state, &mgr->base);
return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
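
A kernel-context sketch of how a driver might use the old/new getters together during atomic check; the inspect_mst_states() helper is hypothetical and not part of the patch:

#include <drm/display/drm_dp_mst_helper.h>

static void inspect_mst_states(struct drm_atomic_state *state,
			       struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_topology_state *old_st =
		drm_atomic_get_old_mst_topology_state(state, mgr);
	struct drm_dp_mst_topology_state *new_st =
		drm_atomic_get_new_mst_topology_state(state, mgr);

	/* Both getters return NULL when this mgr is not in the commit. */
	if (!old_st || !new_st)
		return;

	/* ...compare the old and new snapshots here... */
}
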

View File

@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
/* Sink EOTF is Bit map while infoframe is absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
connector->hdr_sink_metadata.hdmi_type1.eotf)) {
DRM_DEBUG_KMS("EOTF Not Supported\n");
return -EINVAL;
}
connector->hdr_sink_metadata.hdmi_type1.eotf))
DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
err = hdmi_drm_infoframe_init(frame);
if (err < 0)

View File

@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)

View File

@ -107,9 +107,6 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
bool
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
@ -160,3 +157,6 @@ menu "drm/i915 Unstable Evolution"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.unstable"
endmenu
config DRM_I915_GVT
bool

View File

@ -2053,7 +2053,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);

View File

@ -620,14 +620,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
const struct edid *edid, bool use_fallback)
{
return intel_opregion_get_panel_type(i915);
}
static int vbt_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
const struct edid *edid, bool use_fallback)
{
const struct bdb_lvds_options *lvds_options;
@ -652,7 +652,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
static int pnpid_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
const struct edid *edid, bool use_fallback)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
static int fallback_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
const struct edid *edid, bool use_fallback)
{
return 0;
return use_fallback ? 0 : -1;
}
enum panel_type {
@ -715,13 +715,13 @@ enum panel_type {
static int get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
const struct edid *edid, bool use_fallback)
{
struct {
const char *name;
int (*get_panel_type)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid);
const struct edid *edid, bool use_fallback);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
edid, use_fallback);
drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
@ -2592,6 +2593,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
static bool
intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
{
return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
}
static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
@ -2642,7 +2649,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
{
struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
is_dvi = intel_bios_encoder_supports_dvi(devdata);
@ -2650,13 +2657,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
is_crt = intel_bios_encoder_supports_crt(devdata);
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
is_edp = intel_bios_encoder_supports_edp(devdata);
is_dsi = intel_bios_encoder_supports_dsi(devdata);
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
drm_dbg_kms(&i915->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
HAS_LSPCON(i915) && child->lspcon,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
@ -2701,6 +2709,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
port = dsi_dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE)
return;
@ -3191,14 +3201,26 @@ void intel_bios_init(struct drm_i915_private *i915)
kfree(oprom_vbt);
}
void intel_bios_init_panel(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
static void intel_bios_init_panel(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid,
bool use_fallback)
{
init_vbt_panel_defaults(panel);
/* already have it? */
if (panel->vbt.panel_type >= 0) {
drm_WARN_ON(&i915->drm, !use_fallback);
return;
}
panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
panel->vbt.panel_type = get_panel_type(i915, devdata,
edid, use_fallback);
if (panel->vbt.panel_type < 0) {
drm_WARN_ON(&i915->drm, use_fallback);
return;
}
init_vbt_panel_defaults(panel);
parse_panel_options(i915, panel);
parse_generic_dtd(i915, panel);
@ -3213,6 +3235,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
parse_mipi_sequence(i915, panel);
}
void intel_bios_init_panel_early(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata)
{
intel_bios_init_panel(i915, panel, devdata, NULL, false);
}
void intel_bios_init_panel_late(struct drm_i915_private *i915,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
{
intel_bios_init_panel(i915, panel, devdata, edid, true);
}
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @i915: i915 device instance

View File

@ -232,10 +232,13 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
void intel_bios_init_panel(struct drm_i915_private *dev_priv,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid);
void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata);
void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);

View File

@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
INIT_LIST_HEAD(&connector->panel.fixed_modes);
intel_panel_init_alloc(connector);
return 0;
}

View File

@ -5969,6 +5969,10 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
if (ret)
return ret;
ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;

View File

@ -291,7 +291,7 @@ struct intel_vbt_panel_data {
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits */
unsigned int panel_type:4;
int panel_type;
unsigned int lvds_dither:1;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

View File

@ -5179,6 +5179,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
encoder->devdata);
intel_pps_init(intel_dp);
/* Cache DPCD and EDID for edp. */
@ -5213,8 +5216,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
intel_bios_init_panel(dev_priv, &intel_connector->panel,
encoder->devdata, IS_ERR(edid) ? NULL : edid);
intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
encoder->devdata, IS_ERR(edid) ? NULL : edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);

Some files were not shown because too many files have changed in this diff