Snap for 11616740 from cb9f58fd85 to android12-5.10-keystone-qcom-release

Change-Id: I0d1f404eadd7143c2b65b4df232c2451faf292df
Signed-off-by: Coastguard Worker <android-build-coastguard-worker@google.com>
commit a31d98fc53
@@ -6,3 +6,12 @@ Description:
OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
are free to create needed API under optee-ta-<uuid> directory.

What: /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
Date: November 2023
KernelVersion: 6.7
Contact: op-tee@lists.trustedfirmware.org
Description:
Allows to distinguish whether an OP-TEE based TA/device requires user-space
tee-supplicant to function properly or not. This attribute will be present for
devices which depend on tee-supplicant to be running.
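(Illustrative sketch, not part of this patch: one way userspace might consume the attribute described above; the TA UUID in the path is a made-up example.) ::

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical TA UUID, used only for illustration. */
		const char *attr =
			"/sys/bus/tee/devices/optee-ta-8aaaf200-2450-11e4-abe2-0002a5d5c51b/need_supplicant";

		/* The attribute is only present for TAs that depend on the
		 * user-space tee-supplicant being available. */
		if (access(attr, F_OK) == 0)
			printf("TA depends on tee-supplicant\n");
		else
			printf("TA does not depend on tee-supplicant\n");
		return 0;
	}
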
@@ -57,3 +57,12 @@ Description:
* 0 - default,
* 1 - overboost,
* 2 - silent

What: /sys/devices/platform/<platform>/dgpu_disable
Date: Aug 2022
KernelVersion: 5.17
Contact: "Luke Jones" <luke@ljones.dev>
Description:
Disable discrete GPU:
* 0 - Enable dGPU,
* 1 - Disable dGPU
@@ -31,18 +31,18 @@ see only some of them, depending on your kernel's configuration.

Table : Subdirectories in /proc/sys/net

========= =================== = ========== ==================
========= =================== = ========== ===================
Directory Content Directory Content
========= =================== = ========== ==================
802 E802 protocol mptcp Multipath TCP
appletalk Appletalk protocol netfilter Network Filter
========= =================== = ========== ===================
802 E802 protocol mptcp Multipath TCP
appletalk Appletalk protocol netfilter Network Filter
ax25 AX25 netrom NET/ROM
bridge Bridging rose X.25 PLP layer
core General parameter tipc TIPC
ethernet Ethernet protocol unix Unix domain sockets
ipv4 IP version 4 x25 X.25 protocol
bridge Bridging rose X.25 PLP layer
core General parameter tipc TIPC
ethernet Ethernet protocol unix Unix domain sockets
ipv4 IP version 4 x25 X.25 protocol
ipv6 IP version 6
========= =================== = ========== ==================
========= =================== = ========== ===================

1. /proc/sys/net/core - Network core options
============================================
@@ -1902,6 +1902,14 @@ accept_ra_min_hop_limit - INTEGER

Default: 1

accept_ra_min_lft - INTEGER
Minimum acceptable lifetime value in Router Advertisement.

RA sections with a lifetime less than this value shall be
ignored. Zero lifetimes stay unaffected.

Default: 0

accept_ra_pinfo - BOOLEAN
Learn Prefix Information in Router Advertisement.
@@ -70,6 +70,9 @@ Instead, the 2-factor form of the allocator should be used::

	foo = kmalloc_array(count, size, GFP_KERNEL);

Specifically, kmalloc() can be replaced with kmalloc_array(), and
kzalloc() can be replaced with kcalloc().
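(Illustrative sketch, not part of this patch: a minimal before/after of the 2-factor replacement just described, assuming a kernel build context; struct item is made up for illustration.) ::

	#include <linux/slab.h>
	#include <linux/types.h>

	struct item {
		u64 key;
		u64 value;
	};

	/* Overflow-prone: a huge 'count' can wrap the multiplication and
	 * silently allocate a buffer smaller than intended. */
	static struct item *alloc_items_unsafe(size_t count)
	{
		return kmalloc(count * sizeof(struct item), GFP_KERNEL);
	}

	/* Preferred: kmalloc_array() saturates on overflow and returns NULL
	 * instead of handing back an undersized buffer; kcalloc() is the
	 * zero-initializing equivalent for kzalloc(count * size, ...). */
	static struct item *alloc_items(size_t count)
	{
		return kmalloc_array(count, sizeof(struct item), GFP_KERNEL);
	}
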
If no 2-factor form is available, the saturate-on-overflow helpers should
be used::
@@ -90,9 +93,20 @@ Instead, use the helper::
array usage and switch to a `flexible array member
<#zero-length-and-one-element-arrays>`_ instead.

See array_size(), array3_size(), and struct_size(),
for more details as well as the related check_add_overflow() and
check_mul_overflow() family of functions.
For other calculations, please compose the use of the size_mul(),
size_add(), and size_sub() helpers. For example, in the case of::

	foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL);

Instead, use the helpers::

	foo = krealloc(size_add(current_size,
			size_mul(chunk_size,
				 size_sub(count, 3))), GFP_KERNEL);

For more details, also see array3_size() and flex_array_size(),
as well as the related check_mul_overflow(), check_add_overflow(),
check_sub_overflow(), and check_shl_overflow() family of functions.
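(Illustrative sketch, not part of this patch: one possible use of the struct_size() and check_mul_overflow() helpers mentioned above, assuming a kernel build context; struct table and its fields are made up for illustration.) ::

	#include <linux/slab.h>
	#include <linux/overflow.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	struct table {
		size_t nr;
		u64 entries[];	/* flexible array member */
	};

	static struct table *table_alloc(size_t nr)
	{
		/* struct_size() computes sizeof(*t) + nr * sizeof(t->entries[0])
		 * and saturates to SIZE_MAX on overflow, so the allocation
		 * fails cleanly instead of returning a short buffer. */
		struct table *t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);

		if (t)
			t->nr = nr;
		return t;
	}

	static int total_bytes(size_t count, size_t size, size_t *out)
	{
		/* check_mul_overflow() returns true when the product would
		 * not fit in *out. */
		if (check_mul_overflow(count, size, out))
			return -EOVERFLOW;
		return 0;
	}
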
simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull()
----------------------------------------------------------------------
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 198
SUBLEVEL = 205
EXTRAVERSION =
NAME = Dare mighty things
File diff suppressed because it is too large
@ -2565,6 +2565,7 @@
|
||||
__stack_chk_guard
|
||||
stack_depot_fetch
|
||||
stack_trace_save
|
||||
static_key_enable
|
||||
static_key_disable
|
||||
static_key_slow_dec
|
||||
static_key_slow_inc
|
||||
@ -2761,6 +2762,7 @@
|
||||
__traceiter_android_rvh_wake_up_new_task
|
||||
__traceiter_android_vh_account_task_time
|
||||
__traceiter_android_vh_add_page_to_lrulist
|
||||
__traceiter_android_vh_adjust_alloc_flags
|
||||
__traceiter_android_vh_alloc_pages_slowpath_begin
|
||||
__traceiter_android_vh_alloc_pages_slowpath_end
|
||||
__traceiter_android_vh_allow_domain_state
|
||||
@ -2937,6 +2939,7 @@
|
||||
__traceiter_android_vh_ufs_send_tm_command
|
||||
__traceiter_android_vh_update_page_mapcount
|
||||
__traceiter_android_vh_update_topology_flags_workfn
|
||||
__traceiter_android_vh_vmscan_kswapd_done
|
||||
__traceiter_binder_transaction_received
|
||||
__traceiter_cpu_frequency
|
||||
__traceiter_cpu_frequency_limits
|
||||
@ -2945,6 +2948,7 @@
|
||||
__traceiter_ipi_entry
|
||||
__traceiter_ipi_raise
|
||||
__traceiter_irq_handler_entry
|
||||
__traceiter_mm_vmscan_kswapd_wake
|
||||
__traceiter_net_dev_queue
|
||||
__traceiter_net_dev_xmit
|
||||
__traceiter_netif_receive_skb
|
||||
@ -2972,6 +2976,7 @@
|
||||
__traceiter_android_vh_free_oem_binder_struct
|
||||
__traceiter_android_vh_binder_special_task
|
||||
__traceiter_android_vh_binder_free_buf
|
||||
__traceiter_android_vh_binder_buffer_release
|
||||
__tracepoint_android_rvh_account_irq
|
||||
__tracepoint_android_rvh_after_enqueue_task
|
||||
__tracepoint_android_rvh_build_perf_domains
|
||||
@ -3035,6 +3040,7 @@
|
||||
__tracepoint_android_rvh_wake_up_new_task
|
||||
__tracepoint_android_vh_account_task_time
|
||||
__tracepoint_android_vh_add_page_to_lrulist
|
||||
__tracepoint_android_vh_adjust_alloc_flags
|
||||
__tracepoint_android_vh_alloc_pages_slowpath_begin
|
||||
__tracepoint_android_vh_alloc_pages_slowpath_end
|
||||
__tracepoint_android_vh_allow_domain_state
|
||||
@ -3211,6 +3217,7 @@
|
||||
__tracepoint_android_vh_ufs_send_tm_command
|
||||
__tracepoint_android_vh_update_page_mapcount
|
||||
__tracepoint_android_vh_update_topology_flags_workfn
|
||||
__tracepoint_android_vh_vmscan_kswapd_done
|
||||
__tracepoint_binder_transaction_received
|
||||
__tracepoint_cpu_frequency
|
||||
__tracepoint_cpu_frequency_limits
|
||||
@ -3219,6 +3226,7 @@
|
||||
__tracepoint_ipi_entry
|
||||
__tracepoint_ipi_raise
|
||||
__tracepoint_irq_handler_entry
|
||||
__tracepoint_mm_vmscan_kswapd_wake
|
||||
__tracepoint_net_dev_queue
|
||||
__tracepoint_net_dev_xmit
|
||||
__tracepoint_netif_receive_skb
|
||||
@ -3250,6 +3258,7 @@
|
||||
__tracepoint_android_vh_free_oem_binder_struct
|
||||
__tracepoint_android_vh_binder_special_task
|
||||
__tracepoint_android_vh_binder_free_buf
|
||||
__tracepoint_android_vh_binder_buffer_release
|
||||
trace_print_array_seq
|
||||
trace_print_flags_seq
|
||||
trace_print_hex_seq
|
||||
@ -3714,4 +3723,6 @@
|
||||
xhci_ring_cmd_db
|
||||
xhci_ring_free
|
||||
xhci_trb_virt_to_dma
|
||||
xt_register_match
|
||||
xt_unregister_match
|
||||
zero_pfn
|
||||
|
@ -46,6 +46,7 @@
|
||||
__traceiter_android_vh_unuse_swap_page
|
||||
__traceiter_android_vh_waiting_for_page_migration
|
||||
__traceiter_android_vh_should_end_madvise
|
||||
__traceiter_android_vh_exit_check
|
||||
__traceiter_android_vh_bio_free
|
||||
__traceiter_android_rvh_internal_blk_mq_alloc_request
|
||||
__traceiter_android_vh_internal_blk_mq_free_request
|
||||
@ -101,6 +102,7 @@
|
||||
__tracepoint_android_vh_unuse_swap_page
|
||||
__tracepoint_android_vh_waiting_for_page_migration
|
||||
__tracepoint_android_vh_should_end_madvise
|
||||
__tracepoint_android_vh_exit_check
|
||||
__tracepoint_android_vh_bio_free
|
||||
__tracepoint_android_rvh_internal_blk_mq_alloc_request
|
||||
__tracepoint_android_vh_internal_blk_mq_free_request
|
||||
|
@ -437,7 +437,7 @@ iomuxc_lpsr: iomuxc-lpsr@302c0000 {
|
||||
};
|
||||
|
||||
gpt1: timer@302d0000 {
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
|
||||
reg = <0x302d0000 0x10000>;
|
||||
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
|
||||
@ -446,7 +446,7 @@ gpt1: timer@302d0000 {
|
||||
};
|
||||
|
||||
gpt2: timer@302e0000 {
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
|
||||
reg = <0x302e0000 0x10000>;
|
||||
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
|
||||
@ -456,7 +456,7 @@ gpt2: timer@302e0000 {
|
||||
};
|
||||
|
||||
gpt3: timer@302f0000 {
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
|
||||
reg = <0x302f0000 0x10000>;
|
||||
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
|
||||
@ -466,7 +466,7 @@ gpt3: timer@302f0000 {
|
||||
};
|
||||
|
||||
gpt4: timer@30300000 {
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
|
||||
compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
|
||||
reg = <0x30300000 0x10000>;
|
||||
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
|
||||
|
@ -765,6 +765,7 @@ &uart1 {
|
||||
&uart3 {
|
||||
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
|
||||
&omap4_pmx_core 0x17c>;
|
||||
overrun-throttle-ms = <500>;
|
||||
};
|
||||
|
||||
&uart4 {
|
||||
|
@ -82,14 +82,12 @@ cxo_board {
|
||||
};
|
||||
};
|
||||
|
||||
regulators {
|
||||
vsdcc_fixed: vsdcc-regulator {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "SDCC Power";
|
||||
regulator-min-microvolt = <2700000>;
|
||||
regulator-max-microvolt = <2700000>;
|
||||
regulator-always-on;
|
||||
};
|
||||
vsdcc_fixed: vsdcc-regulator {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "SDCC Power";
|
||||
regulator-min-microvolt = <2700000>;
|
||||
regulator-max-microvolt = <2700000>;
|
||||
regulator-always-on;
|
||||
};
|
||||
|
||||
soc: soc {
|
||||
|
@ -10,10 +10,6 @@
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
#define __exception_irq_entry __irq_entry
|
||||
#else
|
||||
#define __exception_irq_entry
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_ARM_EXCEPTION_H */
|
||||
|
@ -16,6 +16,7 @@
|
||||
ENTRY(mmioset)
|
||||
ENTRY(memset)
|
||||
UNWIND( .fnstart )
|
||||
and r1, r1, #255 @ cast to unsigned char
|
||||
ands r3, r0, #3 @ 1 unaligned?
|
||||
mov ip, r0 @ preserve r0 as return value
|
||||
bne 6f @ 1
|
||||
|
@ -502,6 +502,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
|
||||
|
||||
name = devm_kasprintf(&pdev->dev,
|
||||
GFP_KERNEL, "mmdc%d", ret);
|
||||
if (!name) {
|
||||
ret = -ENOMEM;
|
||||
goto pmu_release_id;
|
||||
}
|
||||
|
||||
pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
|
||||
pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
|
||||
@ -524,9 +528,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
|
||||
|
||||
pmu_register_err:
|
||||
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
|
||||
ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
|
||||
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
|
||||
hrtimer_cancel(&pmu_mmdc->hrtimer);
|
||||
pmu_release_id:
|
||||
ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
|
||||
pmu_free:
|
||||
kfree(pmu_mmdc);
|
||||
return ret;
|
||||
|
@ -362,7 +362,8 @@ static int __init xen_guest_init(void)
|
||||
* for secondary CPUs as they are brought up.
|
||||
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
|
||||
*/
|
||||
xen_vcpu_info = alloc_percpu(struct vcpu_info);
|
||||
xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
|
||||
1 << fls(sizeof(struct vcpu_info) - 1));
|
||||
if (xen_vcpu_info == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1065,6 +1065,8 @@ choice
|
||||
config CPU_BIG_ENDIAN
|
||||
bool "Build big-endian kernel"
|
||||
depends on !LD_IS_LLD || LLD_VERSION >= 130000
|
||||
# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
|
||||
depends on AS_IS_GNU || AS_VERSION >= 150000
|
||||
help
|
||||
Say Y if you plan on running a kernel with a big-endian userspace.
|
||||
|
||||
|
@ -69,7 +69,7 @@ red {
|
||||
};
|
||||
};
|
||||
|
||||
memory {
|
||||
memory@40000000 {
|
||||
reg = <0 0x40000000 0 0x40000000>;
|
||||
};
|
||||
|
||||
|
@ -55,7 +55,7 @@ wps {
|
||||
};
|
||||
};
|
||||
|
||||
memory {
|
||||
memory@40000000 {
|
||||
reg = <0 0x40000000 0 0x20000000>;
|
||||
};
|
||||
|
||||
|
@ -43,7 +43,7 @@ extcon_usb: extcon_iddig {
|
||||
id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
|
||||
usb_p1_vbus: regulator@0 {
|
||||
usb_p1_vbus: regulator-usb-p1 {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "usb_vbus";
|
||||
regulator-min-microvolt = <5000000>;
|
||||
@ -52,7 +52,7 @@ usb_p1_vbus: regulator@0 {
|
||||
enable-active-high;
|
||||
};
|
||||
|
||||
usb_p0_vbus: regulator@1 {
|
||||
usb_p0_vbus: regulator-usb-p0 {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "vbus";
|
||||
regulator-min-microvolt = <5000000>;
|
||||
|
@ -30,7 +30,7 @@ reserved-memory {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
ranges;
|
||||
scp_mem_reserved: scp_mem_region {
|
||||
scp_mem_reserved: memory@50000000 {
|
||||
compatible = "shared-dma-pool";
|
||||
reg = <0 0x50000000 0 0x2900000>;
|
||||
no-map;
|
||||
|
@ -95,7 +95,7 @@ reserved_memory: reserved-memory {
|
||||
#size-cells = <2>;
|
||||
ranges;
|
||||
|
||||
scp_mem_reserved: scp_mem_region {
|
||||
scp_mem_reserved: memory@50000000 {
|
||||
compatible = "shared-dma-pool";
|
||||
reg = <0 0x50000000 0 0x2900000>;
|
||||
no-map;
|
||||
|
@ -129,12 +129,6 @@ scm {
|
||||
};
|
||||
};
|
||||
|
||||
tcsr_mutex: hwlock {
|
||||
compatible = "qcom,tcsr-mutex";
|
||||
syscon = <&tcsr_mutex_regs 0 0x80>;
|
||||
#hwlock-cells = <1>;
|
||||
};
|
||||
|
||||
pmuv8: pmu {
|
||||
compatible = "arm,cortex-a53-pmu";
|
||||
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
|
||||
@ -175,7 +169,7 @@ q6_region: memory@4ab00000 {
|
||||
smem {
|
||||
compatible = "qcom,smem";
|
||||
memory-region = <&smem_region>;
|
||||
hwlocks = <&tcsr_mutex 0>;
|
||||
hwlocks = <&tcsr_mutex 3>;
|
||||
};
|
||||
|
||||
soc: soc {
|
||||
@ -242,9 +236,10 @@ gcc: gcc@1800000 {
|
||||
#reset-cells = <1>;
|
||||
};
|
||||
|
||||
tcsr_mutex_regs: syscon@1905000 {
|
||||
compatible = "syscon";
|
||||
reg = <0x0 0x01905000 0x0 0x8000>;
|
||||
tcsr_mutex: hwlock@1905000 {
|
||||
compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
|
||||
reg = <0x0 0x01905000 0x0 0x20000>;
|
||||
#hwlock-cells = <1>;
|
||||
};
|
||||
|
||||
tcsr_q6: syscon@1945000 {
|
||||
|
@ -1175,7 +1175,7 @@ apps_iommu: iommu@1ef0000 {
|
||||
#size-cells = <1>;
|
||||
#iommu-cells = <1>;
|
||||
compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
|
||||
ranges = <0 0x01e20000 0x40000>;
|
||||
ranges = <0 0x01e20000 0x20000>;
|
||||
reg = <0x01ef0000 0x3000>;
|
||||
clocks = <&gcc GCC_SMMU_CFG_CLK>,
|
||||
<&gcc GCC_APSS_TCU_CLK>;
|
||||
|
@ -564,6 +564,8 @@ &wifi {
|
||||
vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
|
||||
vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
|
||||
vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
|
||||
|
||||
qcom,snoc-host-cap-8bit-quirk;
|
||||
};
|
||||
|
||||
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
|
||||
|
@ -1022,7 +1022,9 @@ power-domain@RK3399_PD_VCODEC {
|
||||
power-domain@RK3399_PD_VDU {
|
||||
reg = <RK3399_PD_VDU>;
|
||||
clocks = <&cru ACLK_VDU>,
|
||||
<&cru HCLK_VDU>;
|
||||
<&cru HCLK_VDU>,
|
||||
<&cru SCLK_VDU_CA>,
|
||||
<&cru SCLK_VDU_CORE>;
|
||||
pm_qos = <&qos_video_m1_r>,
|
||||
<&qos_video_m1_w>;
|
||||
};
|
||||
@ -1276,7 +1278,7 @@ vpu_mmu: iommu@ff650800 {
|
||||
|
||||
vdec: video-codec@ff660000 {
|
||||
compatible = "rockchip,rk3399-vdec";
|
||||
reg = <0x0 0xff660000 0x0 0x400>;
|
||||
reg = <0x0 0xff660000 0x0 0x480>;
|
||||
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
|
||||
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
|
||||
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
|
||||
|
@ -694,7 +694,6 @@ CONFIG_PANIC_TIMEOUT=-1
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_WQ_WATCHDOG=y
|
||||
CONFIG_SCHEDSTATS=y
|
||||
# CONFIG_DEBUG_PREEMPT is not set
|
||||
CONFIG_BUG_ON_DATA_CORRUPTION=y
|
||||
CONFIG_TRACE_MMIO_ACCESS=y
|
||||
CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y
|
||||
|
@ -761,6 +761,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
||||
if (pte_hw_dirty(pte))
|
||||
pte = pte_mkdirty(pte);
|
||||
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
|
||||
/*
|
||||
* If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
|
||||
* dirtiness again.
|
||||
*/
|
||||
if (pte_sw_dirty(pte))
|
||||
pte = pte_mkdirty(pte);
|
||||
return pte;
|
||||
}
|
||||
|
||||
|
@ -468,6 +468,7 @@ config MACH_LOONGSON2EF
|
||||
|
||||
config MACH_LOONGSON64
|
||||
bool "Loongson 64-bit family of machines"
|
||||
select ARCH_DMA_DEFAULT_COHERENT
|
||||
select ARCH_SPARSEMEM_ENABLE
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select ARCH_MIGHT_HAVE_PC_SERIO
|
||||
@ -1379,6 +1380,7 @@ config CPU_LOONGSON64
|
||||
select CPU_SUPPORTS_MSA
|
||||
select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
|
||||
select CPU_MIPSR2_IRQ_VI
|
||||
select DMA_NONCOHERENT
|
||||
select WEAK_ORDERING
|
||||
select WEAK_REORDERING_BEYOND_LLSC
|
||||
select MIPS_ASID_BITS_VARIABLE
|
||||
|
@ -117,7 +117,8 @@ struct irq_source_routing_table {
|
||||
u64 pci_io_start_addr;
|
||||
u64 pci_io_end_addr;
|
||||
u64 pci_config_addr;
|
||||
u32 dma_mask_bits;
|
||||
u16 dma_mask_bits;
|
||||
u16 dma_noncoherent;
|
||||
} __packed;
|
||||
|
||||
struct interface_info {
|
||||
|
@ -667,7 +667,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
|
||||
gfn_t gfn = gpa >> PAGE_SHIFT;
|
||||
int srcu_idx, err;
|
||||
kvm_pfn_t pfn;
|
||||
pte_t *ptep, entry, old_pte;
|
||||
pte_t *ptep, entry;
|
||||
bool writeable;
|
||||
unsigned long prot_bits;
|
||||
unsigned long mmu_seq;
|
||||
@ -739,7 +739,6 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
|
||||
entry = pfn_pte(pfn, __pgprot(prot_bits));
|
||||
|
||||
/* Write the PTE */
|
||||
old_pte = *ptep;
|
||||
set_pte(ptep, entry);
|
||||
|
||||
err = 0;
|
||||
|
@ -13,6 +13,8 @@
|
||||
* Copyright (C) 2009 Lemote Inc.
|
||||
* Author: Wu Zhangjin, wuzhangjin@gmail.com
|
||||
*/
|
||||
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <asm/bootinfo.h>
|
||||
@ -131,8 +133,14 @@ void __init prom_init_env(void)
|
||||
loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
|
||||
loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
|
||||
if (loongson_sysconf.dma_mask_bits < 32 ||
|
||||
loongson_sysconf.dma_mask_bits > 64)
|
||||
loongson_sysconf.dma_mask_bits > 64) {
|
||||
loongson_sysconf.dma_mask_bits = 32;
|
||||
dma_default_coherent = true;
|
||||
} else {
|
||||
dma_default_coherent = !eirq_source->dma_noncoherent;
|
||||
}
|
||||
|
||||
pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
|
||||
|
||||
loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
|
||||
loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
|
||||
|
@ -140,6 +140,11 @@ static __init void reserve_pio_range(void)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Reserve vgabios if it comes from firmware */
|
||||
if (loongson_sysconf.vgabios_addr)
|
||||
memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
|
||||
SZ_256K);
|
||||
}
|
||||
|
||||
void __init arch_init_irq(void)
|
||||
|
@ -75,7 +75,6 @@
|
||||
|
||||
/* We now return you to your regularly scheduled HPUX. */
|
||||
|
||||
#define ENOSYM 215 /* symbol does not exist in executable */
|
||||
#define ENOTSOCK 216 /* Socket operation on non-socket */
|
||||
#define EDESTADDRREQ 217 /* Destination address required */
|
||||
#define EMSGSIZE 218 /* Message too long */
|
||||
@ -101,7 +100,6 @@
|
||||
#define ETIMEDOUT 238 /* Connection timed out */
|
||||
#define ECONNREFUSED 239 /* Connection refused */
|
||||
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
|
||||
#define EREMOTERELEASE 240 /* Remote peer released connection */
|
||||
#define EHOSTDOWN 241 /* Host is down */
|
||||
#define EHOSTUNREACH 242 /* No route to host */
|
||||
|
||||
|
@ -465,6 +465,7 @@ struct pdc_model { /* for PDC_MODEL */
|
||||
unsigned long arch_rev;
|
||||
unsigned long pot_key;
|
||||
unsigned long curr_key;
|
||||
unsigned long width; /* default of PSW_W bit (1=enabled) */
|
||||
};
|
||||
|
||||
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
|
||||
|
@ -497,13 +497,13 @@
|
||||
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
|
||||
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
|
||||
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
|
||||
#define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
|
||||
|
||||
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
|
||||
.macro convert_for_tlb_insert20 pte,tmp
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
copy \pte,\tmp
|
||||
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
|
||||
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
@ -511,8 +511,7 @@
|
||||
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
|
||||
#else /* Huge pages disabled */
|
||||
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
#endif
|
||||
|
@ -69,9 +69,8 @@ $bss_loop:
|
||||
stw,ma %arg2,4(%r1)
|
||||
stw,ma %arg3,4(%r1)
|
||||
|
||||
#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
|
||||
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
|
||||
* and halt kernel if we detect a PA1.x CPU. */
|
||||
#if defined(CONFIG_PA20)
|
||||
/* check for 64-bit capable CPU as required by current kernel */
|
||||
ldi 32,%r10
|
||||
mtctl %r10,%cr11
|
||||
.level 2.0
|
||||
|
@ -69,9 +69,6 @@
|
||||
|
||||
#define _PTE_NONE_MASK 0
|
||||
|
||||
/* Until my rework is finished, 40x still needs atomic PTE updates */
|
||||
#define PTE_ATOMIC_UPDATES 1
|
||||
|
||||
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
|
||||
#define _PAGE_BASE (_PAGE_BASE_NC)
|
||||
|
||||
|
@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
|
||||
|
||||
#define pte_wrprotect pte_wrprotect
|
||||
|
||||
static inline int pte_read(pte_t pte)
|
||||
{
|
||||
return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
|
||||
}
|
||||
|
||||
#define pte_read pte_read
|
||||
|
||||
static inline int pte_write(pte_t pte)
|
||||
{
|
||||
return !(pte_val(pte) & _PAGE_RO);
|
||||
|
@ -216,7 +216,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
|
||||
{
|
||||
unsigned long old;
|
||||
|
||||
if (pte_young(*ptep))
|
||||
if (!pte_young(*ptep))
|
||||
return 0;
|
||||
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
|
||||
return (old & _PAGE_ACCESSED) != 0;
|
||||
|
@ -45,7 +45,9 @@ static inline int pte_write(pte_t pte)
|
||||
return pte_val(pte) & _PAGE_RW;
|
||||
}
|
||||
#endif
|
||||
#ifndef pte_read
|
||||
static inline int pte_read(pte_t pte) { return 1; }
|
||||
#endif
|
||||
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
|
||||
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
|
||||
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
|
||||
|
@ -23,6 +23,15 @@
|
||||
#include <asm/feature-fixups.h>
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
#define __REST_1FPVSR(n,c,base) \
|
||||
BEGIN_FTR_SECTION \
|
||||
b 2f; \
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
|
||||
REST_FPR(n,base); \
|
||||
b 3f; \
|
||||
2: REST_VSR(n,c,base); \
|
||||
3:
|
||||
|
||||
#define __REST_32FPVSRS(n,c,base) \
|
||||
BEGIN_FTR_SECTION \
|
||||
b 2f; \
|
||||
@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
|
||||
2: SAVE_32VSRS(n,c,base); \
|
||||
3:
|
||||
#else
|
||||
#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
|
||||
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
|
||||
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
|
||||
#endif
|
||||
#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
|
||||
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
|
||||
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
|
||||
|
||||
@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
|
||||
SAVE_32FPVSRS(0, R4, R3)
|
||||
mffs fr0
|
||||
stfd fr0,FPSTATE_FPSCR(r3)
|
||||
REST_1FPVSR(0, R4, R3)
|
||||
blr
|
||||
EXPORT_SYMBOL(store_fp_state)
|
||||
|
||||
@ -132,4 +144,5 @@ _GLOBAL(save_fpu)
|
||||
2: SAVE_32FPVSRS(0, R4, R6)
|
||||
mffs fr0
|
||||
stfd fr0,FPSTATE_FPSCR(r6)
|
||||
REST_1FPVSR(0, R4, R6)
|
||||
blr
|
||||
|
@ -906,6 +906,8 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
/* Parse memory topology */
|
||||
mem_topology_setup();
|
||||
/* Set max_mapnr before paging_init() */
|
||||
set_max_mapnr(max_pfn);
|
||||
|
||||
/*
|
||||
* Release secondary cpus out of their spinloops at 0x60 now that
|
||||
|
@ -36,6 +36,9 @@ _GLOBAL(ftrace_regs_caller)
|
||||
/* Save the original return address in A's stack frame */
|
||||
std r0,LRSAVE(r1)
|
||||
|
||||
/* Create a minimal stack frame for representing B */
|
||||
stdu r1, -STACK_FRAME_MIN_SIZE(r1)
|
||||
|
||||
/* Create our stack frame + pt_regs */
|
||||
stdu r1,-SWITCH_FRAME_SIZE(r1)
|
||||
|
||||
@ -52,7 +55,7 @@ _GLOBAL(ftrace_regs_caller)
|
||||
SAVE_10GPRS(22, r1)
|
||||
|
||||
/* Save previous stack pointer (r1) */
|
||||
addi r8, r1, SWITCH_FRAME_SIZE
|
||||
addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
|
||||
std r8, GPR1(r1)
|
||||
|
||||
/* Load special regs for save below */
|
||||
@ -65,6 +68,8 @@ _GLOBAL(ftrace_regs_caller)
|
||||
mflr r7
|
||||
/* Save it as pt_regs->nip */
|
||||
std r7, _NIP(r1)
|
||||
/* Also save it in B's stackframe header for proper unwind */
|
||||
std r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
|
||||
/* Save the read LR in pt_regs->link */
|
||||
std r0, _LINK(r1)
|
||||
|
||||
@ -121,7 +126,7 @@ ftrace_regs_call:
|
||||
ld r2, 24(r1)
|
||||
|
||||
/* Pop our stack frame */
|
||||
addi r1, r1, SWITCH_FRAME_SIZE
|
||||
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
|
||||
|
||||
#ifdef CONFIG_LIVEPATCH
|
||||
/* Based on the cmpd above, if the NIP was altered handle livepatch */
|
||||
@ -145,7 +150,7 @@ ftrace_no_trace:
|
||||
mflr r3
|
||||
mtctr r3
|
||||
REST_GPR(3, r1)
|
||||
addi r1, r1, SWITCH_FRAME_SIZE
|
||||
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
|
||||
mtlr r0
|
||||
bctr
|
||||
|
||||
@ -153,6 +158,9 @@ _GLOBAL(ftrace_caller)
|
||||
/* Save the original return address in A's stack frame */
|
||||
std r0, LRSAVE(r1)
|
||||
|
||||
/* Create a minimal stack frame for representing B */
|
||||
stdu r1, -STACK_FRAME_MIN_SIZE(r1)
|
||||
|
||||
/* Create our stack frame + pt_regs */
|
||||
stdu r1, -SWITCH_FRAME_SIZE(r1)
|
||||
|
||||
@ -166,6 +174,7 @@ _GLOBAL(ftrace_caller)
|
||||
/* Get the _mcount() call site out of LR */
|
||||
mflr r7
|
||||
std r7, _NIP(r1)
|
||||
std r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
|
||||
|
||||
/* Save callee's TOC in the ABI compliant location */
|
||||
std r2, 24(r1)
|
||||
@ -200,7 +209,7 @@ ftrace_call:
|
||||
ld r2, 24(r1)
|
||||
|
||||
/* Pop our stack frame */
|
||||
addi r1, r1, SWITCH_FRAME_SIZE
|
||||
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
|
||||
|
||||
/* Reload original LR */
|
||||
ld r0, LRSAVE(r1)
|
||||
|
@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
|
||||
mfvscr v0
|
||||
li r4, VRSTATE_VSCR
|
||||
stvx v0, r4, r3
|
||||
lvx v0, 0, r3
|
||||
blr
|
||||
EXPORT_SYMBOL(store_vr_state)
|
||||
|
||||
@ -104,6 +105,7 @@ _GLOBAL(save_altivec)
|
||||
mfvscr v0
|
||||
li r4,VRSTATE_VSCR
|
||||
stvx v0,r4,r7
|
||||
lvx v0,0,r7
|
||||
blr
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
|
@ -293,7 +293,6 @@ void __init mem_init(void)
|
||||
#endif
|
||||
|
||||
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
||||
set_max_mapnr(max_pfn);
|
||||
|
||||
kasan_late_init();
|
||||
|
||||
|
@ -1289,8 +1289,7 @@ static void power_pmu_disable(struct pmu *pmu)
|
||||
/*
|
||||
* Disable instruction sampling if it was enabled
|
||||
*/
|
||||
if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
|
||||
val &= ~MMCRA_SAMPLE_ENABLE;
|
||||
val &= ~MMCRA_SAMPLE_ENABLE;
|
||||
|
||||
/* Disable BHRB via mmcra (BHRBRD) for p10 */
|
||||
if (ppmu->flags & PPMU_ARCH_31)
|
||||
@ -1301,7 +1300,7 @@ static void power_pmu_disable(struct pmu *pmu)
|
||||
* instruction sampling or BHRB.
|
||||
*/
|
||||
if (val != mmcra) {
|
||||
mtspr(SPRN_MMCRA, mmcra);
|
||||
mtspr(SPRN_MMCRA, val);
|
||||
mb();
|
||||
isync();
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ static int trace_imc_mem_size;
|
||||
* core and trace-imc
|
||||
*/
|
||||
static struct imc_pmu_ref imc_global_refc = {
|
||||
.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
|
||||
.lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
|
||||
.id = 0,
|
||||
.refc = 0,
|
||||
};
|
||||
|
@ -523,8 +523,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
|
||||
|
||||
if (cmd) {
|
||||
rc = init_cpu_associativity();
|
||||
if (rc)
|
||||
if (rc) {
|
||||
destroy_cpu_associativity();
|
||||
goto out;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
|
||||
|
@ -779,7 +779,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
|
||||
if (out_qpage)
|
||||
*out_qpage = be64_to_cpu(qpage);
|
||||
if (out_qsize)
|
||||
*out_qsize = be32_to_cpu(qsize);
|
||||
*out_qsize = be64_to_cpu(qsize);
|
||||
if (out_qeoi_page)
|
||||
*out_qeoi_page = be64_to_cpu(qeoi_page);
|
||||
if (out_escalate_irq)
|
||||
|
@ -344,16 +344,14 @@ int handle_misaligned_store(struct pt_regs *regs)
|
||||
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
|
||||
len = 8;
|
||||
val.data_ulong = GET_RS2S(insn, regs);
|
||||
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
|
||||
((insn >> SH_RD) & 0x1f)) {
|
||||
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
|
||||
len = 8;
|
||||
val.data_ulong = GET_RS2C(insn, regs);
|
||||
#endif
|
||||
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
|
||||
len = 4;
|
||||
val.data_ulong = GET_RS2S(insn, regs);
|
||||
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
|
||||
((insn >> SH_RD) & 0x1f)) {
|
||||
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
|
||||
len = 4;
|
||||
val.data_ulong = GET_RS2C(insn, regs);
|
||||
} else {
|
||||
|
@ -201,7 +201,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
|
||||
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
|
||||
/* Set return value. */
|
||||
if (!is_tail_call)
|
||||
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
|
||||
emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
|
||||
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
|
||||
is_tail_call ? 4 : 0, /* skip TCC init */
|
||||
ctx);
|
||||
@ -394,12 +394,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
|
||||
*rd = RV_REG_T2;
|
||||
}
|
||||
|
||||
static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
|
||||
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
|
||||
struct rv_jit_context *ctx)
|
||||
{
|
||||
s64 upper, lower;
|
||||
|
||||
if (rvoff && is_21b_int(rvoff) && !force_jalr) {
|
||||
if (rvoff && fixed_addr && is_21b_int(rvoff)) {
|
||||
emit(rv_jal(rd, rvoff >> 1), ctx);
|
||||
return 0;
|
||||
} else if (in_auipc_jalr_range(rvoff)) {
|
||||
@ -420,24 +420,17 @@ static bool is_signed_bpf_cond(u8 cond)
|
||||
cond == BPF_JSGE || cond == BPF_JSLE;
|
||||
}
|
||||
|
||||
static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
|
||||
static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
|
||||
{
|
||||
s64 off = 0;
|
||||
u64 ip;
|
||||
u8 rd;
|
||||
int ret;
|
||||
|
||||
if (addr && ctx->insns) {
|
||||
ip = (u64)(long)(ctx->insns + ctx->ninsns);
|
||||
off = addr - ip;
|
||||
}
|
||||
|
||||
ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
rd = bpf_to_rv_reg(BPF_REG_0, ctx);
|
||||
emit_mv(rd, RV_REG_A0, ctx);
|
||||
return 0;
|
||||
return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
|
||||
}
|
||||
|
||||
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
@ -731,7 +724,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
/* JUMP off */
|
||||
case BPF_JMP | BPF_JA:
|
||||
rvoff = rv_offset(i, off, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
@ -850,17 +843,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
/* function call */
|
||||
case BPF_JMP | BPF_CALL:
|
||||
{
|
||||
bool fixed;
|
||||
bool fixed_addr;
|
||||
u64 addr;
|
||||
|
||||
mark_call(ctx);
|
||||
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
|
||||
&fixed);
|
||||
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
|
||||
&addr, &fixed_addr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = emit_call(fixed, addr, ctx);
|
||||
|
||||
ret = emit_call(addr, fixed_addr, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (insn->src_reg != BPF_PSEUDO_CALL)
|
||||
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
|
||||
break;
|
||||
}
|
||||
/* tail call */
|
||||
@ -875,7 +872,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
break;
|
||||
|
||||
rvoff = epilogue_offset(ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
|
||||
next = pmd_addr_end(addr, end);
|
||||
if (pmd_none(*pmd) || pmd_large(*pmd))
|
||||
continue;
|
||||
page = virt_to_page(pmd_val(*pmd));
|
||||
page = phys_to_page(pmd_val(*pmd));
|
||||
set_bit(PG_arch_1, &page->flags);
|
||||
} while (pmd++, addr = next, addr != end);
|
||||
}
|
||||
@ -130,8 +130,8 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
|
||||
if (pud_none(*pud) || pud_large(*pud))
|
||||
continue;
|
||||
if (!pud_folded(*pud)) {
|
||||
page = virt_to_page(pud_val(*pud));
|
||||
for (i = 0; i < 3; i++)
|
||||
page = phys_to_page(pud_val(*pud));
|
||||
for (i = 0; i < 4; i++)
|
||||
set_bit(PG_arch_1, &page[i].flags);
|
||||
}
|
||||
mark_kernel_pmd(pud, addr, next);
|
||||
@ -151,8 +151,8 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
|
||||
if (p4d_none(*p4d))
|
||||
continue;
|
||||
if (!p4d_folded(*p4d)) {
|
||||
page = virt_to_page(p4d_val(*p4d));
|
||||
for (i = 0; i < 3; i++)
|
||||
page = phys_to_page(p4d_val(*p4d));
|
||||
for (i = 0; i < 4; i++)
|
||||
set_bit(PG_arch_1, &page[i].flags);
|
||||
}
|
||||
mark_kernel_pud(p4d, addr, next);
|
||||
@ -173,8 +173,8 @@ static void mark_kernel_pgd(void)
|
||||
if (pgd_none(*pgd))
|
||||
continue;
|
||||
if (!pgd_folded(*pgd)) {
|
||||
page = virt_to_page(pgd_val(*pgd));
|
||||
for (i = 0; i < 3; i++)
|
||||
page = phys_to_page(pgd_val(*pgd));
|
||||
for (i = 0; i < 4; i++)
|
||||
set_bit(PG_arch_1, &page[i].flags);
|
||||
}
|
||||
mark_kernel_p4d(pgd, addr, next);
|
||||
|
@ -717,7 +717,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
|
||||
pte_clear(mm, addr, ptep);
|
||||
}
|
||||
if (reset)
|
||||
pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
|
||||
pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
|
||||
pgste_set_unlock(ptep, pgste);
|
||||
preempt_enable();
|
||||
}
|
||||
|
@ -541,6 +541,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
s->dma_length = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
|
||||
{
|
||||
size_t n = BITS_TO_LONGS(bits);
|
||||
size_t bytes;
|
||||
|
||||
if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
|
||||
return NULL;
|
||||
|
||||
return vzalloc(bytes);
|
||||
}
|
||||
|
||||
int zpci_dma_init_device(struct zpci_dev *zdev)
|
||||
{
|
||||
@ -577,13 +588,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
|
||||
zdev->end_dma - zdev->start_dma + 1);
|
||||
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
|
||||
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
|
||||
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
|
||||
zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
|
||||
if (!zdev->iommu_bitmap) {
|
||||
rc = -ENOMEM;
|
||||
goto free_dma_table;
|
||||
}
|
||||
if (!s390_iommu_strict) {
|
||||
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
|
||||
zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
|
||||
if (!zdev->lazy_bitmap) {
|
||||
rc = -ENOMEM;
|
||||
goto free_bitmap;
|
||||
|
@ -25,6 +25,17 @@ config STACK_DEBUG
|
||||
every function call and will therefore incur a major
|
||||
performance hit. Most users should say N.
|
||||
|
||||
config EARLY_PRINTK
|
||||
bool "Early printk"
|
||||
depends on SH_STANDARD_BIOS
|
||||
help
|
||||
Say Y here to redirect kernel printk messages to the serial port
|
||||
used by the SH-IPL bootloader, starting very early in the boot
|
||||
process and ending when the kernel's serial console is initialised.
|
||||
This option is only useful while porting the kernel to a new machine,
|
||||
when the kernel may crash or hang before the serial console is
|
||||
initialised. If unsure, say N.
|
||||
|
||||
config 4KSTACKS
|
||||
bool "Use 4Kb for kernel stacks instead of 8Kb"
|
||||
depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
|
||||
|
@ -463,5 +463,5 @@ ccslow: cmp %g1, 0
|
||||
* we only bother with faults on loads... */
|
||||
|
||||
cc_fault:
|
||||
ret
|
||||
retl
|
||||
clr %o0
|
||||
|
@ -1360,20 +1360,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
|
||||
}
|
||||
|
||||
static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return canonical_address(vaddr, vaddr_bits) == vaddr;
|
||||
}
|
||||
|
||||
/* Clamp to a canonical address greater-than-or-equal-to the address given */
|
||||
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return is_canonical_address(vaddr, vaddr_bits) ?
|
||||
return __is_canonical_address(vaddr, vaddr_bits) ?
|
||||
vaddr :
|
||||
-BIT_ULL(vaddr_bits - 1);
|
||||
}
|
||||
@ -1381,7 +1371,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
|
||||
/* Clamp to a canonical address less-than-or-equal-to the address given */
|
||||
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return is_canonical_address(vaddr, vaddr_bits) ?
|
||||
return __is_canonical_address(vaddr, vaddr_bits) ?
|
||||
vaddr :
|
||||
BIT_ULL(vaddr_bits - 1) - 1;
|
||||
}
|
||||
|
@ -67,6 +67,8 @@ struct legacy_pic {
|
||||
void (*make_irq)(unsigned int irq);
|
||||
};
|
||||
|
||||
void legacy_pic_pcat_compat(void);
|
||||
|
||||
extern struct legacy_pic *legacy_pic;
|
||||
extern struct legacy_pic null_legacy_pic;
|
||||
|
||||
|
@ -505,6 +505,7 @@
|
||||
#define MSR_AMD64_CPUID_FN_1 0xc0011004
|
||||
#define MSR_AMD64_LS_CFG 0xc0011020
|
||||
#define MSR_AMD64_DC_CFG 0xc0011022
|
||||
#define MSR_AMD64_TW_CFG 0xc0011023
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xc0011029
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
|
||||
@ -541,12 +542,17 @@
|
||||
|
||||
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
|
||||
|
||||
/* Fam 17h MSRs */
|
||||
#define MSR_F17H_IRPERF 0xc00000e9
|
||||
/* Zen4 */
|
||||
#define MSR_ZEN4_BP_CFG 0xc001102e
|
||||
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
|
||||
|
||||
/* Zen 2 */
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
|
||||
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
|
||||
|
||||
/* Fam 17h MSRs */
|
||||
#define MSR_F17H_IRPERF 0xc00000e9
|
||||
|
||||
/* Fam 16h MSRs */
|
||||
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
|
||||
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
|
||||
|
@ -12,13 +12,6 @@
|
||||
|
||||
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
|
||||
|
||||
/*
|
||||
* Too small node sizes may confuse the VM badly. Usually they
|
||||
* result from BIOS bugs. So dont recognize nodes as standalone
|
||||
* NUMA entities that have less than this amount of RAM listed:
|
||||
*/
|
||||
#define NODE_MIN_SIZE (4*1024*1024)
|
||||
|
||||
extern int numa_off;
|
||||
|
||||
/*
|
||||
|
@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
|
||||
extern bool __virt_addr_valid(unsigned long kaddr);
|
||||
#define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
|
||||
|
||||
static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
|
||||
}
|
||||
|
||||
static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return __canonical_address(vaddr, vaddr_bits) == vaddr;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include <asm-generic/memory_model.h>
|
||||
|
@ -108,27 +108,16 @@ extern unsigned long _brk_end;
|
||||
void *extend_brk(size_t size, size_t align);
|
||||
|
||||
/*
|
||||
* Reserve space in the brk section. The name must be unique within
|
||||
* the file, and somewhat descriptive. The size is in bytes. Must be
|
||||
* used at file scope.
|
||||
* Reserve space in the .brk section, which is a block of memory from which the
|
||||
* caller is allowed to allocate very early (before even memblock is available)
|
||||
* by calling extend_brk(). All allocated memory will be eventually converted
|
||||
* to memblock. Any leftover unallocated memory will be freed.
|
||||
*
|
||||
* (This uses a temp function to wrap the asm so we can pass it the
|
||||
* size parameter; otherwise we wouldn't be able to. We can't use a
|
||||
* "section" attribute on a normal variable because it always ends up
|
||||
* being @progbits, which ends up allocating space in the vmlinux
|
||||
* executable.)
|
||||
* The size is in bytes.
|
||||
*/
|
||||
#define RESERVE_BRK(name,sz) \
|
||||
static void __section(".discard.text") __used notrace \
|
||||
__brk_reservation_fn_##name##__(void) { \
|
||||
asm volatile ( \
|
||||
".pushsection .brk_reservation,\"aw\",@nobits;" \
|
||||
".brk." #name ":" \
|
||||
" 1:.skip %c0;" \
|
||||
" .size .brk." #name ", . - 1b;" \
|
||||
" .popsection" \
|
||||
: : "i" (sz)); \
|
||||
}
|
||||
#define RESERVE_BRK(name, size) \
|
||||
__section(".bss..brk") __aligned(1) __used \
|
||||
static char __brk_##name[size]
|
||||
|
||||
/* Helper for reserving space for arrays of things */
|
||||
#define RESERVE_BRK_ARRAY(type, name, entries) \
|
||||
@ -146,12 +135,19 @@ asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
|
||||
|
||||
#endif /* __i386__ */
|
||||
#endif /* _SETUP */
|
||||
#else
|
||||
#define RESERVE_BRK(name,sz) \
|
||||
.pushsection .brk_reservation,"aw",@nobits; \
|
||||
.brk.name: \
|
||||
1: .skip sz; \
|
||||
.size .brk.name,.-1b; \
|
||||
|
||||
#else /* __ASSEMBLY */
|
||||
|
||||
.macro __RESERVE_BRK name, size
|
||||
.pushsection .bss..brk, "aw"
|
||||
SYM_DATA_START(__brk_\name)
|
||||
.skip \size
|
||||
SYM_DATA_END(__brk_\name)
|
||||
.popsection
|
||||
.endm
|
||||
|
||||
#define RESERVE_BRK(name, size) __RESERVE_BRK name, size
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_X86_SETUP_H */
|
||||
|
@ -446,7 +446,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
|
||||
#define copy_mc_to_kernel copy_mc_to_kernel
|
||||
|
||||
unsigned long __must_check
|
||||
copy_mc_to_user(void *to, const void *from, unsigned len);
|
||||
copy_mc_to_user(void __user *to, const void *from, unsigned len);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -141,6 +141,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
|
||||
madt->address);
|
||||
}
|
||||
|
||||
if (madt->flags & ACPI_MADT_PCAT_COMPAT)
|
||||
legacy_pic_pcat_compat();
|
||||
|
||||
default_acpi_madt_oem_check(madt->header.oem_id,
|
||||
madt->header.oem_table_id);
|
||||
|
||||
|
@ -424,6 +424,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
|
||||
u8 insn_buff[MAX_PATCH_LEN];
|
||||
|
||||
DPRINTK("alt table %px, -> %px", start, end);
|
||||
|
||||
/*
|
||||
* In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
|
||||
* cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
|
||||
* During the process, KASAN becomes confused seeing partial LA57
|
||||
* conversion and triggers a false-positive out-of-bound report.
|
||||
*
|
||||
* Disable KASAN until the patching is complete.
|
||||
*/
|
||||
kasan_disable_current();
|
||||
|
||||
/*
|
||||
* The scan order should be from start to end. A later scanned
|
||||
* alternative code can overwrite previously scanned alternative code.
|
||||
@ -491,6 +502,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
|
||||
next:
|
||||
optimize_nops(instr, a->instrlen);
|
||||
}
|
||||
|
||||
kasan_enable_current();
|
||||
}
|
||||
|
||||
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION)
|
||||
|
@ -81,6 +81,10 @@ static const int amd_div0[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
|
||||
|
||||
static const int amd_erratum_1485[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
|
||||
AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
|
||||
|
||||
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||
{
|
||||
int osvw_id = *erratum++;
|
||||
@ -1178,6 +1182,10 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
|
||||
setup_force_cpu_bug(X86_BUG_DIV0);
|
||||
}
|
||||
|
||||
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
|
||||
cpu_has_amd_erratum(c, amd_erratum_1485))
|
||||
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@ -1320,6 +1328,9 @@ static void zenbleed_check_cpu(void *unused)
|
||||
|
||||
void amd_check_microcode(void)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
|
||||
return;
|
||||
|
||||
on_each_cpu(zenbleed_check_cpu, NULL, 1);
|
||||
}
|
||||
|
||||
|
@ -2407,7 +2407,7 @@ static void __init srso_select_mitigation(void)
|
||||
pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
|
||||
|
||||
pred_cmd:
|
||||
if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
|
||||
if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
|
||||
boot_cpu_has(X86_FEATURE_SBPB))
|
||||
x86_pred_cmd = PRED_CMD_SBPB;
|
||||
}
|
||||
|
@ -89,8 +89,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
|
||||
if (!err)
|
||||
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
|
||||
|
||||
/* Socket ID is ApicId[6] for these processors. */
|
||||
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
|
||||
/*
|
||||
* Socket ID is ApicId[6] for the processors with model <= 0x3
|
||||
* when running on host.
|
||||
*/
|
||||
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
|
||||
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
|
||||
|
||||
cacheinfo_hygon_init_llc_id(c, cpu);
|
||||
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
|
||||
|
@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
|
||||
* while the kernel still uses a direct mapping.
|
||||
*/
|
||||
static struct desc_ptr startup_gdt_descr = {
|
||||
.size = sizeof(startup_gdt),
|
||||
.size = sizeof(startup_gdt)-1,
|
||||
.address = 0,
|
||||
};
|
||||
|
||||
|
@ -32,6 +32,7 @@
|
||||
*/
|
||||
static void init_8259A(int auto_eoi);
|
||||
|
||||
static bool pcat_compat __ro_after_init;
|
||||
static int i8259A_auto_eoi;
|
||||
DEFINE_RAW_SPINLOCK(i8259A_lock);
|
||||
|
||||
@ -301,15 +302,32 @@ static void unmask_8259A(void)
|
||||
|
||||
static int probe_8259A(void)
|
||||
{
|
||||
unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
|
||||
unsigned long flags;
|
||||
unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
|
||||
unsigned char new_val;
|
||||
|
||||
/*
|
||||
* Check to see if we have a PIC.
|
||||
* Mask all except the cascade and read
|
||||
* back the value we just wrote. If we don't
|
||||
* have a PIC, we will read 0xff as opposed to the
|
||||
* value we wrote.
|
||||
* If MADT has the PCAT_COMPAT flag set, then do not bother probing
|
||||
* for the PIC. Some BIOSes leave the PIC uninitialized and probing
|
||||
* fails.
|
||||
*
|
||||
* Right now this causes problems as quite some code depends on
|
||||
* nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
|
||||
* when the system has an IO/APIC because then PIC is not required
|
||||
* at all, except for really old machines where the timer interrupt
|
||||
* must be routed through the PIC. So just pretend that the PIC is
|
||||
* there and let legacy_pic->init() initialize it for nothing.
|
||||
*
|
||||
* Alternatively this could just try to initialize the PIC and
|
||||
* repeat the probe, but for cases where there is no PIC that's
|
||||
* just pointless.
|
||||
*/
|
||||
if (pcat_compat)
|
||||
return nr_legacy_irqs();
|
||||
|
||||
/*
|
||||
* Check to see if we have a PIC. Mask all except the cascade and
|
||||
* read back the value we just wrote. If we don't have a PIC, we
|
||||
* will read 0xff as opposed to the value we wrote.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&i8259A_lock, flags);
|
||||
|
||||
@ -431,5 +449,9 @@ static int __init i8259A_init_ops(void)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
device_initcall(i8259A_init_ops);
|
||||
|
||||
void __init legacy_pic_pcat_compat(void)
|
||||
{
|
||||
pcat_compat = true;
|
||||
}
|
||||
|
@ -64,11 +64,6 @@ RESERVE_BRK(dmi_alloc, 65536);
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Range of the BSS area. The size of the BSS area is determined
|
||||
* at link time, with RESERVE_BRK*() facility reserving additional
|
||||
* chunks.
|
||||
*/
|
||||
unsigned long _brk_start = (unsigned long)__brk_base;
|
||||
unsigned long _brk_end = (unsigned long)__brk_base;
|
||||
|
||||
|
@ -997,6 +997,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb,
|
||||
enum es_result ret;
|
||||
long *reg_data;
|
||||
|
||||
if (user_mode(ctxt->regs))
|
||||
return ES_UNSUPPORTED;
|
||||
|
||||
switch (insn->opcode.bytes[0]) {
|
||||
/* MMIO Write */
|
||||
case 0x88:
|
||||
|
@ -414,7 +414,7 @@ SECTIONS
|
||||
.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
|
||||
__brk_base = .;
|
||||
. += 64 * 1024; /* 64k alignment slop space */
|
||||
*(.brk_reservation) /* areas brk users have reserved */
|
||||
*(.bss..brk) /* areas brk users have reserved */
|
||||
__brk_limit = .;
|
||||
}
|
||||
|
||||
|
@ -688,7 +688,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
|
||||
static inline bool emul_is_noncanonical_address(u64 la,
|
||||
struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
|
||||
return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -738,7 +738,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
|
||||
case X86EMUL_MODE_PROT64:
|
||||
*linear = la;
|
||||
va_bits = ctxt_virt_addr_bits(ctxt);
|
||||
if (get_canonical(la, va_bits) != la)
|
||||
if (!__is_canonical_address(la, va_bits))
|
||||
goto bad;
|
||||
|
||||
*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
|
||||
|
@ -674,10 +674,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
|
||||
|
||||
stimer_cleanup(stimer);
|
||||
stimer->count = count;
|
||||
if (stimer->count == 0)
|
||||
stimer->config.enable = 0;
|
||||
else if (stimer->config.auto_enable)
|
||||
stimer->config.enable = 1;
|
||||
if (!host) {
|
||||
if (stimer->count == 0)
|
||||
stimer->config.enable = 0;
|
||||
else if (stimer->config.auto_enable)
|
||||
stimer->config.enable = 1;
|
||||
}
|
||||
|
||||
if (stimer->config.enable)
|
||||
stimer_mark_pending(stimer, false);
|
||||
|
@ -2397,13 +2397,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
|
||||
{
|
||||
u32 reg = kvm_lapic_get_reg(apic, lvt_type);
|
||||
int vector, mode, trig_mode;
|
||||
int r;
|
||||
|
||||
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
|
||||
vector = reg & APIC_VECTOR_MASK;
|
||||
mode = reg & APIC_MODE_MASK;
|
||||
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
|
||||
return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
|
||||
NULL);
|
||||
|
||||
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
|
||||
if (r && lvt_type == APIC_LVTPC)
|
||||
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1640,7 +1640,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
|
||||
* value, and that something deterministic happens if the guest
|
||||
* invokes 64-bit SYSENTER.
|
||||
*/
|
||||
data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
|
||||
data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
|
||||
}
|
||||
|
||||
msr.data = data;
|
||||
@ -3132,6 +3132,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
case MSR_AMD64_PATCH_LOADER:
|
||||
case MSR_AMD64_BU_CFG2:
|
||||
case MSR_AMD64_DC_CFG:
|
||||
case MSR_AMD64_TW_CFG:
|
||||
case MSR_F15H_EX_CFG:
|
||||
break;
|
||||
|
||||
@ -3485,6 +3486,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
case MSR_AMD64_BU_CFG2:
|
||||
case MSR_IA32_PERF_CTL:
|
||||
case MSR_AMD64_DC_CFG:
|
||||
case MSR_AMD64_TW_CFG:
|
||||
case MSR_F15H_EX_CFG:
|
||||
/*
|
||||
* Intel Sandy Bridge CPUs must support the RAPL (running average power
|
||||
|
@ -156,14 +156,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
|
||||
return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
|
||||
}
|
||||
|
||||
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
|
||||
{
|
||||
return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
|
||||
}
|
||||
|
||||
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
|
||||
return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
|
||||
}
|
||||
|
||||
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
||||
|
@ -74,23 +74,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);

unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
unsigned long ret;

if (copy_mc_fragile_enabled) {
__uaccess_begin();
ret = copy_mc_fragile(dst, src, len);
ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
return ret;
}

if (static_cpu_has(X86_FEATURE_ERMS)) {
__uaccess_begin();
ret = copy_mc_enhanced_fast_string(dst, src, len);
ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
return ret;
}

return copy_user_generic(dst, src, len);
return copy_user_generic((__force void *)dst, src, len);
}
@ -4,22 +4,26 @@
#include <linux/kernel.h>

#ifdef CONFIG_X86_64
static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
{
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long vaddr = (unsigned long)unsafe_src;

/*
* Range covering the highest possible canonical userspace address
* as well as non-canonical address range. For the canonical range
* we also need to include the userspace guard page.
* Do not allow userspace addresses. This disallows
* normal userspace and the userspace guard page:
*/
return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
return false;

/*
* Allow everything during early boot before 'x86_virt_bits'
* is initialized. Needed for instruction decoding in early
* exception handlers.
*/
if (!boot_cpu_data.x86_virt_bits)
return true;

return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
}
#else
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (start >= end)
continue;

/*
* Don't confuse VM with a node that doesn't have the
* minimum amount of memory:
*/
if (end && (end - start) < NODE_MIN_SIZE)
continue;

alloc_node_data(nid);
}
@ -1409,6 +1409,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

rcu_read_lock();
/*
* Update has_rules[] flags for the updated tg's subtree. A tg is
* considered to have rules if either the tg itself or any of its

@ -1436,6 +1437,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
this_tg->latency_target = max(this_tg->latency_target,
parent_tg->latency_target);
}
rcu_read_unlock();

/*
* We're already holding queue_lock and know @tg is valid. Let's
@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;

return err;
}

@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;

return err;
}
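The pcrypt hunks translate padata_do_parallel()'s -EBUSY into -EAGAIN so callers retry instead of failing outright. A minimal userspace sketch of that return-code mapping (the helper names and the stubbed submission call are illustrative, not from this diff):

```c
#include <errno.h>
#include <stdio.h>

/* Stand-in for a parallel submission call: 0 = queued, negative = error. */
static int submit_parallel(int fake_result)
{
	return fake_result;
}

/* Map the submission result onto what the caller should see. */
static int submit_and_map(int fake_result)
{
	int err = submit_parallel(fake_result);

	if (!err)
		return -EINPROGRESS;   /* accepted, completion is asynchronous */
	if (err == -EBUSY)
		return -EAGAIN;        /* queue full: ask the caller to retry */

	return err;                    /* any other error is passed through */
}

int main(void)
{
	printf("%d %d %d\n", submit_and_map(0), submit_and_map(-EBUSY), submit_and_map(-EINVAL));
	return 0;
}
```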
@ -156,8 +156,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
return 0;

len = snprintf(modalias, size, "acpi:");
if (len <= 0)
return len;
if (len >= size)
return -ENOMEM;

size -= len;

@ -210,8 +210,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
ACPI_FREE(buf.pointer);

if (len <= 0)
return len;
if (len >= size)
return -ENOMEM;

size -= len;

of_compatible = acpi_dev->data.of_compatible;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
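Both modalias hunks add an explicit truncation check on snprintf(): the function returns the length it would have written, so a return value greater than or equal to the buffer size means the output was cut short. A small userspace sketch of the pattern (buffer size and error value chosen for illustration):

```c
#include <errno.h>
#include <stdio.h>

/* Append a prefix into buf, failing cleanly when it would not fit. */
static int append_prefix(char *buf, size_t size, const char *prefix)
{
	int len = snprintf(buf, size, "%s", prefix);

	if (len < 0)
		return len;        /* output error */
	if ((size_t)len >= size)
		return -ENOMEM;    /* would have been truncated */

	return len;                /* number of characters actually written */
}

int main(void)
{
	char small[4];

	printf("%d\n", append_prefix(small, sizeof(small), "acpi:")); /* -ENOMEM */
	printf("%d\n", append_prefix(small, sizeof(small), "of:"));   /* 3 */
	return 0;
}
```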
@ -52,6 +52,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
unsigned int irq;

if (WARN_ON(!acpi_gsi_domain_id)) {
pr_warn("GSI: No registered irqchip, giving up\n");

@ -63,7 +64,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;

return irq_create_fwspec_mapping(&fwspec);
irq = irq_create_fwspec_mapping(&fwspec);
if (!irq)
return -EINVAL;

return irq;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
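irq_create_fwspec_mapping() signals failure by returning 0 rather than a negative errno, so the hunk converts that 0 into -EINVAL before handing the value back to callers that expect "negative means error". A tiny sketch of the convention translation (the mapper here is a stub, not the real API):

```c
#include <errno.h>
#include <stdio.h>

/* Stub for a mapper that returns a positive id on success and 0 on failure. */
static unsigned int create_mapping(unsigned int want)
{
	return want;
}

/* Translate "0 means failure" into the usual negative-errno convention. */
static int register_mapping(unsigned int want)
{
	unsigned int irq = create_mapping(want);

	if (!irq)
		return -EINVAL;

	return irq;
}

int main(void)
{
	printf("%d %d\n", register_mapping(42), register_mapping(0)); /* 42 -22 */
	return 0;
}
```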
@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))

@ -380,21 +381,136 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);

static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
static const struct dmi_system_id medion_laptop[] = {
{
.ident = "MEDION P15651",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_BOARD_NAME, "M15T"),
},
},
{ }
};

static const struct dmi_system_id asus_laptop[] = {
{
.ident = "Asus Vivobook K3402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
},
},
{
.ident = "Asus Vivobook K3502ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
},
},
{
.ident = "Asus Vivobook S5402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
},
},
{
.ident = "Asus Vivobook S5602ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
.ident = "Asus ExpertBook B1402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
},
},
{
.ident = "Asus ExpertBook B1502CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
},
},
{
.ident = "Asus ExpertBook B2402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
},
},
{
/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
},
},
{
/* Asus ExpertBook B1402CVA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
},
},
{
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
},
},
{
.ident = "Asus ExpertBook B2502",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
},
},
{ }
};

struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
unsigned char triggering;
unsigned char polarity;
unsigned char shareable;
};

static const struct irq_override_cmp skip_override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
};

static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
u8 shareable)
{
res->start = gsi;
res->end = gsi;
res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
int i;

for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
const struct irq_override_cmp *entry = &skip_override_table[i];

if (dmi_check_system(entry->system) &&
entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
entry->shareable == shareable)
return false;
}

return true;
}

static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
bool legacy)
bool check_override)
{
int irq, p, t;

if (!valid_IRQ(gsi)) {
acpi_dev_irqresource_disabled(res, gsi);
irqresource_disabled(res, gsi);
return;
}

@ -408,7 +524,9 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* using extended IRQ descriptors we take the IRQ configuration
* from _CRS directly.
*/
if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
if (check_override &&
acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
!acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

@ -426,7 +544,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
res->start = irq;
res->end = irq;
} else {
acpi_dev_irqresource_disabled(res, gsi);
irqresource_disabled(res, gsi);
}
}

@ -463,7 +581,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
*/
irq = &ares->data.irq;
if (index >= irq->interrupt_count) {
acpi_dev_irqresource_disabled(res, 0);
irqresource_disabled(res, 0);
return false;
}
acpi_dev_get_irqresource(res, irq->interrupts[index],

@ -473,7 +591,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
if (index >= ext_irq->interrupt_count) {
acpi_dev_irqresource_disabled(res, 0);
irqresource_disabled(res, 0);
return false;
}
if (is_gsi(ext_irq))

@ -481,7 +599,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
ext_irq->triggering, ext_irq->polarity,
ext_irq->shareable, false);
else
acpi_dev_irqresource_disabled(res, 0);
irqresource_disabled(res, 0);
break;
default:
res->flags = 0;

@ -3830,12 +3830,14 @@ binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
bool has_transaction = false;

trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
has_transaction = true;
}
binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {

@ -3859,6 +3861,8 @@ binder_free_buf(struct binder_proc *proc,
}
binder_node_inner_unlock(buf_node);
}
trace_android_vh_binder_buffer_release(proc, thread, buffer,
has_transaction);
trace_binder_transaction_buffer_release(buffer);
binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
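Stepping back to the drivers/acpi/resource.c hunks above: the new skip_override_table keys on a DMI board match plus the exact IRQ description, and returns false to leave the firmware-described interrupt untouched. A userspace sketch of that lookup shape, with made-up DMI identity strings and a reduced entry struct (all names here are illustrative, not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the DMI identity of the running machine. */
static const char *sys_vendor = "ASUSTeK COMPUTER INC.";
static const char *board_name = "K3402ZA";

struct irq_override_entry {
	const char *vendor;
	const char *board;
	unsigned char irq;
};

/* Boards whose firmware-described IRQ 1 should be trusted as-is. */
static const struct irq_override_entry skip_table[] = {
	{ "MEDION", "M15T", 1 },
	{ "ASUSTeK COMPUTER INC.", "K3402ZA", 1 },
};

/* Return false when the interrupt should be left exactly as described. */
static bool apply_override(unsigned char irq)
{
	size_t i;

	for (i = 0; i < sizeof(skip_table) / sizeof(skip_table[0]); i++) {
		const struct irq_override_entry *e = &skip_table[i];

		if (!strcmp(e->vendor, sys_vendor) &&
		    !strcmp(e->board, board_name) &&
		    e->irq == irq)
			return false;
	}

	return true;
}

int main(void)
{
	printf("irq1 override: %d, irq12 override: %d\n", apply_override(1), apply_override(12));
	return 0;
}
```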
@ -291,6 +291,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_rw);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_before_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_after_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_est_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_adjust_alloc_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_proc_show);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool);

@ -506,6 +507,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_buffer_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_perf_huristic_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command_post_change);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_abort_success_ctrl);

@ -513,6 +515,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_rsp_check_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_handler);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_check_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_print_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bio_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_internal_blk_mq_alloc_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_internal_blk_mq_free_request);
@ -2224,7 +2224,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
char tries_buf[6] = "";
char tries_buf[16] = "";
int tag, nr_failed = 0;

if (ehc->i.flags & ATA_EHI_QUIET)
@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
if (!ctl_addr)
return -ENOMEM;

ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;
@ -2290,19 +2290,21 @@ static int get_esi(struct atm_dev *dev)
static int reset_sar(struct atm_dev *dev)
{
IADEV *iadev;
int i, error = 1;
int i, error;
unsigned int pci[64];

iadev = INPH_IA_DEV(dev);
for(i=0; i<64; i++)
if ((error = pci_read_config_dword(iadev->pci,
i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
for (i = 0; i < 64; i++) {
error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
if (error != PCIBIOS_SUCCESSFUL)
return error;
}
writel(0, iadev->reg+IPHASE5575_EXT_RESET);
for(i=0; i<64; i++)
if ((error = pci_write_config_dword(iadev->pci,
i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
for (i = 0; i < 64; i++) {
error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
if (error != PCIBIOS_SUCCESSFUL)
return error;
}
udelay(5);
return 0;
}
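The reset_sar() rework replaces assignment-inside-condition loops with explicit per-iteration error checks while saving and restoring the 64 PCI config dwords around the reset. A userspace sketch of that save/reset/restore shape with stubbed accessors (the accessors and the "reset" step are illustrative assumptions):

```c
#include <stdio.h>

#define NWORDS 64

static unsigned int fake_config[NWORDS];

/* Stub accessors: 0 on success, non-zero on failure. */
static int read_word(int idx, unsigned int *val) { *val = fake_config[idx]; return 0; }
static int write_word(int idx, unsigned int val) { fake_config[idx] = val; return 0; }

static int reset_with_config_preserved(void)
{
	unsigned int saved[NWORDS];
	int i, error;

	for (i = 0; i < NWORDS; i++) {
		error = read_word(i, &saved[i]);
		if (error)
			return error;
	}

	/* ... the device reset itself would happen here ... */

	for (i = 0; i < NWORDS; i++) {
		error = write_word(i, saved[i]);
		if (error)
			return error;
	}

	return 0;
}

int main(void)
{
	printf("reset: %d\n", reset_with_config_preserved());
	return 0;
}
```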
@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct sk_buff *skb;
unsigned int len;

spin_lock(&card->cli_queue_lock);
spin_lock_bh(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
spin_unlock(&card->cli_queue_lock);
spin_unlock_bh(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");

@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
struct pkt_hdr *header;

/* Remove any yet-to-be-transmitted packets from the pending queue */
spin_lock(&card->tx_queue_lock);
spin_lock_bh(&card->tx_queue_lock);
skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
if (SKB_CB(skb)->vcc == vcc) {
skb_unlink(skb, &card->tx_queue[port]);
solos_pop(vcc, skb);
}
}
spin_unlock(&card->tx_queue_lock);
spin_unlock_bh(&card->tx_queue_lock);

skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
@ -1187,8 +1187,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
else if (drv->remove)
drv->remove(dev);

device_links_driver_cleanup(dev);

devres_release_all(dev);
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);

@ -1200,6 +1198,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
pm_runtime_reinit(dev);
dev_pm_set_driver_flags(dev, 0);

device_links_driver_cleanup(dev);

klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
if (dev->bus)
@ -367,6 +367,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->devcd_dev.class = &devcd_class;

mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
if (device_add(&devcd->devcd_dev))
goto put_device;

@ -378,6 +379,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
"devcoredump"))
/* nothing - symlink will be missing */;

dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
mutex_unlock(&devcd->mutex);
@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
name = map->dev->driver->name;

ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
if (ret < 0) {
if (ret >= PAGE_SIZE) {
kfree(buf);
return ret;
}

@ -1511,7 +1511,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)

/* If the user didn't specify a name match any */
if (data)
return !strcmp((*r)->name, data);
return (*r)->name && !strcmp((*r)->name, data);
else
return 1;
}

@ -1643,17 +1643,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
}

if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
unsigned int ival, offset;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map,
reg + regmap_get_offset(map, i),
ival);

/* Cache the last written value for noinc writes */
i = noinc ? val_len - val_bytes : 0;
for (; i < val_len; i += val_bytes) {
ival = map->format.parse_val(val + i);
offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
ret = regcache_write(map, reg + offset, ival);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
reg + regmap_get_offset(map, i), ret);
reg + offset, ret);
return ret;
}
}
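The last regmap hunk reworks the cache loop so a "noinc" write (a stream aimed at one fixed register) caches only the final value at offset 0, while a normal bulk write caches each value at its own register offset. A small userspace sketch of that indexing, with a plain array standing in for the register cache (all names here are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define VAL_BYTES 4

/* Toy register cache: one slot per register. */
static unsigned int cache[8];

static void cache_raw_write(unsigned int reg, const unsigned char *val,
			    unsigned int val_len, bool noinc)
{
	unsigned int i, ival, offset;

	/* For noinc, only the last value is worth keeping. */
	i = noinc ? val_len - VAL_BYTES : 0;
	for (; i < val_len; i += VAL_BYTES) {
		ival = (unsigned int)val[i];              /* stand-in for parse_val() */
		offset = noinc ? 0 : i / VAL_BYTES;       /* fixed register vs. advancing */
		cache[reg + offset] = ival;
	}
}

int main(void)
{
	const unsigned char data[12] = { 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 };

	cache_raw_write(2, data, sizeof(data), false);
	printf("bulk:  %u %u %u\n", cache[2], cache[3], cache[4]);   /* 1 2 3 */

	cache_raw_write(6, data, sizeof(data), true);
	printf("noinc: %u\n", cache[6]);                             /* 3 */
	return 0;
}
```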
@ -415,6 +415,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },

/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },

/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },

@ -3095,6 +3107,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
goto err_free_wc;
}

if (data->evt_skb == NULL)
goto err_free_wc;

/* Parse and handle the return WMT event */
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
if (wmt_evt->whdr.op != hdr->op) {
@ -67,7 +67,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct vhci_data *data = hci_get_drvdata(hdev);

memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);

wake_up_interruptible(&data->read_wait);
return 0;
@ -58,7 +58,8 @@ struct amd_geode_priv {

static int geode_rng_data_read(struct hwrng *rng, u32 *data)
{
void __iomem *mem = (void __iomem *)rng->priv;
struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
void __iomem *mem = priv->membase;

*data = readl(mem + GEODE_RNG_DATA_REG);

@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)

static int geode_rng_data_present(struct hwrng *rng, int wait)
{
void __iomem *mem = (void __iomem *)rng->priv;
struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
void __iomem *mem = priv->membase;
int data, i;

for (i = 0; i < 20; i++) {
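Both geode-rng hunks stop treating rng->priv as the mapped register base directly and instead treat it as a pointer to a private struct that carries the base along with other per-device state. A userspace sketch of stashing a struct behind an integer-typed priv field and recovering it (the field names echo the diff, but the surrounding types are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

struct fake_rng {
	unsigned long priv;   /* opaque slot owned by the driver */
};

struct geode_like_priv {
	uint32_t *membase;    /* stand-in for the mapped register window */
	int some_other_state;
};

static uint32_t read_data_reg(struct fake_rng *rng)
{
	/* Recover the private struct first, then go through it to the registers. */
	struct geode_like_priv *priv = (struct geode_like_priv *)rng->priv;

	return priv->membase[0];
}

int main(void)
{
	uint32_t regs[4] = { 0xdeadbeef, 0, 0, 0 };
	struct geode_like_priv priv = { .membase = regs, .some_other_state = 1 };
	struct fake_rng rng = { .priv = (unsigned long)&priv };

	printf("0x%x\n", read_data_reg(&rng));
	return 0;
}
```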