Merge "Merge keystone/android-mainline-keystone-qcom-release.5.18.0 (05713a3
) into msm-pineapple"
This commit is contained in:
commit
0027140a17
@@ -14381,7 +14381,6 @@ F: arch/arm/*omap*/*pm*
F: drivers/cpufreq/omap-cpufreq.c

OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
M: Rajendra Nayak <rnayak@codeaurora.org>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Superb Owl

# *DOCUMENTATION*
@@ -1 +1 @@
3361d46a39aa14c50c90c89da13e4f94c7386098
a80c9ffa86c5919c10a73b47c4fe218d72a5838b
(File diff suppressed because it is too large)
@@ -100,12 +100,14 @@ w83773g@4c {
lm25066@40 {
compatible = "lm25066";
reg = <0x40>;
shunt-resistor-micro-ohms = <1000>;
};

/* 12VSB PMIC */
lm25066@41 {
compatible = "lm25066";
reg = <0x41>;
shunt-resistor-micro-ohms = <10000>;
};
};
@@ -196,7 +198,7 @@ &gpio {
gpio-line-names =
/* A */ "LOCATORLED_STATUS_N", "BMC_MAC2_INTB", "NMI_BTN_N", "BMC_NMI",
"", "", "", "",
/* B */ "DDR_MEM_TEMP", "", "", "", "", "", "", "",
/* B */ "POST_COMPLETE_N", "", "", "", "", "", "", "",
/* C */ "", "", "", "", "PCIE_HP_SEL_N", "PCIE_SATA_SEL_N", "LOCATORBTN", "",
/* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON",
"", "", "", "PSU_FAN_FAIL_N",
@@ -117,9 +117,9 @@ pinctrl_fwspid_default: fwspid_default {
groups = "FWSPID";
};

pinctrl_fwqspid_default: fwqspid_default {
function = "FWSPID";
groups = "FWQSPID";
pinctrl_fwqspi_default: fwqspi_default {
function = "FWQSPI";
groups = "FWQSPI";
};

pinctrl_fwspiwp_default: fwspiwp_default {
@@ -653,12 +653,12 @@ pinctrl_pwm9g1_default: pwm9g1_default {
};

pinctrl_qspi1_default: qspi1_default {
function = "QSPI1";
function = "SPI1";
groups = "QSPI1";
};

pinctrl_qspi2_default: qspi2_default {
function = "QSPI2";
function = "SPI2";
groups = "QSPI2";
};
@@ -389,6 +389,16 @@ sbc: secure-boot-controller@1e6f2000 {
reg = <0x1e6f2000 0x1000>;
};

video: video@1e700000 {
compatible = "aspeed,ast2600-video-engine";
reg = <0x1e700000 0x1000>;
clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
<&syscon ASPEED_CLK_GATE_ECLK>;
clock-names = "vclk", "eclk";
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};

gpio0: gpio@1e780000 {
#gpio-cells = <2>;
gpio-controller;
@@ -1145,7 +1145,7 @@ vector_bhb_loop8_\name:

@ bhb workaround
mov r0, #8
3: b . + 4
3: W(b) . + 4
subs r0, r0, #1
bne 3b
dsb
@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}

void cpu_v7_bugs_init(void)
@@ -622,6 +622,10 @@ &qupv3_id_2 {
status = "okay";
};

&rxmacro {
status = "okay";
};

&slpi {
status = "okay";
firmware-name = "qcom/sm8250/slpi.mbn";
@@ -773,6 +777,8 @@ right_spkr: wsa8810-left@0,4{
};

&swr1 {
status = "okay";

wcd_rx: wcd9380-rx@0,4 {
compatible = "sdw20217010d00";
reg = <0 4>;
@@ -781,6 +787,8 @@ wcd_rx: wcd9380-rx@0,4 {
};

&swr2 {
status = "okay";

wcd_tx: wcd9380-tx@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
@@ -819,6 +827,10 @@ config {
};
};

&txmacro {
status = "okay";
};

&uart12 {
status = "okay";
};
@@ -2255,6 +2255,7 @@ rxmacro: rxmacro@3200000 {
pinctrl-0 = <&rx_swr_active>;
compatible = "qcom,sm8250-lpass-rx-macro";
reg = <0 0x3200000 0 0x1000>;
status = "disabled";

clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2273,6 +2274,7 @@ rxmacro: rxmacro@3200000 {
swr1: soundwire-controller@3210000 {
reg = <0 0x3210000 0 0x2000>;
compatible = "qcom,soundwire-v1.5.1";
status = "disabled";
interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rxmacro>;
clock-names = "iface";
@@ -2300,6 +2302,7 @@ txmacro: txmacro@3220000 {
pinctrl-0 = <&tx_swr_active>;
compatible = "qcom,sm8250-lpass-tx-macro";
reg = <0 0x3220000 0 0x1000>;
status = "disabled";

clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2323,6 +2326,7 @@ swr2: soundwire-controller@3230000 {
compatible = "qcom,soundwire-v1.5.1";
interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core";
status = "disabled";

clocks = <&txmacro>;
clock-names = "iface";
@@ -16,6 +16,7 @@ / {

aliases {
ethernet0 = &gmac0;
ethernet1 = &gmac1;
mmc0 = &sdmmc0;
mmc1 = &sdhci;
};
@@ -78,7 +79,6 @@ &gmac0 {
assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>, <&cru CLK_MAC0_2TOP>;
clock_in_out = "input";
phy-handle = <&rgmii_phy0>;
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&gmac0_miim
@@ -90,8 +90,38 @@ &gmac0_rgmii_clk
snps,reset-active-low;
/* Reset time is 20ms, 100ms for rtl8211f */
snps,reset-delays-us = <0 20000 100000>;
tx_delay = <0x4f>;
rx_delay = <0x0f>;
status = "okay";

fixed-link {
speed = <1000>;
full-duplex;
pause;
};
};

&gmac1 {
assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
clock_in_out = "output";
phy-handle = <&rgmii_phy1>;
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&gmac1m1_miim
&gmac1m1_tx_bus2
&gmac1m1_rx_bus2
&gmac1m1_rgmii_clk
&gmac1m1_rgmii_bus>;

snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
/* Reset time is 20ms, 100ms for rtl8211f */
snps,reset-delays-us = <0 20000 100000>;

tx_delay = <0x3c>;
rx_delay = <0x2f>;

status = "okay";
};

@@ -315,8 +345,8 @@ &i2c5 {
status = "disabled";
};

&mdio0 {
rgmii_phy0: ethernet-phy@0 {
&mdio1 {
rgmii_phy1: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0x0>;
};
@@ -345,9 +375,9 @@ &pmu_io_domains {
pmuio2-supply = <&vcc3v3_pmu>;
vccio1-supply = <&vccio_acodec>;
vccio3-supply = <&vccio_sd>;
vccio4-supply = <&vcc_1v8>;
vccio4-supply = <&vcc_3v3>;
vccio5-supply = <&vcc_3v3>;
vccio6-supply = <&vcc_3v3>;
vccio6-supply = <&vcc_1v8>;
vccio7-supply = <&vcc_3v3>;
status = "okay";
};
@@ -76,6 +76,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
}

/* ensure the tags are visible before the PTE is set */
smp_wmb();
}

int memcmp_pages(struct page *page1, struct page *page2)
@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

struct pv_time_stolen_time_region {
struct pvclock_vcpu_stolen_time *kaddr;
struct pvclock_vcpu_stolen_time __rcu *kaddr;
};

static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
/* return stolen time in ns by asking the hypervisor */
static u64 para_steal_clock(int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
u64 ret = 0;

reg = per_cpu_ptr(&stolen_time_region, cpu);

@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
* online notification callback runs. Until the callback
* has run we just return zero.
*/
if (!reg->kaddr)
rcu_read_lock();
kaddr = rcu_dereference(reg->kaddr);
if (!kaddr) {
rcu_read_unlock();
return 0;
}

return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
rcu_read_unlock();
return ret;
}

static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;

reg = this_cpu_ptr(&stolen_time_region);
if (!reg->kaddr)
return 0;

memunmap(reg->kaddr);
memset(reg, 0, sizeof(*reg));
kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
synchronize_rcu();
memunmap(kaddr);

return 0;
}

static int stolen_time_cpu_online(unsigned int cpu)
{
struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;

@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return -EINVAL;

reg->kaddr = memremap(res.a0,
kaddr = memremap(res.a0,
sizeof(struct pvclock_vcpu_stolen_time),
MEMREMAP_WB);

rcu_assign_pointer(reg->kaddr, kaddr);

if (!reg->kaddr) {
pr_warn("Failed to map stolen time data structure\n");
return -ENOMEM;
}

if (le32_to_cpu(reg->kaddr->revision) != 0 ||
le32_to_cpu(reg->kaddr->attributes) != 0) {
if (le32_to_cpu(kaddr->revision) != 0 ||
le32_to_cpu(kaddr->attributes) != 0) {
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
return -ENXIO;
}
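A note on the steal-time hunks above: the shared stolen-time pointer becomes `__rcu`-annotated so a reader racing with CPU hot-unplug can never dereference a mapping that is mid-teardown. Below is a minimal sketch of that publish/read/retract lifecycle in isolation; `struct region`, `struct data`, and the function names are illustrative stand-ins, while rcu_read_lock()/rcu_dereference(), rcu_replace_pointer(), and synchronize_rcu() are the actual primitives the diff uses.

```c
#include <linux/rcupdate.h>
#include <linux/io.h>

struct data { u64 value; };			/* stand-in for pvclock_vcpu_stolen_time */
struct region { struct data __rcu *ptr; };	/* stand-in for pv_time_stolen_time_region */

static u64 read_value(struct region *reg)
{
	struct data *p;
	u64 val = 0;

	rcu_read_lock();
	p = rcu_dereference(reg->ptr);	/* may observe NULL during teardown */
	if (p)
		val = READ_ONCE(p->value);
	rcu_read_unlock();
	return val;
}

static void retract(struct region *reg)
{
	/* Swap the pointer out first, then wait for every reader to finish. */
	struct data *p = rcu_replace_pointer(reg->ptr, NULL, true);

	if (p) {
		synchronize_rcu();	/* no reader can still hold p after this */
		memunmap(p);		/* only now is the mapping safe to drop */
	}
}
```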
@@ -37,6 +37,15 @@
* safe memory that has been set up to be preserved during the copy operation.
*/
SYM_CODE_START(arm64_relocate_new_kernel)
/*
* The kimage structure isn't allocated specially and may be clobbered
* during relocation. We must load any values we need from it prior to
* any relocation occurring.
*/
ldr x28, [x0, #KIMAGE_START]
ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]

/* Setup the list loop variables. */
ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
ic iallu
dsb nsh
isb
ldr x4, [x0, #KIMAGE_START] /* relocation start */
ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
turn_off_mmu x12, x13

/* Start new image. */
cbz x1, .Lel1
mov x1, x4 /* relocation start */
mov x2, x0 /* dtb address */
cbz x27, .Lel1
mov x1, x28 /* kernel entry point */
mov x2, x26 /* dtb address */
mov x3, xzr
mov x4, xzr
mov x0, #HVC_SOFT_RESTART
hvc #0 /* Jumps from el2 */
.Lel1:
mov x0, x26 /* dtb address */
mov x1, xzr
mov x2, xzr
mov x3, xzr
br x4 /* Jumps from el1 */
br x28 /* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
@@ -1436,7 +1436,8 @@ static int kvm_init_vector_slots(void)
base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);

if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
if (kvm_system_needs_idmapped_vectors() &&
!is_protected_kvm_enabled()) {
err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
__BP_HARDEN_HYP_VECS_SZ, &base);
if (err)
@@ -1123,8 +1123,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
if (irqchip_in_kernel(vcpu->kvm) &&
vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
if (kvm_vgic_global_state.type == VGIC_V3) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
}
@@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page);
flush_kernel_icache_range_asm(s,e); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len);
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);

@@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma,
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page)) {
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(void *addr)
@@ -26,12 +26,14 @@
#define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from))

struct page;
struct vm_area_struct;

void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
* These are used to make use of C type-checking..
@@ -27,6 +27,7 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
@@ -91,7 +92,7 @@ static inline void flush_data_cache(void)
}

/* Virtual address of pfn. */
/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))

void
@@ -124,11 +125,13 @@ show_cache_info(struct seq_file *m)
cache_info.ic_size/1024 );
if (cache_info.dc_loop != 1)
snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
((cache_info.dc_loop == 1) ? "direct mapped" : buf));
((cache_info.dc_loop == 1) ? "direct mapped" : buf),
cache_info.dc_conf.cc_alias
);
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
@@ -324,25 +327,81 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}

static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
if (!static_branch_likely(&parisc_has_cache))
return;
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif

vmaddr &= PAGE_MASK;

preempt_disable();
purge_dcache_page_asm(physaddr, vmaddr);

/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);

flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);

/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);

preempt_enable();
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
pgd_t *pgd = mm->pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;

if (!pgd_none(*pgd)) {
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
}
}
}
return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping_file(page);
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
unsigned long count = 0;
pgoff_t pgoff;

if (mapping && !mapping_mapped(mapping)) {
@@ -357,33 +416,52 @@ void flush_dcache_page(struct page *page)

pgoff = page->index;

/* We have carefully arranged in arch_get_unmapped_area() that
/*
* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */

* to flush one address here for them all to become coherent
* on machines that support equivalent aliasing
*/
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
if (parisc_requires_coherency()) {
pte_t *ptep;

/* The TLB is the engine of coherence on parisc: The
* CPU is entitled to speculate any page with a TLB
* mapping, so here we kill the mapping then flush the
* page along a special flush only alias mapping.
* This guarantees that the page is no-longer in the
* cache for any process and nor may it be
* speculatively read in (until the user or kernel
* specifically accesses it, of course) */

flush_tlb_page(mpnt, addr);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
if (parisc_requires_coherency() && old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
old_addr = addr;
ptep = get_ptep(mpnt->vm_mm, addr);
if (ptep && pte_needs_flush(*ptep))
flush_user_cache_page(mpnt, addr);
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
flush_tlb_page(mpnt, addr);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(mpnt->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, mpnt->vm_file);
old_addr = addr;
}
}
WARN_ON(++count == 4096);
}
flush_dcache_mmap_unlock(mapping);
}
@@ -403,7 +481,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
unsigned long threshold;
unsigned long threshold, threshold2;

alltime = mfctl(16);
flush_data_cache();
@@ -417,11 +495,16 @@
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);

threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
if (threshold > cache_info.dc_size)
threshold = cache_info.dc_size;
if (threshold)
parisc_cache_flush_threshold = threshold;
threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
pr_info("Calculated flush threshold is %lu KiB\n",
threshold/1024);

/*
* The threshold computed above isn't very reliable. The following
* heuristic works reasonably well on c8000/rp3440.
*/
threshold2 = cache_info.dc_size * num_online_cpus();
parisc_cache_flush_threshold = threshold2;
printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);
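One detail worth noting in the threshold hunk above: the old `size * alltime / rangetime` multiplies in `unsigned long`, which can wrap on 32-bit parisc before the division ever happens, so the replacement widens one operand with a `(uint64_t)` cast. A standalone illustration with hypothetical cycle counts, assuming a machine where `unsigned long` is 32 bits:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical measurements; not taken from the patch. */
	uint32_t size = 16u * 1024 * 1024;		/* bytes flushed */
	uint32_t alltime = 500000, rangetime = 400000;	/* cycle counts */

	uint32_t bad  = size * alltime / rangetime;		/* product wraps at 2^32 */
	uint64_t good = (uint64_t)size * alltime / rangetime;	/* full-width product */

	printf("wrapped: %u, correct: %llu\n", bad, (unsigned long long)good);
	return 0;
}
```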
@@ -477,19 +560,47 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
{
/* Copy using kernel mapping. No coherency is needed (all in
kunmap) for the `to' page. However, the `from' page needs to
be flushed through a mapping equivalent to the user mapping
before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
copy_page_asm(vto, vfrom);
preempt_enable();
pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
if (ptep && pte_needs_flush(*ptep))
flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;

kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
*
@@ -520,92 +631,105 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
return 0;
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long usize = 0;

for (vma = mm->mmap; vma; vma = vma->vm_next)
usize += vma->vm_end - vma->vm_start;
return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
pte_t *ptep = NULL;

if (!pgd_none(*pgd)) {
p4d_t *p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud_t *pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd_t *pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
}
}
}
return ptep;
}

static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long start, unsigned long end)
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;

for (addr = start; addr < end; addr += PAGE_SIZE) {
ptep = get_ptep(mm->pgd, addr);
if (ptep) {
pfn = pte_pfn(*ptep);
flush_cache_page(vma, addr, pfn);
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep && pte_needs_flush(*ptep)) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
pfn = pte_pfn(*ptep);
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long usize = 0;

for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
usize += vma->vm_end - vma->vm_start;
return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;

/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
mm_total_size(mm) >= parisc_cache_flush_threshold) {
if (mm->context.space_id)
flush_tlb_all();
/*
* Flushing the whole cache on each cpu takes forever on
* rp3440, etc. So, avoid it if the mm isn't too big.
*
* Note that we must flush the entire cache on machines
* with aliasing caches to prevent random segmentation
* faults.
*/
if (!parisc_requires_coherency()
|| mm_total_size(mm) >= parisc_cache_flush_threshold) {
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_all();
flush_cache_all();
return;
}

/* Flush mm */
for (vma = mm->mmap; vma; vma = vma->vm_next)
flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
end - start >= parisc_cache_flush_threshold) {
if (vma->vm_mm->context.space_id)
flush_tlb_range(vma, start, end);
if (!parisc_requires_coherency()
|| end - start >= parisc_cache_flush_threshold) {
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
return;
}

flush_cache_pages(vma, vma->vm_mm, start, end);
flush_cache_pages(vma, start, end);
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
if (likely(vma->vm_mm->context.space_id)) {
flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
} else {
__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (!PageAnon(page))
return;

if (parisc_requires_coherency()) {
flush_user_cache_page(vma, vmaddr);
return;
}

flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,

*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
raw_spin_lock_irqsave(&patch_lock, *flags);

return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);

if (flags)
raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
raw_spin_unlock_irqrestore(&patch_lock, *flags);
}

void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped;

/* Make sure we don't have any aliases in cache */
flush_kernel_vmap_range(addr, len);
flush_icache_range(start, end);
flush_kernel_dcache_range_asm(start, end);
flush_kernel_icache_range_asm(start, end);
flush_tlb_kernel_range(start, end);

p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);

@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
* We're crossing a page boundary, so
* need to remap
*/
flush_kernel_vmap_range((void *)fixmap,
(p-fixmap) * sizeof(*p));
flush_kernel_dcache_range_asm((unsigned long)fixmap,
(unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap,
(unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
}
}

flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
flush_icache_range(start, end);
}

void __kprobes __patch_text(void *addr, u32 insn)
@@ -22,6 +22,8 @@

#include <asm/traps.h>

#define DEBUG_NATLB 0

/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
@@ -450,8 +452,8 @@ handle_nadtlb_fault(struct pt_regs *regs)
fallthrough;
case 0x380:
/* PDC and FIC instructions */
if (printk_ratelimit()) {
pr_warn("BUG: nullifying cache flush/purge instruction\n");
if (DEBUG_NATLB && printk_ratelimit()) {
pr_warn("WARNING: nullifying cache flush/purge instruction\n");
show_regs(regs);
}
if (insn & 0x20) {
@@ -366,7 +366,7 @@ gpio0: gpio@20120000 {

gpio1: gpio@20121000 {
compatible = "microchip,mpfs-gpio";
reg = <000 0x20121000 0x0 0x1000>;
reg = <0x0 0x20121000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupt-controller;
#interrupt-cells = <1>;
@@ -167,7 +167,7 @@ uart0: serial@10010000 {
clocks = <&prci FU540_PRCI_CLK_TLCLK>;
status = "disabled";
};
dma: dma@3000000 {
dma: dma-controller@3000000 {
compatible = "sifive,fu540-c000-pdma";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;
@@ -1914,7 +1914,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct hv_send_ipi_ex send_ipi_ex;
struct hv_send_ipi send_ipi;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
unsigned long valid_bank_mask;
u64 valid_bank_mask;
u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
u32 vector;
bool all_cpus;
@@ -1956,7 +1956,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

if (hc->var_cnt != bitmap_weight(&valid_bank_mask, 64))
if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
return HV_STATUS_INVALID_HYPERCALL_INPUT;

if (all_cpus)
@@ -5470,14 +5470,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
uint i;

if (pcid == kvm_get_active_pcid(vcpu)) {
mmu->invlpg(vcpu, gva, mmu->root.hpa);
if (mmu->invlpg)
mmu->invlpg(vcpu, gva, mmu->root.hpa);
tlb_flush = true;
}

for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
if (mmu->invlpg)
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
tlb_flush = true;
}
}
@@ -5665,6 +5667,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
bool unstable;

restart:
list_for_each_entry_safe_reverse(sp, node,
@@ -5696,11 +5699,12 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
goto restart;
}

if (__kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
batch += nr_zapped;
unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
&kvm->arch.zapped_obsolete_pages, &nr_zapped);
batch += nr_zapped;

if (unstable)
goto restart;
}
}

/*
@@ -171,9 +171,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
return true;
}

static int cmp_u64(const void *a, const void *b)
static int cmp_u64(const void *pa, const void *pb)
{
return *(__u64 *)a - *(__u64 *)b;
u64 a = *(u64 *)pa;
u64 b = *(u64 *)pb;

return (a > b) - (a < b);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
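For context on the cmp_u64() change above: returning the difference of two u64 values from an int comparator truncates the result, so a large difference can come back with the wrong sign and a qsort-style sort misorders the array. The `(a > b) - (a < b)` idiom always yields -1, 0, or 1. A minimal userspace demonstration (not kernel code):

```c
#include <stdio.h>
#include <stdint.h>

/* Broken: the 64-bit difference is truncated to int, losing the sign. */
static int cmp_bad(uint64_t a, uint64_t b)
{
	return (int)(a - b);
}

/* Correct: compare instead of subtracting; result is always -1, 0 or 1. */
static int cmp_good(uint64_t a, uint64_t b)
{
	return (a > b) - (a < b);
}

int main(void)
{
	uint64_t a = 0x100000000ULL, b = 1;	/* a > b, yet a - b truncates to -1 */

	printf("bad: %d good: %d\n", cmp_bad(a, b), cmp_good(a, b));
	return 0;
}
```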
@@ -742,6 +742,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,

if (at_head) {
list_add(&rq->queuelist, &per_prio->dispatch);
rq->fifo_time = jiffies;
} else {
deadline_add_rq_rb(per_prio, rq);
@@ -5,4 +5,5 @@
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules

BUILD_GKI_CERTIFICATION_TOOLS=1
BUILD_GKI_ARTIFACTS=1
BUILD_GKI_BOOT_IMG_SIZE=67108864
@@ -117,6 +117,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;

if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
return;

tmp_diff = abs(req->rate - tmp_rate);

if (*best_diff < 0 || *best_diff >= tmp_diff) {
@@ -941,6 +941,7 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
u32 div, mindiv, maxdiv;

do_div(temp, rate);
div = temp;
div &= ~unused_frac_mask;
@@ -298,10 +298,6 @@ static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = {
.hw_clks = &sun6i_rtc_ccu_hw_clks,
};

static const struct clk_parent_data sun50i_h6_osc32k_fanout_parents[] = {
{ .hw = &osc32k_clk.common.hw },
};

static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = {
{ .hw = &osc32k_clk.common.hw },
{ .fw_name = "pll-32k" },
@@ -314,13 +310,6 @@ static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = {
{ .hw = &osc24M_32k_clk.common.hw }
};

static const struct sun6i_rtc_match_data sun50i_h6_rtc_ccu_data = {
.have_ext_osc32k = true,
.have_iosc_calibration = true,
.osc32k_fanout_parents = sun50i_h6_osc32k_fanout_parents,
.osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h6_osc32k_fanout_parents),
};

static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = {
.have_iosc_calibration = true,
.rtc_32k_single_parent = true,
@@ -335,10 +324,6 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
};

static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
.compatible = "allwinner,sun50i-h6-rtc",
.data = &sun50i_h6_rtc_ccu_data,
},
{
.compatible = "allwinner,sun50i-h616-rtc",
.data = &sun50i_h616_rtc_ccu_data,
@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
break;
}
} while (currsize < max);
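The qcom-rng hunk above caps the final memcpy() at `max - currsize` bytes instead of a full word, which previously overran the caller's buffer whenever `max` was not a multiple of the word size. The generic shape of that fill loop, sketched with a hypothetical next_word() entropy source:

```c
#include <stdint.h>
#include <string.h>
#include <stddef.h>

extern uint32_t next_word(void);	/* hypothetical 32-bit entropy source */

static size_t fill(uint8_t *data, size_t max)
{
	size_t currsize = 0;

	while (currsize < max) {
		uint32_t val = next_word();

		if (max - currsize >= sizeof(val)) {
			memcpy(data + currsize, &val, sizeof(val));
			currsize += sizeof(val);
		} else {
			/* Tail: copy only the bytes still wanted. */
			memcpy(data + currsize, &val, max - currsize);
			currsize = max;
		}
	}
	return currsize;
}
```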
@@ -407,6 +407,7 @@ static inline int is_dma_buf_file(struct file *file)

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

@@ -416,6 +417,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);

/*
* The ->i_ino acquired from get_next_ino() is not unique thus
* not suitable for using it as dentry name by dmabuf stats.
* Override ->i_ino with the unique and dmabuffs specific
* value.
*/
inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;

if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;

val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX + 1ULL)
@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
u32 val;

if (port->sdata && port->sdata->have_paddr)
vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}

vf610_gpio_set(chip, gpio, value);
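The vf610 hunk above turns a blind write of the shared direction register into a read-modify-write, so configuring one pad no longer clears the direction bits of every other pad in PDDR. The idiom in isolation, assuming the standard readl()/writel() MMIO accessors and an illustrative register pointer:

```c
#include <linux/io.h>
#include <linux/bits.h>

/* Set one bit in a shared register without disturbing its neighbours. */
static void set_direction_output(void __iomem *pddr, unsigned int gpio)
{
	u32 val = readl(pddr);	/* read the current direction bits */

	val |= BIT(gpio);	/* touch only our pad's bit */
	writel(val, pddr);	/* write the merged value back */
}
```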
@@ -1342,9 +1342,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
@@ -1045,6 +1045,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
(pm_suspend_target_state == PM_SUSPEND_MEM);
}

/**
* amdgpu_acpi_should_gpu_reset
*
* @adev: amdgpu_device_pointer
*
* returns true if should reset GPU, false if not
*/
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}

/**
* amdgpu_acpi_is_s0ix_active
*
@@ -2336,7 +2336,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

if (!adev->in_s0ix)
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);

return 0;
@@ -4852,6 +4852,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,

mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
kfree(mst_edid);
}

/**
@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}

static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
int header_ver, u8 dmc_id)
{
struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
u32 start_range, end_range;
int i;

if (dmc_id >= DMC_FW_MAX) {
drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
return false;
}

if (header_ver == 1) {
start_range = DMC_MMIO_START_RANGE;
end_range = DMC_MMIO_END_RANGE;
} else if (dmc_id == DMC_FW_MAIN) {
start_range = TGL_MAIN_MMIO_START;
end_range = TGL_MAIN_MMIO_END;
} else if (DISPLAY_VER(i915) >= 13) {
start_range = ADLP_PIPE_MMIO_START;
end_range = ADLP_PIPE_MMIO_END;
} else if (DISPLAY_VER(i915) >= 12) {
start_range = TGL_PIPE_MMIO_START(dmc_id);
end_range = TGL_PIPE_MMIO_END(dmc_id);
} else {
drm_warn(&i915->drm, "Unknown mmio range for sanity check");
return false;
}

for (i = 0; i < mmio_count; i++) {
if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
return false;
}

return true;
}

static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}

if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
dmc_header->header_ver, dmc_id)) {
drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
return 0;
}

for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
@@ -1252,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch,
* Only attempt to pin the batch buffer to ggtt if the current batch
* is not inside ggtt, or the batch buffer is not misplaced.
*/
if (!i915_is_ggtt(batch->vm)) {
if (!i915_is_ggtt(batch->vm) ||
!i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
} else if (i915_vma_is_map_and_fenceable(batch)) {
__i915_vma_pin(batch);
vma = batch;
}

if (vma == ERR_PTR(-EDEADLK))
@@ -806,7 +806,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
__intel_engine_reset(engine, stalled_mask & engine->mask);
local_bh_enable();

intel_uc_reset(&gt->uc, true);
intel_uc_reset(&gt->uc, ALL_ENGINES);

intel_ggtt_restore_fences(gt->ggtt);
@@ -438,7 +438,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);
@@ -1590,9 +1590,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}

static void __guc_reset_context(struct intel_context *ce, bool stalled)
static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{
bool local_stalled;
bool guilty;
struct i915_request *rq;
unsigned long flags;
u32 head;
@@ -1620,7 +1620,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
if (!intel_context_is_pinned(ce))
goto next_context;

local_stalled = false;
guilty = false;
rq = intel_context_find_active_request(ce);
if (!rq) {
head = ce->ring->tail;
@@ -1628,14 +1628,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
}

if (i915_request_started(rq))
local_stalled = true;
guilty = stalled & ce->engine->mask;

GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head);

__i915_request_reset(rq, local_stalled && stalled);
__i915_request_reset(rq, guilty);
out_replay:
guc_reset_state(ce, head, local_stalled && stalled);
guc_reset_state(ce, head, guilty);
next_context:
if (i != number_children)
ce = list_next_entry(ce, parallel.child_link);
@@ -1645,7 +1645,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
intel_context_put(parent);
}

void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
struct intel_context *ce;
unsigned long index;
@@ -4013,7 +4013,7 @@ static void guc_context_replay(struct intel_context *ce)
{
struct i915_sched_engine *sched_engine = ce->engine->sched_engine;

__guc_reset_context(ce, true);
__guc_reset_context(ce, ce->engine->mask);
tasklet_hi_schedule(&sched_engine->tasklet);
}
@@ -593,7 +593,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
__uc_sanitize(uc);
}

void intel_uc_reset(struct intel_uc *uc, bool stalled)
void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
struct intel_guc *guc = &uc->guc;
@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_reset(struct intel_uc *uc, bool stalled);
void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
void intel_uc_reset_finish(struct intel_uc *uc);
void intel_uc_cancel_requests(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
@@ -5501,6 +5501,22 @@

/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
#define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF
#define DMC_V1_MMIO_START_RANGE 0x80000
#define TGL_MAIN_MMIO_START 0x8F000
#define TGL_MAIN_MMIO_END 0x8FFFF
#define _TGL_PIPEA_MMIO_START 0x92000
#define _TGL_PIPEA_MMIO_END 0x93FFF
#define _TGL_PIPEB_MMIO_START 0x96000
#define _TGL_PIPEB_MMIO_END 0x97FFF
#define ADLP_PIPE_MMIO_START 0x5F000
#define ADLP_PIPE_MMIO_END 0x5FFFF

#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
_TGL_PIPEB_MMIO_START)

#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
_TGL_PIPEB_MMIO_END)

#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
@@ -82,6 +82,7 @@

#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */

/* Hardware Descriptor Constants - Control Field */
#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
@@ -175,6 +176,8 @@ struct ismt_priv {
u8 head; /* ring buffer head pointer */
struct completion cmp; /* interrupt completion */
u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
dma_addr_t log_dma;
u32 *log;
};

static const struct pci_device_id ismt_ids[] = {
@@ -411,6 +414,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
memset(desc, 0, sizeof(struct ismt_desc));
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);

/* Always clear the log entries */
memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));

/* Initialize common control bits */
if (likely(pci_dev_msi_enabled(priv->pci_dev)))
desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -708,6 +714,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
/* initialize the Master Descriptor Base Address (MDBA) */
writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);

writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);

/* initialize the Master Control Register (MCTRL) */
writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);

@@ -795,6 +803,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
priv->head = 0;
init_completion(&priv->cmp);

priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
ISMT_LOG_ENTRIES * sizeof(u32),
&priv->log_dma, GFP_KERNEL);
if (!priv->log)
return -ENOMEM;

return 0;
}
@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)

if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
return -EINVAL;
ret = -EINVAL;
goto err_disable_clk;
}

adap = &i2c->adap;
@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)

ret = i2c_add_adapter(adap);
if (ret < 0)
return ret;
goto err_disable_clk;

dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);

return 0;

err_disable_clk:
clk_disable_unprepare(i2c->clk);

return ret;
}
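The mtk i2c hunk above reroutes every failure after clk_prepare_enable() through one err_disable_clk label, where the old code simply returned and leaked the enabled clock. A sketch of that single-exit unwind shape, using a hypothetical driver (the struct and names below are illustrative, not the real driver's):

```c
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>

struct example_dev {		/* hypothetical driver state */
	struct clk *clk;
	struct i2c_adapter adap;
	u32 bus_freq;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *ed = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(ed->clk);
	if (ret)
		return ret;		/* nothing acquired yet, plain return is fine */

	if (ed->bus_freq == 0) {
		ret = -EINVAL;
		goto err_disable_clk;	/* undo the clock before bailing out */
	}

	ret = i2c_add_adapter(&ed->adap);
	if (ret < 0)
		goto err_disable_clk;

	return 0;

err_disable_clk:
	clk_disable_unprepare(ed->clk);
	return ret;
}
```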
@@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
i2c->adap.dev.fwnode = dev->fwnode;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"Cavium ThunderX i2c adapter at %s", dev_name(dev));
i2c_set_adapdata(&i2c->adap, i2c);
@@ -756,15 +756,12 @@ static int ili251x_firmware_reset(struct i2c_client *client)
return ili251x_firmware_busy(client);
}

static void ili251x_hardware_reset(struct device *dev)
static void ili210x_hardware_reset(struct gpio_desc *reset_gpio)
{
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);

/* Reset the controller */
gpiod_set_value_cansleep(priv->reset_gpio, 1);
usleep_range(10000, 15000);
gpiod_set_value_cansleep(priv->reset_gpio, 0);
gpiod_set_value_cansleep(reset_gpio, 1);
usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
msleep(300);
}

@@ -773,6 +770,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);
const char *fwname = ILI251X_FW_FILENAME;
const struct firmware *fw;
u16 ac_end, df_end;
@@ -803,7 +801,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,

dev_dbg(dev, "Firmware update started, firmware=%s\n", fwname);

ili251x_hardware_reset(dev);
ili210x_hardware_reset(priv->reset_gpio);

error = ili251x_firmware_reset(client);
if (error)
@@ -858,7 +856,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
error = count;

exit:
ili251x_hardware_reset(dev);
ili210x_hardware_reset(priv->reset_gpio);
dev_dbg(dev, "Firmware update ended, error=%i\n", error);
enable_irq(client->irq);
kfree(fwbuf);
@@ -951,9 +949,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;

usleep_range(50, 100);
gpiod_set_value_cansleep(reset_gpio, 0);
msleep(100);
ili210x_hardware_reset(reset_gpio);
}

priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -21,7 +21,7 @@

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(1 * 1000) /* 1ms */
#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
@@ -1495,34 +1495,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_30X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_30X;
		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;

		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
@@ -85,9 +85,6 @@ struct m_can_classdev {
	struct sk_buff *tx_skb;
	struct phy *transceiver;

	const struct can_bittiming_const *bit_timing;
	const struct can_bittiming_const *data_timing;

	struct m_can_ops *ops;

	int version;
@@ -18,14 +18,9 @@

#define M_CAN_PCI_MMIO_BAR		0

#define M_CAN_CLOCK_FREQ_EHL		200000000
#define CTL_CSR_INT_CTL_OFFSET		0x508

struct m_can_pci_config {
	const struct can_bittiming_const *bit_timing;
	const struct can_bittiming_const *data_timing;
	unsigned int clock_freq;
};

struct m_can_pci_priv {
	struct m_can_classdev cdev;

@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
	.read_fifo = iomap_read_fifo,
};

static const struct can_bittiming_const m_can_bittiming_const_ehl = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct m_can_pci_config m_can_pci_ehl = {
	.bit_timing = &m_can_bittiming_const_ehl,
	.data_timing = &m_can_data_bittiming_const_ehl,
	.clock_freq = 200000000,
};

static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct device *dev = &pci->dev;
	const struct m_can_pci_config *cfg;
	struct m_can_classdev *mcan_class;
	struct m_can_pci_priv *priv;
	void __iomem *base;

@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
	if (!mcan_class)
		return -ENOMEM;

	cfg = (const struct m_can_pci_config *)id->driver_data;

	priv = cdev_to_priv(mcan_class);

	priv->base = base;

@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
	mcan_class->dev = &pci->dev;
	mcan_class->net->irq = pci_irq_vector(pci, 0);
	mcan_class->pm_clock_support = 1;
	mcan_class->bit_timing = cfg->bit_timing;
	mcan_class->data_timing = cfg->data_timing;
	mcan_class->can.clock.freq = cfg->clock_freq;
	mcan_class->can.clock.freq = id->driver_data;
	mcan_class->ops = &m_can_pci_ops;

	pci_set_drvdata(pci, mcan_class);

@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
			 m_can_pci_suspend, m_can_pci_resume);

static const struct pci_device_id m_can_pci_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
	{ PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
	{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
	{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
	{  }	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
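This m_can_pci revert drops the per-board config struct and stores the clock frequency directly in the id table: driver_data is a kernel_ulong_t, so a small scalar fits without boxing it behind a pointer. A minimal sketch of the idiom, with hypothetical MY_CLOCK_FREQ and device IDs:

	#include <linux/pci.h>

	#define MY_CLOCK_FREQ	200000000	/* hypothetical per-device datum */

	static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
	{
		/* driver_data carries the scalar directly: no cast through a
		 * config-struct pointer is needed. */
		unsigned int clock_freq = id->driver_data;

		(void)clock_freq;	/* ... program the device ... */
		return 0;
	}

	static const struct pci_device_id my_id_table[] = {
		{ PCI_VDEVICE(INTEL, 0x4bc1), MY_CLOCK_FREQ, },
		{ }	/* Terminating Entry */
	};
	MODULE_DEVICE_TABLE(pci, my_id_table);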
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
	device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk))
		return PTR_ERR(priv->wol_clk);
	if (IS_ERR(priv->wol_clk)) {
		ret = PTR_ERR(priv->wol_clk);
		goto err_deregister_fixed_link;
	}

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
@@ -1219,7 +1219,6 @@ static void gem_rx_refill(struct macb_queue *queue)
		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {

@@ -1258,6 +1257,7 @@ static void gem_rx_refill(struct macb_queue *queue)
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
		queue->rx_prepared_head++;
	}

	/* Make descriptor updates visible to hardware */
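The macb fix advances rx_prepared_head only once the descriptor is fully populated, and the dma_wmb() guarantees the buffer address is visible to the device before the RX_USED bit is cleared to hand the descriptor over. A minimal sketch of that publish-after-barrier ordering, using a hypothetical my_desc layout rather than the real GEM descriptor:

	#include <linux/types.h>
	#include <asm/barrier.h>

	struct my_desc {
		u32 addr;	/* low bit doubles as the ownership flag */
		u32 ctrl;
	};

	#define MY_USED	0x1

	static void my_publish_rx_buffer(struct my_desc *desc, u32 dma_addr)
	{
		/* Fill in everything the device will read, ownership
		 * still held by software... */
		desc->ctrl = 0;
		desc->addr = (dma_addr & ~MY_USED) | MY_USED;

		/* ...make those stores visible to the device first... */
		dma_wmb();

		/* ...and only then hand ownership over. */
		desc->addr &= ~MY_USED;
	}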
@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
	/* AST2400 doesn't have working HW checksum generation */
	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
		netdev->hw_features &= ~NETIF_F_HW_CSUM;

	/* AST2600 tx checksum with NCSI is broken */
	if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
		netdev->hw_features &= ~NETIF_F_HW_CSUM;

	if (np && of_get_property(np, "no-hw-checksum", NULL))
		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
	netdev->features |= netdev->hw_features;
@@ -3043,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		coalesce[i].itr_tx = q_vector->tx.itr_setting;
		coalesce[i].itr_rx = q_vector->rx.itr_setting;
		coalesce[i].itr_tx = q_vector->tx.itr_settings;
		coalesce[i].itr_rx = q_vector->rx.itr_settings;
		coalesce[i].intrl = q_vector->intrl;

		if (i < vsi->num_txq)

@@ -3100,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
		 */
		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
			rc = &vsi->q_vectors[i]->rx;
			rc->itr_setting = coalesce[i].itr_rx;
			rc->itr_settings = coalesce[i].itr_rx;
			ice_write_itr(rc, rc->itr_setting);
		} else if (i < vsi->alloc_rxq) {
			rc = &vsi->q_vectors[i]->rx;
			rc->itr_setting = coalesce[0].itr_rx;
			rc->itr_settings = coalesce[0].itr_rx;
			ice_write_itr(rc, rc->itr_setting);
		}

		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
			rc = &vsi->q_vectors[i]->tx;
			rc->itr_setting = coalesce[i].itr_tx;
			rc->itr_settings = coalesce[i].itr_tx;
			ice_write_itr(rc, rc->itr_setting);
		} else if (i < vsi->alloc_txq) {
			rc = &vsi->q_vectors[i]->tx;
			rc->itr_setting = coalesce[0].itr_tx;
			rc->itr_settings = coalesce[0].itr_tx;
			ice_write_itr(rc, rc->itr_setting);
		}

@@ -3128,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
	for (; i < vsi->num_q_vectors; i++) {
		/* transmit */
		rc = &vsi->q_vectors[i]->tx;
		rc->itr_setting = coalesce[0].itr_tx;
		rc->itr_settings = coalesce[0].itr_tx;
		ice_write_itr(rc, rc->itr_setting);

		/* receive */
		rc = &vsi->q_vectors[i]->rx;
		rc->itr_setting = coalesce[0].itr_rx;
		rc->itr_settings = coalesce[0].itr_rx;
		ice_write_itr(rc, rc->itr_setting);

		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
@@ -6172,9 +6172,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
		ice_ptp_link_change(pf, pf->hw.pf_id, true);
	}

	/* clear this now, and the first stats read will be used as baseline */
	vsi->stat_offsets_loaded = false;

	/* Perform an initial read of the statistics registers now to
	 * set the baseline so counters are ready when interface is up
	 */
	ice_update_eth_stats(vsi);
	ice_service_task_schedule(pf);

	return 0;
@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	u64 systime;
	int i;

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**

@@ -2330,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	ice_ptp_update_cached_phctime(pf);
	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);

	/* Run twice a second */
	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
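The reworked update function bails out with -EAGAIN when another flow holds the busy bit, and the periodic worker above reschedules quickly in that case instead of waiting a full half second. A minimal sketch of the guard-and-reschedule pattern, with hypothetical my_* names and flag bit:

	#include <linux/bitops.h>
	#include <linux/jiffies.h>
	#include <linux/kthread.h>

	#define MY_CFG_BUSY	0	/* hypothetical flag bit */

	static unsigned long my_state[1];
	static struct kthread_worker *my_worker;
	static struct kthread_delayed_work my_work;

	static int my_update(void)
	{
		/* Atomically claim the busy bit; back off if contended */
		if (test_and_set_bit(MY_CFG_BUSY, my_state))
			return -EAGAIN;

		/* ... do the real update here ... */

		clear_bit(MY_CFG_BUSY, my_state);
		return 0;
	}

	static void my_periodic_work(struct kthread_work *work)
	{
		int err = my_update();

		/* Retry soon on contention, otherwise run twice a second */
		kthread_queue_delayed_work(my_worker, &my_work,
					   msecs_to_jiffies(err ? 10 : 500));
	}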
@@ -384,9 +384,14 @@ struct ice_ring_container {
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	u16 itr_setting:13;
	u16 itr_reserved:2;
	u16 itr_mode:1;
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};
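Wrapping the bitfields in an anonymous union with a plain u16 is what lets the ice_lib.c rebuild code above snapshot and restore all three fields, including itr_mode, in a single assignment. A small standalone illustration of the aliasing (userspace C, uint16_t standing in for u16):

	#include <stdint.h>
	#include <stdio.h>

	struct ring_container {
		union {
			struct {
				uint16_t itr_setting:13;
				uint16_t itr_reserved:2;
				uint16_t itr_mode:1;
			};
			uint16_t itr_settings;	/* all three fields as one word */
		};
	};

	int main(void)
	{
		struct ring_container a = { .itr_setting = 50, .itr_mode = 1 };
		struct ring_container b;

		b.itr_settings = a.itr_settings;	/* one-shot save/restore */
		printf("%u %u\n", b.itr_setting, b.itr_mode);	/* 50 1 */
		return 0;
	}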
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
			break;
		}

		if (adapter->link_speed != SPEED_1000)
		if (adapter->link_speed != SPEED_1000 ||
		    !hw->phy.ops.read_reg)
			goto no_wait;

		/* wait for Remote receiver status OK */
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
};

struct mlx5_ct_fs_smfs_matchers {
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
	struct list_head used;
};

@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
};

static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
			  bool gre)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);

@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
	} else {
	} else if (!gre) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
	}

@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo

static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
			       bool tcp, u32 priority)
			       bool tcp, bool gre, u32 priority)
{
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5_flow_spec *spec;

@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
	if (!spec)
		return ERR_PTR(-ENOMEM);

	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);

@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
}

static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;

@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
	int prio;

	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];

	if (refcount_inc_not_zero(&smfs_matcher->ref))
		return smfs_matcher;

@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
	}

	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
	if (IS_ERR(dr_matcher)) {
		netdev_warn(fs->netdev,
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
			    nat, ipv4, tcp, PTR_ERR(dr_matcher));
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
			    nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));

		smfs_matcher = ERR_CAST(dr_matcher);
		goto out_unlock;

@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
	const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
			       DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
	const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
	const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
	const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
	const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
	const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
	const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
	const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);

	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
		used_keys == ipv6_udp);
		used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}

static bool

@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
	flow_rule_match_control(flow_rule, &control);
	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
	flow_rule_match_ports(flow_rule, &ports);
	flow_rule_match_tcp(flow_rule, &tcp);
	if (basic.key->ip_proto != IPPROTO_GRE)
		flow_rule_match_ports(flow_rule, &ports);
	if (basic.key->ip_proto == IPPROTO_TCP)
		flow_rule_match_tcp(flow_rule, &tcp);

	if (basic.mask->n_proto != htons(0xFFFF) ||
	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
	    basic.mask->ip_proto != 0xFF ||
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
	     basic.key->ip_proto != IPPROTO_GRE)) {
		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
		       basic.key->ip_proto, basic.mask->ip_proto);
		return false;
	}

	if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
	if (basic.key->ip_proto != IPPROTO_GRE &&
	    (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
		ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
		       ports.mask->src, ports.mask->dst);
		return false;

@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
	struct mlx5dr_action *actions[5];
	struct mlx5dr_rule *rule;
	int num_actions = 0, err;
	bool nat, tcp, ipv4;
	bool nat, tcp, ipv4, gre;

	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
		return ERR_PTR(-EOPNOTSUPP);

@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
	tcp = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_TCP;
	gre = MLX5_GET(fte_match_param, spec->match_value,
		      outer_headers.ip_protocol) == IPPROTO_GRE;

	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
	if (IS_ERR(smfs_matcher)) {
		err = PTR_ERR(smfs_matcher);
		goto err_matcher;
	}

	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
				     spec->flow_context.flow_source);
	if (!rule) {
		err = -EINVAL;
		goto err_create;
|
||||
bool busy = false;
|
||||
int work_done = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
ch_stats->poll++;
|
||||
|
||||
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
|
||||
busy |= work_done == budget;
|
||||
busy |= rq->post_wqes(rq);
|
||||
|
||||
if (busy)
|
||||
return budget;
|
||||
if (busy) {
|
||||
work_done = budget;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unlikely(!napi_complete_done(napi, work_done)))
|
||||
return work_done;
|
||||
goto out;
|
||||
|
||||
mlx5e_cq_arm(&rq->cq);
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return work_done;
|
||||
}
|
||||
|
||||
|
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
	if (netdev->features & NETIF_F_NTUPLE)
		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");

	features &= ~NETIF_F_GRO_HW;
	if (netdev->features & NETIF_F_GRO_HW)
		netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");

	return features;
}

@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
		}
	}

	if (params->xdp_prog) {
		if (features & NETIF_F_LRO) {
			netdev_warn(netdev, "LRO is incompatible with XDP\n");
			features &= ~NETIF_F_LRO;
		}
		if (features & NETIF_F_GRO_HW) {
			netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (priv->xsk.refcnt) {
		if (features & NETIF_F_GRO_HW) {
			netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
				    priv->xsk.refcnt);
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		features &= ~NETIF_F_RXHASH;
		if (netdev->features & NETIF_F_RXHASH)

@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

	if (!!MLX5_CAP_GEN(mdev, shampo) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->hw_features |= NETIF_F_GRO_HW;

	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	mlx5_ft_pool_destroy(dev);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

@@ -3086,43 +3064,28 @@ static int init_egress_root_ns(struct mlx5_flow_steering *steering)
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
}

int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&

@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
	}

	return 0;

err:
	mlx5_cleanup_fs(dev);
	mlx5_fs_core_cleanup(dev);
	return err;
}

void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
	mlx5_ft_pool_destroy(dev);
	mlx5_cleanup_fc_stats(dev);
}

int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mlx5_fs_core_free(dev);
	return err;
}
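The refactor above splits flow steering teardown/setup into a software-only alloc/free pair (called once from init_once/cleanup_once) and a device-touching init/cleanup pair (called on every load/unload), so the allocated state survives reload cycles. A minimal sketch of that two-level lifecycle, with hypothetical my_core names:

	#include <linux/slab.h>

	struct my_state {
		int mode;
	};

	struct my_dev {
		struct my_state *state;
	};

	/* Software-only: runs once at probe, survives device resets */
	static int my_core_alloc(struct my_dev *dev)
	{
		dev->state = kzalloc(sizeof(*dev->state), GFP_KERNEL);
		return dev->state ? 0 : -ENOMEM;
	}

	static void my_core_free(struct my_dev *dev)
	{
		kfree(dev->state);
		dev->state = NULL;
	}

	/* Device-touching: may run repeatedly across reset/reload cycles */
	static int my_core_init(struct my_dev *dev)
	{
		/* ... program the device using dev->state ... */
		return 0;
	}

	static void my_core_cleanup(struct my_dev *dev)
	{
		/* ... undo only what my_core_init() did ... */
	}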
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode);

int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
@@ -8,7 +8,8 @@
enum {
	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
	MLX5_FW_RESET_FLAGS_PENDING_COMP
	MLX5_FW_RESET_FLAGS_PENDING_COMP,
	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
};

struct mlx5_fw_reset {

@@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)

	if (fatal_error) {
		mlx5_core_warn(dev, "Got Device Reset\n");
		queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
		if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
			queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
		else
			mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
		return;
	}

@@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
	struct mlx5_eqe *eqe = data;

	if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
		queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
		queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
		break;
	case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
		mlx5_sync_reset_events_handle(fw_reset, eqe);

@@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
	mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
}

void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
	cancel_work_sync(&fw_reset->fw_live_patch_work);
	cancel_work_sync(&fw_reset->reset_request_work);
	cancel_work_sync(&fw_reset->reset_reload_work);
	cancel_work_sync(&fw_reset->reset_now_work);
	cancel_work_sync(&fw_reset->reset_abort_work);
}

int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
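mlx5_drain_fw_reset() raises a flag first so the event notifier stops queueing new work, and only then cancels everything already queued; without that ordering a handler could re-queue work right after it was cancelled. A minimal sketch of the drain idiom, with hypothetical my_* names:

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	#define MY_DRAINING	0	/* hypothetical flag bit */

	struct my_ctx {
		unsigned long flags;
		struct work_struct reset_work;
	};

	static void my_event_handler(struct my_ctx *ctx)
	{
		/* Producers check the flag before queueing new work */
		if (test_bit(MY_DRAINING, &ctx->flags))
			return;
		schedule_work(&ctx->reset_work);
	}

	static void my_drain(struct my_ctx *ctx)
	{
		/* 1) stop new work from being queued... */
		set_bit(MY_DRAINING, &ctx->flags);
		/* 2) ...then wait out anything already in flight */
		cancel_work_sync(&ctx->reset_work);
	}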
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
@@ -938,6 +938,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
		goto err_sf_table_cleanup;
	}

	err = mlx5_fs_core_alloc(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc flow steering\n");
		goto err_fs;
	}

	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);

@@ -948,6 +954,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)

	return 0;

err_fs:
	mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:

@@ -985,6 +993,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_fs_core_free(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);

@@ -1191,7 +1200,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	err = mlx5_fs_core_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;

@@ -1236,7 +1245,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_cleanup_fs(dev);
	mlx5_fs_core_cleanup(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:

@@ -1265,7 +1274,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_fs_core_cleanup(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);

@@ -1618,6 +1627,10 @@ static void remove_one(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
	 * fw_reset before unregistering the devlink.
	 */
	mlx5_drain_fw_reset(dev);
	devlink_unregister(devlink);
	mlx5_sriov_disable(pdev);
	mlx5_crdump_disable(dev);
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
	return 0;
}

static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_actions_attr *attr,
					bool rx_rule,
					bool *recalc_cs_required)
{
	*recalc_cs_required = false;

	/* if device supports csum recalculation - no adjustment needed */
	if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
		return;

	/* no adjustment needed on TX rules */
	if (!rx_rule)
		return;

	if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
		/* Ignore the modify TTL action.
		 * It is always kept as last HW action.
		 */
		attr->modify_actions--;
		return;
	}

	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
		/* Due to a HW bug on some devices, modifying TTL on RX flows
		 * will cause an incorrect checksum calculation. In such cases
		 * we will use a FW table to recalculate the checksum.
		 */
		*recalc_cs_required = true;
}

static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
				     struct mlx5dr_action *actions[],
				     int last_idx)

@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
		case DR_ACTION_TYP_MODIFY_HDR:
			attr.modify_index = action->rewrite->index;
			attr.modify_actions = action->rewrite->num_of_actions;
			recalc_cs_required = action->rewrite->modify_ttl &&
					     !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
			if (action->rewrite->modify_ttl)
				dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
							    &recalc_cs_required);
			break;
		case DR_ACTION_TYP_L2_TO_TNL_L2:
		case DR_ACTION_TYP_L2_TO_TNL_L3:

@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
	*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
	last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);

	/* Due to a HW bug in some devices, modifying TTL on RX flows will
	 * cause an incorrect checksum calculation. In this case we will
	 * use a FW table to recalculate.
	 */
	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
	    rx_rule && recalc_cs_required && dest_action) {
	if (recalc_cs_required && dest_action) {
		ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
		if (ret) {
			mlx5dr_err(dmn,

@@ -842,7 +869,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
				   struct mlx5dr_action_dest *dests,
				   u32 num_of_dests,
				   bool ignore_flow_level)
				   bool ignore_flow_level,
				   u32 flow_source)
{
	struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
	struct mlx5dr_action **ref_actions;

@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
				      reformat_req,
				      &action->dest_tbl->fw_tbl.id,
				      &action->dest_tbl->fw_tbl.group_id,
				      ignore_flow_level);
				      ignore_flow_level,
				      flow_source);
	if (ret)
		goto free_action;

@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
	return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}

static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
{
	return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
	       !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
}

static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
					    u32 max_hw_actions,
					    u32 num_sw_actions,

@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
	const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
	const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
	struct mlx5dr_domain *dmn = action->rewrite->dmn;
	__be64 *modify_ttl_sw_action = NULL;
	int ret, i, hw_idx = 0;
	__be64 *sw_action;
	__be64 hw_action;

@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
	action->rewrite->allow_rx = 1;
	action->rewrite->allow_tx = 1;

	for (i = 0; i < num_sw_actions; i++) {
		sw_action = &sw_actions[i];
	for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
		/* modify TTL is handled separately, as a last action */
		if (i == num_sw_actions) {
			sw_action = modify_ttl_sw_action;
			modify_ttl_sw_action = NULL;
		} else {
			sw_action = &sw_actions[i];
		}

		ret = dr_action_modify_check_field_limitation(action,
							      sw_action);

@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,

		if (!(*modify_ttl) &&
		    dr_action_modify_check_is_ttl_modify(sw_action)) {
			if (dr_action_modify_ttl_ignore(dmn))
				continue;

			modify_ttl_sw_action = sw_action;
			*modify_ttl = true;
			continue;
		}

		/* Convert SW action to HW action */
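The rewritten conversion loop stashes the modify-TTL action aside and runs one extra iteration once the array is exhausted, guaranteeing that action is emitted last. A standalone sketch of that deferred-last-element loop shape:

	#include <stdio.h>

	int main(void)
	{
		int actions[] = { 1, 2, 3, 4 };
		int num = 4;
		int *deferred = NULL;
		int *cur;

		for (int i = 0; i < num || deferred; i++) {
			if (i == num) {		/* extra pass: emit the deferred one */
				cur = deferred;
				deferred = NULL;
			} else {
				cur = &actions[i];
				if (*cur == 2) {	/* defer this element to the end */
					deferred = cur;
					continue;
				}
			}
			printf("%d ", *cur);	/* prints: 1 3 4 2 */
		}
		return 0;
	}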
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
			    bool reformat_req,
			    u32 *tbl_id,
			    u32 *group_id,
			    bool ignore_flow_level)
			    bool ignore_flow_level,
			    u32 flow_source)
{
	struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
	struct mlx5dr_cmd_fte_info fte_info = {};

@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
	fte_info.val = val;
	fte_info.dest_arr = dest;
	fte_info.ignore_flow_level = ignore_flow_level;
	fte_info.flow_context.flow_source = flow_source;

	ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
	if (ret) {
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
	 * encapsulation. The reason for that is that we support
	 * modify headers for outer headers only
	 */
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,

@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
		}
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
			    bool reformat_req,
			    u32 *tbl_id,
			    u32 *group_id,
			    bool ignore_flow_level);
			    bool ignore_flow_level,
			    u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
			      u32 group_id);
#endif /* _DR_TYPES_H_ */
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
	} else if (num_term_actions > 1) {
		bool ignore_flow_level =
			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
		u32 flow_source = fte->flow_context.flow_source;

		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {

@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
								term_actions,
								num_term_actions,
								ignore_flow_level);
								ignore_flow_level,
								flow_source);
		if (!tmp_action) {
			err = -EOPNOTSUPP;
			goto free_actions;
@@ -99,7 +99,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
				   struct mlx5dr_action_dest *dests,
				   u32 num_of_dests,
				   bool ignore_flow_level);
				   bool ignore_flow_level,
				   u32 flow_source);

struct mlx5dr_action *mlx5dr_action_create_drop(void);
@@ -103,6 +103,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
	return 0;
}

static bool lan966x_port_unique_address(struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int p;

	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		port = lan966x->ports[p];
		if (!port || port->dev == dev)
			continue;

		if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
			return false;
	}

	return true;
}

static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{
	struct lan966x_port *port = netdev_priv(dev);

@@ -110,16 +128,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
	const struct sockaddr *addr = p;
	int ret;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	/* Learn the new net device MAC address in the mac table. */
	ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
	if (ret)
		return ret;

	/* If there is another port with the same address as the dev, then don't
	 * delete it from the MAC table
	 */
	if (!lan966x_port_unique_address(dev))
		goto out;

	/* Then forget the previous one. */
	ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
	if (ret)
		return ret;

out:
	eth_hw_addr_set(dev, addr->sa_data);
	return ret;
}
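The new lan966x helper keeps the old MAC table entry alive while any other port still uses the same address; only a genuinely unique address is forgotten. A small standalone sketch of that shared-entry check, with hypothetical learn_addr()/forget_addr() stubs in place of the real MAC table calls:

	#include <stdbool.h>
	#include <string.h>

	#define NUM_PORTS 4

	struct port {
		unsigned char addr[6];
		bool in_use;
	};

	static struct port ports[NUM_PORTS];

	static void learn_addr(const unsigned char *a) { (void)a; /* HW table insert (stub) */ }
	static void forget_addr(const unsigned char *a) { (void)a; /* HW table remove (stub) */ }

	/* True if no other in-use port shares this port's address */
	static bool addr_unique(int self)
	{
		for (int p = 0; p < NUM_PORTS; p++) {
			if (p == self || !ports[p].in_use)
				continue;
			if (!memcmp(ports[self].addr, ports[p].addr, 6))
				return false;
		}
		return true;
	}

	static void change_addr(int self, const unsigned char *new_addr)
	{
		learn_addr(new_addr);

		/* Only drop the old entry if no other port still needs it */
		if (addr_unique(self))
			forget_addr(ports[self].addr);

		memcpy(ports[self].addr, new_addr, 6);
	}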
@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
		qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
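The qla3xxx fix corrects a classic mistake: test_bit() takes a bit number, not a mask, so OR-ing two flag values tests an unrelated third bit instead of either flag, and each flag needs its own call. A standalone illustration (test_bit_ is a local stand-in for the kernel helper):

	#include <assert.h>

	/* Bit numbers, as used with test_bit()/set_bit() */
	#define FLAG_A	1
	#define FLAG_B	2

	static int test_bit_(int nr, unsigned long word)
	{
		return (word >> nr) & 1;
	}

	int main(void)
	{
		unsigned long flags = 1UL << FLAG_A;	/* only FLAG_A is set */

		/* WRONG: (FLAG_A | FLAG_B) == 3, so this tests bit 3, not A or B */
		assert(test_bit_(FLAG_A | FLAG_B, flags) == 0);

		/* RIGHT: test each bit number on its own */
		assert(test_bit_(FLAG_A, flags) || test_bit_(FLAG_B, flags));
		return 0;
	}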
@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 trans_count = 0;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;
	u32 old_index;

	trans_info = &channel->trans_info;

@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;
		trans_count++;

		/* Move on to the next event and transaction */
		if (--event_avail)

@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
	channel->trans_count += trans_count;
}

/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1150,13 +1150,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	/* Copy the data into the socket buffer and receive it */
	skb_put(skb, len);
	memcpy(skb->data, data, len);
	skb->truesize += extra;
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
 */
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
	struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
	struct ipa *ipa;
	int ret;

	/* We aren't ready until the modem and microcontroller are */
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
	path->encap.proto = htons(ETH_P_PPP_SES);
	path->encap.id = be16_to_cpu(po->num);
	memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
	memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
	path->dev = ctx->dev;
	ctx->dev = dev;
@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rbi->skb = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rbi->page = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	/* ring has already been cleaned up */
	if (!rq->rx_ring[0].base)
		return;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
{
	struct pn533_cmd *cmd, *n;

	/* delete the timer before cleanup the worker */
	del_timer_sync(&priv->listen_timer);

	flush_delayed_work(&priv->poll_work);
	destroy_workqueue(priv->wq);

	skb_queue_purge(&priv->resp_q);

	del_timer(&priv->listen_timer);

	list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
		list_del(&cmd->queue);
		kfree(cmd);
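The pn533 fix deletes the timer before tearing down the workqueue, and upgrades to del_timer_sync(), because the timer callback could otherwise queue work onto a workqueue that no longer exists. A minimal sketch of that teardown ordering, with a hypothetical my_priv layout:

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct timer_list timer;
		struct workqueue_struct *wq;
		struct delayed_work poll_work;
	};

	static void my_clean(struct my_priv *priv)
	{
		/* Stop the timer first and wait out a running callback, so it
		 * can no longer queue work on the workqueue... */
		del_timer_sync(&priv->timer);

		/* ...only then drain and destroy the workqueue. */
		flush_delayed_work(&priv->poll_work);
		destroy_workqueue(priv->wq);
	}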
@@ -1236,18 +1236,17 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);

#define AE12 196
SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
			SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
	  SIG_EXPR_LIST_PTR(AE12, GPIOY4));

#define AF12 197
SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
			SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
	  SIG_EXPR_LIST_PTR(AF12, GPIOY5));
FUNC_GROUP_DECL(FWQSPI, AE12, AF12);

#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));

@@ -1520,9 +1519,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);

GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*

@@ -1918,7 +1916,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
	ASPEED_PINCTRL_GROUP(FSI2),
	ASPEED_PINCTRL_GROUP(FWSPIABR),
	ASPEED_PINCTRL_GROUP(FWSPID),
	ASPEED_PINCTRL_GROUP(FWQSPID),
	ASPEED_PINCTRL_GROUP(FWQSPI),
	ASPEED_PINCTRL_GROUP(FWSPIWP),
	ASPEED_PINCTRL_GROUP(GPIT0),
	ASPEED_PINCTRL_GROUP(GPIT1),

@@ -2160,6 +2158,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
	ASPEED_PINCTRL_FUNC(FSI2),
	ASPEED_PINCTRL_FUNC(FWSPIABR),
	ASPEED_PINCTRL_FUNC(FWSPID),
	ASPEED_PINCTRL_FUNC(FWQSPI),
	ASPEED_PINCTRL_FUNC(FWSPIWP),
	ASPEED_PINCTRL_FUNC(GPIT0),
	ASPEED_PINCTRL_FUNC(GPIT1),
@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
	MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
	MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
	MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
	MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
	MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
	MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
	MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
	MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
@@ -129,6 +129,7 @@ enum {
	FUNC_PTP1,
	FUNC_PTP2,
	FUNC_PTP3,
	FUNC_PTPSYNC_0,
	FUNC_PTPSYNC_1,
	FUNC_PTPSYNC_2,
	FUNC_PTPSYNC_3,

@@ -252,6 +253,7 @@ static const char *const ocelot_function_names[] = {
	[FUNC_PTP1]		= "ptp1",
	[FUNC_PTP2]		= "ptp2",
	[FUNC_PTP3]		= "ptp3",
	[FUNC_PTPSYNC_0]	= "ptpsync_0",
	[FUNC_PTPSYNC_1]	= "ptpsync_1",
	[FUNC_PTPSYNC_2]	= "ptpsync_2",
	[FUNC_PTPSYNC_3]	= "ptpsync_3",

@@ -983,7 +985,7 @@ LAN966X_P(31,   GPIO,   FC3_c,     CAN1,      NONE,   OB_TRG,   RECO_b,      NON
LAN966X_P(32,   GPIO,   FC3_c,      NONE,   SGPIO_a,     NONE,  MIIM_Sa,      NONE,   R);
LAN966X_P(33,   GPIO,   FC1_b,      NONE,   SGPIO_a,     NONE,  MIIM_Sa,    MIIM_b,   R);
LAN966X_P(34,   GPIO,   FC1_b,      NONE,   SGPIO_a,     NONE,  MIIM_Sa,    MIIM_b,   R);
LAN966X_P(35,   GPIO,   FC1_b,      NONE,   SGPIO_a,   CAN0_b,     NONE,      NONE,   R);
LAN966X_P(35,   GPIO,   FC1_b, PTPSYNC_0,   SGPIO_a,   CAN0_b,     NONE,      NONE,   R);
LAN966X_P(36,   GPIO,    NONE, PTPSYNC_1,      NONE,   CAN0_b,     NONE,      NONE,   R);
LAN966X_P(37,   GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE,  NONE,      NONE,   R);
LAN966X_P(38,   GPIO,    NONE, PTPSYNC_3,      NONE,     NONE,     NONE,      NONE,   R);
@@ -51,7 +51,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
		  SUNXI_FUNCTION(0x3, "pwm0"),		/* PWM0 */
		  SUNXI_FUNCTION(0x4, "i2s"),		/* IN */
		  SUNXI_FUNCTION(0x5, "uart1"),		/* RX */
		  SUNXI_FUNCTION(0x6, "spi1")),		/* MOSI */
		  SUNXI_FUNCTION(0x6, "spi1")),		/* CLK */
	SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),

@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION(0x2, "lcd"),		/* D20 */
		  SUNXI_FUNCTION(0x3, "lvds1"),		/* RX */
		  SUNXI_FUNCTION(0x3, "uart2"),		/* RX */
		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
@ -300,7 +300,7 @@ struct ptp_ocp {
|
||||
struct platform_device *spi_flash;
|
||||
struct clk_hw *i2c_clk;
|
||||
struct timer_list watchdog;
|
||||
const struct ocp_attr_group *attr_tbl;
|
||||
const struct attribute_group **attr_group;
|
||||
const struct ptp_ocp_eeprom_map *eeprom_map;
|
||||
struct dentry *debug_root;
|
||||
time64_t gnss_lost;
|
||||
@ -841,7 +841,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
|
||||
}
|
||||
|
||||
static void
|
||||
ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
|
||||
ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
|
||||
{
|
||||
struct timespec64 ts;
|
||||
unsigned long flags;
|
||||
@ -850,7 +850,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
|
||||
spin_lock_irqsave(&bp->lock, flags);
|
||||
err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
|
||||
if (likely(!err)) {
|
||||
timespec64_add_ns(&ts, delta_ns);
|
||||
set_normalized_timespec64(&ts, ts.tv_sec,
|
||||
ts.tv_nsec + delta_ns);
|
||||
__ptp_ocp_settime_locked(bp, &ts);
|
||||
}
|
||||
spin_unlock_irqrestore(&bp->lock, flags);
|
||||
@ -1835,6 +1836,42 @@ ptp_ocp_signal_init(struct ptp_ocp *bp)
|
||||
bp->signal_out[i]->mem);
|
||||
}
|
||||
|
||||
static void
|
||||
ptp_ocp_attr_group_del(struct ptp_ocp *bp)
|
||||
{
|
||||
sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
|
||||
kfree(bp->attr_group);
|
||||
}
|
||||
|
||||
static int
|
||||
ptp_ocp_attr_group_add(struct ptp_ocp *bp,
|
||||
const struct ocp_attr_group *attr_tbl)
|
||||
{
|
||||
int count, i;
|
||||
int err;
|
||||
|
||||
count = 0;
|
||||
for (i = 0; attr_tbl[i].cap; i++)
|
||||
if (attr_tbl[i].cap & bp->fw_cap)
|
||||
count++;
|
||||
|
||||
bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
|
||||
GFP_KERNEL);
|
||||
if (!bp->attr_group)
|
||||
return -ENOMEM;
|
||||
|
||||
count = 0;
|
||||
for (i = 0; attr_tbl[i].cap; i++)
|
||||
if (attr_tbl[i].cap & bp->fw_cap)
|
||||
bp->attr_group[count++] = attr_tbl[i].group;
|
||||
|
||||
err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
|
||||
if (err)
|
||||
bp->attr_group[0] = NULL;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void
|
||||
ptp_ocp_sma_init(struct ptp_ocp *bp)
|
||||
{
@ -1904,7 +1941,6 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 1024 * 4096;
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
bp->attr_tbl = fb_timecard_groups;
bp->fw_cap = OCP_CAP_BASIC;

ver = bp->fw_version & 0xffff;
@ -1918,6 +1954,10 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp);

err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
if (err)
return err;

err = ptp_ocp_fb_set_pins(bp);
if (err)
return err;
@ -3388,7 +3428,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
{
struct pps_device *pps;
char buf[32];
int i, err;

if (bp->gnss_port != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port);
@ -3413,14 +3452,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps");

for (i = 0; bp->attr_tbl[i].cap; i++) {
if (!(bp->attr_tbl[i].cap & bp->fw_cap))
continue;
err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
if (err)
return err;
}

ptp_ocp_debugfs_add_device(bp);

return 0;
@ -3492,15 +3523,11 @@ static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{
struct device *dev = &bp->dev;
int i;

sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
if (bp->attr_tbl)
for (i = 0; bp->attr_tbl[i].cap; i++)
sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
}

static void
@ -3510,6 +3537,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)

ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
ptp_ocp_attr_group_del(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
if (bp->ts0)

@ -370,6 +370,23 @@ CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
sun8i_h3_rtc_clk_init);

static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
.rc_osc_rate = 16000000,
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
.export_iosc = 1,
.has_losc_en = 1,
.has_auto_swt = 1,
};

static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
sun50i_h6_rtc_clk_init);

/*
* The R40 user manual is self-conflicting on whether the prescaler is
* fixed or configurable. The clock diagram shows it as fixed, but there

@ -1254,6 +1254,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
int data_seg_len;

data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
& MASK_RSP_UPIU_DATA_SEG_LEN;

/* If data segment length is zero, rsp_field is not valid */
if (!data_seg_len)
return;

if (unlikely(lrbp->lun != rsp_field->lun)) {
struct scsi_device *sdev;
bool found = false;
@ -1288,18 +1295,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return;
}

data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
& MASK_RSP_UPIU_DATA_SEG_LEN;

/* To flush remained rsp_list, we queue the map_work task */
if (!data_seg_len) {
if (!ufshpb_is_general_lun(hpb->lun))
return;

ufshpb_kick_map_work(hpb);
return;
}

BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);

if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
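
The ufshpb_rsp_upiu() hunks above hoist the data-segment-length check to the top of the function, so the response payload is no longer inspected (lrbp->lun versus rsp_field->lun) before the header says a payload exists at all. A tiny sketch of the same validate-before-use ordering, with hypothetical stand-in types rather than the driver's:

#include <stdio.h>
#include <string.h>

struct rsp {
	unsigned int data_seg_len;	/* 0 means "no payload follows" */
	unsigned char lun;		/* only meaningful when a payload exists */
};

static void handle_rsp(const struct rsp *r, unsigned char expected_lun)
{
	if (!r->data_seg_len)		/* check the length before any field use */
		return;

	if (r->lun != expected_lun)
		printf("LUN mismatch: %u != %u\n",
		       (unsigned)r->lun, (unsigned)expected_lun);
}

int main(void)
{
	struct rsp garbage;

	memset(&garbage, 0xff, sizeof(garbage));	/* stale bytes */
	garbage.data_seg_len = 0;			/* no valid payload */
	handle_rsp(&garbage, 1);			/* safely ignored */
	return 0;
}
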
@ -3596,10 +3596,7 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
int ord, cpu;
cpumask_t conn_allowed_cpumask;

cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
cpu_online_mask);
cpumask_var_t conn_allowed_cpumask;

/*
* bitmap_id is assigned from iscsit_global->ts_bitmap from
@ -3609,13 +3606,28 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
* iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
cpumask_clear(conn->conn_cpumask);
ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
for_each_cpu(cpu, &conn_allowed_cpumask) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
return;
if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
return;
}
}
} else {
cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
cpu_online_mask);

cpumask_clear(conn->conn_cpumask);
ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
for_each_cpu(cpu, conn_allowed_cpumask) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
free_cpumask_var(conn_allowed_cpumask);
return;
}
}
free_cpumask_var(conn_allowed_cpumask);
}
/*
* This should never be reached..
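
Both branches of the rewritten iscsit_thread_get_cpumask() pick a CPU the same way: reduce bitmap_id modulo the number of usable CPUs, then walk the mask until that many set bits have been skipped. The selection logic, sketched on a plain 64-bit mask instead of a cpumask_var_t:

#include <stdio.h>
#include <stdint.h>

/* Return the (id % popcount)-th set bit of `allowed`, or -1 if empty. */
static int pick_cpu(uint64_t allowed, unsigned int id)
{
	int ord;

	if (!allowed)
		return -1;
	ord = id % __builtin_popcountll(allowed);

	for (int cpu = 0; cpu < 64; cpu++) {
		if (!(allowed & (1ULL << cpu)))
			continue;
		if (ord-- == 0)
			return cpu;
	}
	return -1;	/* not reached while allowed != 0 */
}

int main(void)
{
	uint64_t allowed = 0x2C;	/* CPUs 2, 3 and 5 are usable */

	for (unsigned int id = 0; id < 5; id++)
		printf("conn %u -> cpu %d\n", id, pick_cpu(allowed, id));
	return 0;	/* connections round-robin over CPUs 2, 3, 5 */
}
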
@ -1137,23 +1137,27 @@ static ssize_t lio_target_wwn_cpus_allowed_list_show(
static ssize_t lio_target_wwn_cpus_allowed_list_store(
struct config_item *item, const char *page, size_t count)
{
int ret;
int ret = -ENOMEM;
char *orig;
cpumask_t new_allowed_cpumask;
cpumask_var_t new_allowed_cpumask;

if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL))
goto out;

orig = kstrdup(page, GFP_KERNEL);
if (!orig)
return -ENOMEM;
goto out_free_cpumask;

cpumask_clear(&new_allowed_cpumask);
ret = cpulist_parse(orig, &new_allowed_cpumask);
ret = cpulist_parse(orig, new_allowed_cpumask);
if (!ret)
cpumask_copy(iscsit_global->allowed_cpumask,
new_allowed_cpumask);

kfree(orig);
if (ret != 0)
return ret;

cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
return count;
out_free_cpumask:
free_cpumask_var(new_allowed_cpumask);
out:
return ret ? ret : count;
}

CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
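
The rewritten store handler above swaps the on-stack cpumask_t for an allocated cpumask_var_t and funnels every exit through the out_free_cpumask/out labels, so each resource is released exactly once and "ret ? ret : count" reports either the error or the bytes consumed. The same unwind shape as a standalone sketch (the sscanf() call is only a stand-in for cpulist_parse()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long store(const char *page, size_t count)
{
	long ret = -ENOMEM;	/* default if either allocation fails */
	unsigned long *mask;
	char *orig;

	mask = calloc(1, sizeof(*mask));	/* stand-in for zalloc_cpumask_var() */
	if (!mask)
		goto out;

	orig = strdup(page);			/* stand-in for kstrdup() */
	if (!orig)
		goto out_free_mask;

	/* stand-in parse: accept a hex mask, reject anything else */
	ret = (sscanf(orig, "%lx", mask) == 1) ? 0 : -EINVAL;

	free(orig);
out_free_mask:
	free(mask);
out:
	return ret ? ret : (long)count;
}

int main(void)
{
	printf("%ld\n", store("2c", 2));	/* bytes consumed */
	printf("%ld\n", store("zz", 2));	/* -EINVAL */
	return 0;
}
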
@ -161,6 +161,7 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@ -204,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}

static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
return max_vqs / 2;
}

static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
{
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
return 2;

return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
return mvdev->max_vqs;
}

static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
@ -1236,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
__be32 *list;
int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
int num;

if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
num = 1;
else
num = ndev->cur_num_vqs / 2;

max_rqt = min_t(int, roundup_pow_of_two(num),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;

inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -1263,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
for (i = 0, j = 0; i < max_rqt; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);

MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
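
In the new create_rqt() above the table always has rqt_size entries: RX queues sit at even indexes of vqs[], so the fill loop steps j by 2 and wraps at cur_num_vqs, letting a table sized for the maximum spread across however many queues are currently active. A sketch of that fill loop with plain arrays standing in for the mlx5 command buffer:

#include <stdio.h>

int main(void)
{
	int virtq_id[8];		/* 4 VQ pairs: rx0,tx0,rx1,tx1,... */
	int rqt_size = 4;		/* table sized for the maximum */
	int cur_num_vqs = 4;		/* but only 2 pairs active right now */
	int list[4];
	int i, j;

	for (i = 0; i < 8; i++)
		virtq_id[i] = 100 + i;	/* fake hardware queue ids */

	for (i = 0, j = 0; i < rqt_size; i++, j += 2)
		list[i] = virtq_id[j % cur_num_vqs];

	for (i = 0; i < rqt_size; i++)
		printf("rqt[%d] -> vq %d\n", i, list[i]);
	/* prints vq 100, 102, 100, 102: entries wrap over the active RX queues */
	return 0;
}
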
@ -1282,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
__be32 *list;
int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;

max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;

inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -1305,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);

list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
for (i = 0, j = 0; i < max_rqt; i++, j += 2)
for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);

MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
@ -1625,7 +1603,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)

newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
newqps > ndev->rqt_size)
break;

if (ndev->cur_num_vqs == 2 * newqps) {
@ -1989,7 +1967,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
int err;
int i;

for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
for (i = 0; i < mvdev->max_vqs; i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
@ -2060,9 +2038,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)

ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
else
ndev->cur_num_vqs = 2;
ndev->rqt_size = 1;

ndev->cur_num_vqs = 2 * ndev->rqt_size;

update_cvq_info(mvdev);
return err;
@ -2529,7 +2509,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
struct mlx5_vdpa_virtqueue *mvq;
int i;

for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
mvq = &ndev->vqs[i];
memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
mvq->index = i;
@ -2671,7 +2651,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}

max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
if (max_vqs < 2) {
dev_warn(mdev->device,
"%d virtqueues are supported. At least 2 are required\n",
@ -2742,7 +2723,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}

config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@ -2769,7 +2750,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
goto err_reg;

@ -1450,13 +1450,9 @@ static struct socket *get_raw_socket(int fd)
return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(int fd)
static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
struct file *file = fget(fd);

if (!file)
return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
fput(file);
return ring;
}

@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
if (index == VHOST_NET_VQ_RX)
nvq->rx_ring = get_tap_ptr_ring(fd);
if (index == VHOST_NET_VQ_RX) {
if (sock)
nvq->rx_ring = get_tap_ptr_ring(sock->file);
else
nvq->rx_ring = NULL;
}

oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;

@ -740,10 +740,22 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
{
struct inode *inode = d_inode(path->dentry);
struct afs_vnode *vnode = AFS_FS_I(inode);
int seq = 0;
struct key *key;
int ret, seq = 0;

_enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);

if (!(query_flags & AT_STATX_DONT_SYNC) &&
!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key))
return PTR_ERR(key);
ret = afs_validate(vnode, key);
key_put(key);
if (ret < 0)
return ret;
}

do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
generic_fillattr(&init_user_ns, inode, stat);
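
The afs_getattr() hunk above makes an ordinary stat revalidate the vnode with the server when the callback promise has lapsed, unless the caller opted out with AT_STATX_DONT_SYNC. From userspace the two behaviours can be requested explicitly via statx(2); a small sketch (the actual effect depends on the filesystem backing the path):

#define _GNU_SOURCE		/* statx() and AT_STATX_DONT_SYNC, glibc 2.28+ */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	/* Default: the filesystem may sync with the server first. */
	if (statx(AT_FDCWD, ".", 0, STATX_SIZE, &stx) == 0)
		printf("synced size: %llu\n", (unsigned long long)stx.stx_size);

	/* Opt out: accept whatever attributes are cached locally. */
	if (statx(AT_FDCWD, ".", AT_STATX_DONT_SYNC, STATX_SIZE, &stx) == 0)
		printf("cached size: %llu\n", (unsigned long long)stx.stx_size);
	return 0;
}
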
@ -4481,6 +4481,9 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0)
req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
/* put file to avoid an attempt to IOPOLL the req */
io_put_file(req->file);
req->file = NULL;
return 0;
}

@ -339,7 +339,7 @@ static inline void audit_uring_entry(u8 op)
}
static inline void audit_uring_exit(int success, long code)
{
if (unlikely(!audit_dummy_context()))
if (unlikely(audit_context()))
__audit_uring_exit(success, code);
}
static inline void audit_syscall_entry(int major, unsigned long a0,