This is the 5.10.115 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmJ84ScACgkQONu9yGCS
 aT7E9hAAhsDVk1xE/vaWbEiMy5H+dWR6zoP57lHf2G/lKvkQ0dByZZTOyxlKSf+y
 6tZyysI3iXk8bGNDsRzHh0N4CpnjnRPug6JJlmeLTPTwY94xqKvspJhGubATjLSE
 QX11CRqFFE4gsJnlnGRh6LLnX0pTbqnvJ49TUf1uUbQ1qJE9l3eY+Jzac2m4N/k7
 f13075u8iZS3Qe6l4CcED07L+5Dg4ZvtzF68JFrT27jt25kk8kjZ+rXqP+AeHUd7
 THGTLvW5PH0GPG5oalb1ySDsuBcQJaCkteoAuHJ2HJN/HhFEyRLHfbSOwgmFzlny
 0uMkn2STJ/WlQzYjZ3RgmVvskkIEys2g0/WNS313Z5XHQo+Beb6TossMI8Ti/fBt
 W0c27iOxBHtK5PMm6G8/fznQmSOmBggWQE+VKCaPAWWxbKkghxa9FxeXd9QoLJgk
 V8VxlipblQzg7TZzhxXcBeXLRvltUVkUxWJZCuRGWcPy8jpTu1s9QmIT/EHPpcA8
 53uvkZGGFP7sQCL0h0c49cpMgFSQ6dLkEPDQ6C0A/Du1vy0b4cgdlEfwxY0nLHiL
 9s7gP215waUY37p+vp1aq/D+Dh1ayPObmERrkyU6/t0nmaqJZdg+Cr3CP0NFzITe
 KpHpcCwBxOj+PRQNlK7su3eCwEl5bbzusqrdt6NTbNQ9VmQHatY=
 =Rf1h
 -----END PGP SIGNATURE-----

Merge 5.10.115 into android12-5.10-lts

Changes in 5.10.115
	MIPS: Fix CP0 counter erratum detection for R4k CPUs
	parisc: Merge model and model name into one line in /proc/cpuinfo
	ALSA: hda/realtek: Add quirk for Yoga Duet 7 13ITL6 speakers
	ALSA: fireworks: fix wrong return count shorter than expected by 4 bytes
	mmc: sdhci-msm: Reset GCC_SDCC_BCR register for SDHC
	mmc: core: Set HS clock speed before sending HS CMD13
	gpiolib: of: fix bounds check for 'gpio-reserved-ranges'
	KVM: x86/svm: Account for family 17h event renumberings in amd_pmc_perf_hw_id
	iommu/vt-d: Calculate mask for non-aligned flushes
	drm/amd/display: Avoid reading audio pattern past AUDIO_CHANNELS_COUNT
	Revert "SUNRPC: attempt AF_LOCAL connect on setup"
	firewire: fix potential uaf in outbound_phy_packet_callback()
	firewire: remove check of list iterator against head past the loop body
	firewire: core: extend card->lock in fw_core_handle_bus_reset
	net: stmmac: disable Split Header (SPH) for Intel platforms
	genirq: Synchronize interrupt thread startup
	ASoC: da7219: Fix change notifications for tone generator frequency
	ASoC: wm8958: Fix change notifications for DSP controls
	ASoC: meson: Fix event generation for AUI ACODEC mux
	ASoC: meson: Fix event generation for G12A tohdmi mux
	ASoC: meson: Fix event generation for AUI CODEC mux
	s390/dasd: fix data corruption for ESE devices
	s390/dasd: prevent double format of tracks for ESE devices
	s390/dasd: Fix read for ESE with blksize < 4k
	s390/dasd: Fix read inconsistency for ESE DASD devices
	can: grcan: grcan_close(): fix deadlock
	can: isotp: remove re-binding of bound socket
	can: grcan: use ofdev->dev when allocating DMA memory
	can: grcan: grcan_probe(): fix broken system id check for errata workaround needs
	can: grcan: only use the NAPI poll budget for RX
	nfc: replace improper check device_is_registered() in netlink related functions
	nfc: nfcmrvl: main: reorder destructive operations in nfcmrvl_nci_unregister_dev to avoid bugs
	NFC: netlink: fix sleep in atomic bug when firmware download timeout
	gpio: pca953x: fix irq_stat not updated when irq is disabled (irq_mask not set)
	hwmon: (adt7470) Fix warning on module removal
	ASoC: dmaengine: Restore NULL prepare_slave_config() callback
	net/mlx5e: Fix trust state reset in reload
	net/mlx5e: Don't match double-vlan packets if cvlan is not set
	net/mlx5e: CT: Fix queued up restore put() executing after relevant ft release
	net/mlx5e: Fix the calling of update_buffer_lossy() API
	net/mlx5: Avoid double clear or set of sync reset requested
	selftests/seccomp: Don't call read() on TTY from background pgrp
	RDMA/siw: Fix a condition race issue in MPA request processing
	NFSv4: Don't invalidate inode attributes on delegation return
	net: ethernet: mediatek: add missing of_node_put() in mtk_sgmii_init()
	net: dsa: mt7530: add missing of_node_put() in mt7530_setup()
	net: stmmac: dwmac-sun8i: add missing of_node_put() in sun8i_dwmac_register_mdio_mux()
	net: cpsw: add missing of_node_put() in cpsw_probe_dt()
	net: emaclite: Add error handling for of_address_to_resource()
	hinic: fix bug of wq out of bound access
	selftests: mirror_gre_bridge_1q: Avoid changing PVID while interface is operational
	bnxt_en: Fix possible bnxt_open() failure caused by wrong RFS flag
	bnxt_en: Fix unnecessary dropping of RX packets
	selftests: ocelot: tc_flower_chains: specify conform-exceed action for policer
	smsc911x: allow using IRQ0
	btrfs: always log symlinks in full mode
	net: igmp: respect RCU rules in ip_mc_source() and ip_mc_msfilter()
	kvm: x86/cpuid: Only provide CPUID leaf 0xA if host has architectural PMU
	net/mlx5: Fix slab-out-of-bounds while reading resource dump menu
	x86/kvm: Preserve BSP MSR_KVM_POLL_CONTROL across suspend/resume
	KVM: x86: Do not change ICR on write to APIC_SELF_IPI
	KVM: x86/mmu: avoid NULL-pointer dereference on page freeing bugs
	KVM: LAPIC: Enable timer posted-interrupt only when mwait/hlt is advertised
	rcu: Fix callbacks processing time limit retaining cond_resched()
	rcu: Apply callbacks processing time limit only on softirq
	block-map: add __GFP_ZERO flag for alloc_page in function bio_copy_kern
	dm: interlock pending dm_io and dm_wait_for_bios_completion
	PCI: aardvark: Clear all MSIs at setup
	PCI: aardvark: Fix reading MSI interrupt number
	mmc: rtsx: add 74 Clocks in power on flow
	Linux 5.10.115

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7fb58d6de4b0d2c26f4d83f4e59d4915993571a5
commit 3f70116e5f
Greg Kroah-Hartman, 2022-05-16 08:45:26 +02:00
70 changed files with 536 additions and 237 deletions

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 114
SUBLEVEL = 115
EXTRAVERSION =
NAME = Dare mighty things

@ -40,9 +40,9 @@
typedef unsigned int cycles_t;
/*
* On R4000/R4400 before version 5.0 an erratum exists such that if the
* cycle counter is read in the exact moment that it is matching the
* compare register, no interrupt will be generated.
* On R4000/R4400 an erratum exists such that if the cycle counter is
* read in the exact moment that it is matching the compare register,
* no interrupt will be generated.
*
* There is a suggested workaround and also the erratum can't strike if
* the compare interrupt isn't being used as the clock source device.
@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
if (!__builtin_constant_p(cpu_has_counter))
asm volatile("" : "=m" (cpu_data[0].options));
if (likely(cpu_has_counter &&
prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
return 1;
else
return 0;

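A short worked example of the encoding behind the new bound (a sketch; it assumes cpu.h defines PRID_REV_ENCODE_44(ver, rev) as ((ver) << 4 | (rev)) and PRID_IMP_R4000 as 0x0400):

	/* Hypothetical illustration, not part of the patch. */
	prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))	/* old: prid >= 0x0450 */
	prid >  (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))	/* new: prid >  0x04ff */

	/*
	 * 15.15 is the largest revision two 4-bit fields can encode, so no
	 * R4000/R4400 can ever satisfy the new test: the counter is simply
	 * never used as a clock source on this family, erratum or not.
	 */
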
@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug.
* has the mfc0 from count bug. This seems to be the last version
* produced.
*/
if ((current_cpu_data.processor_id & 0xff) <= 0x30)
return 1;
/*
* we assume newer revisions are ok
*/
return 0;
return 1;
}
return 0;

@ -419,8 +419,7 @@ show_cpuinfo (struct seq_file *m, void *v)
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");

@ -66,6 +66,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
static int has_guest_poll = 0;
/*
* No need for any "IO delay" on KVM
*/
@ -624,14 +625,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
static int kvm_suspend(void)
{
u64 val = 0;
kvm_guest_cpu_offline(false);
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
rdmsrl(MSR_KVM_POLL_CONTROL, val);
has_guest_poll = !(val & 1);
#endif
return 0;
}
static void kvm_resume(void)
{
kvm_cpu_online(raw_smp_processor_id());
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
wrmsrl(MSR_KVM_POLL_CONTROL, 0);
#endif
}
static struct syscore_ops kvm_syscore_ops = {

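A hedged reading of this suspend/resume pair: MSR_KVM_POLL_CONTROL defaults to "host polling enabled" (bit 0 set), so kvm_suspend() records whether the guest had cleared it to use guest-side haltpoll, and kvm_resume() rewrites 0 only in that case, restoring the guest's choice after the MSR reverts to its default.
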
@ -668,6 +668,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;
if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
perf_get_x86_pmu_capability(&cap);
/*

@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}
bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
@ -2106,10 +2107,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
case APIC_SELF_IPI:
if (apic_x2apic_mode(apic)) {
kvm_lapic_reg_write(apic, APIC_ICR,
APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
} else
if (apic_x2apic_mode(apic))
kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
else
ret = 1;
break;
default:

@ -3140,6 +3140,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
return;
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
if (WARN_ON(!sp))
return;
if (kvm_mmu_put_root(kvm, sp)) {
if (sp->tdp_mmu_page)

@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
ARRAY_SIZE(amd_f17h_event_mapping));
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@ -128,19 +144,25 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_event_hw_type_mapping *event_mapping;
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
if (guest_cpuid_family(pmc->vcpu) >= 0x17)
event_mapping = amd_f17h_event_mapping;
else
event_mapping = amd_event_mapping;
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
if (event_mapping[i].eventsel == event_select
&& event_mapping[i].unit_mask == unit_mask)
break;
if (i == ARRAY_SIZE(amd_event_mapping))
return PERF_COUNT_HW_MAX;
return amd_event_mapping[i].event_type;
return event_mapping[i].event_type;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */

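For example (hypothetical guest): on a family 0x17 (Zen) guest, event_select 0x64 with unit_mask 0x09 now resolves through the Zen table to PERF_COUNT_HW_CACHE_MISSES, where a lookup in the pre-Zen table would have yielded a wrong or missing mapping for the same encoding.
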
@ -488,7 +488,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;
page = alloc_page(q->bounce_gfp | gfp_mask);
page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;

@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);

@ -1480,6 +1480,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
struct client *e_client;
switch (status) {
/* expected: */
@ -1496,9 +1497,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
}
e->phy_packet.data[0] = packet->timestamp;
e_client = e->client;
queue_event(e->client, &e->event, &e->phy_packet,
sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
client_put(e->client);
client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)

@ -374,16 +374,13 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
spin_unlock_irqrestore(&card->lock, flags);
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
@ -509,6 +506,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
@ -520,8 +519,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_retries = 0;
}
spin_lock_irqsave(&card->lock, flags);
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*

@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
struct fw_transaction *t;
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
if (!try_cancel_split_timeout(t)) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter == transaction) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
if (t) {
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t;
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
if (!try_cancel_split_timeout(t)) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);

@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb;
struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(orb, &lu->orb_list, link) {
list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == orb->request_bus) {
orb->rcode = RCODE_COMPLETE;
list_del(&orb->link);
STATUS_GET_ORB_LOW(status) == iter->request_bus) {
iter->rcode = RCODE_COMPLETE;
list_del(&iter->link);
orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
if (&orb->link != &lu->orb_list) {
if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {

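The firewire and sbp2 hunks above all fix the same anti-pattern: using the list_for_each_entry() cursor after the loop, where it holds a bogus container_of(head) value rather than a real element. A generic kernel-style sketch of the corrected idiom (hypothetical types, for illustration only):

	#include <linux/list.h>

	struct item {
		int id;
		struct list_head link;
	};

	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *found = NULL, *iter;

		list_for_each_entry(iter, head, link) {
			if (iter->id == id) {
				found = iter;	/* publish the match explicitly */
				break;
			}
		}
		/* On full traversal, iter does not point at a valid item;
		 * only the explicitly published 'found' may be used. */
		return found;
	}
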
@ -761,11 +761,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
if (bitmap_empty(trigger, gc->ngpio))
return false;
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);

@ -912,7 +912,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
i, &start);
of_property_read_u32_index(np, "gpio-reserved-ranges",
i + 1, &count);
if (start >= chip->ngpio || start + count >= chip->ngpio)
if (start >= chip->ngpio || start + count > chip->ngpio)
continue;
bitmap_clear(chip->valid_mask, start, count);

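A worked example of the off-by-one (hypothetical numbers): with chip->ngpio = 8, the range start = 4, count = 4 ends exactly at the last GPIO and is valid, yet start + count == 8 made the old ">=" test skip it; a genuinely out-of-range request such as start = 4, count = 5 is still rejected by the corrected ">" test.
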
@ -2822,7 +2822,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
channel_count = dpcd_test_mode.bits.channel_count + 1;
channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||

@ -20,6 +20,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/util_macros.h>
#include <linux/sched.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
@ -260,11 +261,10 @@ static int adt7470_update_thread(void *p)
adt7470_read_temperatures(client, data);
mutex_unlock(&data->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
}
return 0;

@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_set_inuse(new_cep);
rv = siw_proc_mpareq(new_cep);
siw_cep_set_free(new_cep);
if (rv != -EAGAIN) {
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv)
if (rv) {
siw_cep_set_free(new_cep);
goto error;
}
}
siw_cep_set_free(new_cep);
}
return;

@ -1626,7 +1626,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned long pfn, unsigned int pages,
int ih, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
u16 did = domain->iommu_did[iommu->seq_id];
@ -1638,10 +1639,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (domain_use_first_level(domain)) {
domain_flush_piotlb(iommu, domain, addr, pages, ih);
} else {
unsigned long bitmask = aligned_pages - 1;
/*
* PSI masks the low order bits of the base address. If the
* address isn't aligned to the mask, then compute a mask value
* needed to ensure the target range is flushed.
*/
if (unlikely(bitmask & pfn)) {
unsigned long end_pfn = pfn + pages - 1, shared_bits;
/*
* Since end_pfn <= pfn + bitmask, the only way bits
* higher than bitmask can differ in pfn and end_pfn is
* by carrying. This means after masking out bitmask,
* high bits starting with the first set bit in
* shared_bits are all equal in both pfn and end_pfn.
*/
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
}
/*
* Fallback to domain selective flush if no PSI support or
* the size is too big. PSI requires page size to be 2 ^ x,
* and the base address is naturally aligned to the size.
* the size is too big.
*/
if (!cap_pgsel_inv(iommu->cap) ||
mask > cap_max_amask_val(iommu->cap))

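To make the shared_bits computation concrete, a worked example with hypothetical values (pfn = 0x3, pages = 2, i.e. flush pfns 3..4):

	aligned_pages = 2;			/* __roundup_pow_of_two(2) */
	bitmask = 1;				/* aligned_pages - 1 */
	/* bitmask & pfn == 1: the base is not aligned to the flush size */
	end_pfn = 0x4;				/* pfn + pages - 1 */
	shared_bits = ~(0x3 ^ 0x4) & ~1UL;	/* ~0x7UL, lowest set bit is 3 */
	mask = 3;				/* __ffs(shared_bits) */
	/*
	 * The PSI flush then covers 2^3 = 8 pages based at pfn 0x0,
	 * i.e. pfns 0..7 -- a superset of the requested 3..4 range.
	 */
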
@ -613,13 +613,15 @@ static void end_io_acct(struct mapped_device *md, struct bio *bio,
{
unsigned long duration = jiffies - start_time;
bio_end_io_acct(bio, start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
true, duration, stats_aux);
smp_wmb();
bio_end_io_acct(bio, start_time);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
@ -2364,6 +2366,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state
}
finish_wait(&md->wait, &wait);
smp_rmb();
return r;
}

@ -1382,13 +1382,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
*/
mmc_set_timing(host, MMC_TIMING_MMC_HS);
mmc_set_bus_speed(card);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
mmc_set_clock(host, card->ext_csd.hs_max_dtr);
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@ -1446,7 +1450,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int old_timing, old_signal_voltage;
unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@ -1477,8 +1481,17 @@ static int mmc_select_hs200(struct mmc_card *card)
false, true);
if (err)
goto err;
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
* NB: We can't move to full (HS200) speeds until after we've
* successfully switched over.
*/
old_timing = host->ios.timing;
old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@ -1491,8 +1504,10 @@ static int mmc_select_hs200(struct mmc_card *card)
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
if (err == -EBADMSG)
if (err == -EBADMSG) {
mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
}
}
err:
if (err) {

@ -37,10 +37,7 @@ struct realtek_pci_sdmmc {
bool double_clk;
bool eject;
bool initial_mode;
int power_state;
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
int prev_power_state;
int sg_count;
s32 cookie;
int cookie_sg_count;
@ -902,14 +899,21 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
return err;
}
static int sd_power_on(struct realtek_pci_sdmmc *host)
static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
{
struct rtsx_pcr *pcr = host->pcr;
int err;
if (host->power_state == SDMMC_POWER_ON)
if (host->prev_power_state == MMC_POWER_ON)
return 0;
if (host->prev_power_state == MMC_POWER_UP) {
rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
goto finish;
}
msleep(100);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
@ -928,11 +932,17 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
if (err < 0)
return err;
mdelay(1);
err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
if (err < 0)
return err;
host->power_state = SDMMC_POWER_ON;
/* send at least 74 clocks */
rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
finish:
host->prev_power_state = power_mode;
return 0;
}
@ -941,7 +951,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
struct rtsx_pcr *pcr = host->pcr;
int err;
host->power_state = SDMMC_POWER_OFF;
host->prev_power_state = MMC_POWER_OFF;
rtsx_pci_init_cmd(pcr);
@ -967,7 +977,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
if (power_mode == MMC_POWER_OFF)
err = sd_power_off(host);
else
err = sd_power_on(host);
err = sd_power_on(host, power_mode);
return err;
}
@ -1404,10 +1414,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->pcr = pcr;
mmc->ios.power_delay_ms = 5;
host->mmc = mmc;
host->pdev = pdev;
host->cookie = -1;
host->power_state = SDMMC_POWER_OFF;
host->prev_power_state = MMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;

@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"
@ -2488,6 +2489,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
{
struct reset_control *reset;
int ret = 0;
reset = reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(reset))
return dev_err_probe(dev, PTR_ERR(reset),
"unable to acquire core_reset\n");
if (!reset)
return ret;
ret = reset_control_assert(reset);
if (ret) {
reset_control_put(reset);
return dev_err_probe(dev, ret, "core_reset assert failed\n");
}
/*
* The hardware requirement for delay between assert/deassert
* is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
* ~125us (4/32768). To be on the safe side add 200us delay.
*/
usleep_range(200, 210);
ret = reset_control_deassert(reset);
if (ret) {
reset_control_put(reset);
return dev_err_probe(dev, ret, "core_reset deassert failed\n");
}
usleep_range(200, 210);
reset_control_put(reset);
return ret;
}
static int sdhci_msm_probe(struct platform_device *pdev)
{
@ -2536,6 +2574,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
ret = sdhci_msm_gcc_reset(&pdev->dev, host);
if (ret)
goto pltfm_free;
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {

@ -241,13 +241,14 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@ -924,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@ -949,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
dma->base_buf = dma_alloc_coherent(&dev->dev,
dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@ -1113,8 +1114,10 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
spin_unlock_irqrestore(&priv->lock, flags);
del_timer_sync(&priv->hang_timer);
del_timer_sync(&priv->rr_timer);
spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
@ -1134,7 +1137,7 @@ static int grcan_close(struct net_device *dev)
return 0;
}
static int grcan_transmit_catch_up(struct net_device *dev, int budget)
static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@ -1142,7 +1145,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
work_done = catch_up_echo_skb(dev, budget, true);
work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@ -1156,8 +1159,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@ -1239,19 +1240,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
int tx_work_done, rx_work_done;
int rx_budget = budget / 2;
int tx_budget = budget - rx_budget;
int work_done;
/* Half of the budget for receiving messages */
rx_work_done = grcan_receive(dev, rx_budget);
work_done = grcan_receive(dev, budget);
/* Half of the budget for transmitting messages as that can trigger echo
* frames being received
*/
tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
grcan_transmit_catch_up(dev);
if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@ -1268,7 +1263,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
return rx_work_done + tx_work_done;
return work_done;
}
/* Work around the tx bug by waiting a while for the risky situation to clear. If that fails,
@ -1600,6 +1595,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@ -1652,6 +1648,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@ -1660,10 +1657,15 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
err = of_property_read_u32(np, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK)
>= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
txbug = false;
sysid_parent = of_find_node_by_path("/ambapp0");
if (sysid_parent) {
of_node_get(sysid_parent);
err = of_property_read_u32(sysid_parent, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK) >=
GRCAN_TXBUG_SAFE_GRLIB_VERSION))
txbug = false;
of_node_put(sysid_parent);
}
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {

@ -1663,6 +1663,7 @@ mt7530_setup(struct dsa_switch *ds)
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);

@ -2575,6 +2575,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
/* No more budget for RX work */
if (budget && work_done >= budget && idx == BNXT_RX_HDL)
break;
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
@ -10453,7 +10457,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@ -12481,10 +12485,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
bp->dev->features |= NETIF_F_NTUPLE;
}
bnxt_set_dflt_rfs(bp);
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;

@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
/* If we only have one page, still need to get shadow wqe when
* wqe rolling-over page
*/
if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*cons_idx = curr_cons_idx;
if (curr_pg != end_pg) {
/* If we only have one page, still need to get shadow wqe when
* wqe rolling-over page
*/
if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);

@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
break;
ss->regmap[i] = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
}

@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
struct mlx5_core_mkey mkey;
u32 number_of_menu_items;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
return -EINVAL;
}
static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
int read_size, int start_idx)
{
void *data = page_address(page);
enum mlx5_sgmt_type sgmt_idx;
int num_of_items;
char *sgmt_name;
void *member;
int size = 0;
void *menu;
int i;
menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
if (!start_idx) {
menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
num_of_records);
size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
data += size;
}
num_of_items = rsc_dump->number_of_menu_items;
for (i = 0; i < num_of_items; i++) {
member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
for (i = 0; start_idx + i < num_of_items; i++) {
size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
if (size >= read_size)
return start_idx + i;
member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
if (sgmt_idx == -EINVAL)
@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
member, segment_type);
}
return 0;
}
static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
struct mlx5_rsc_dump_cmd *cmd = NULL;
struct mlx5_rsc_key key = {};
struct page *page;
int start_idx = 0;
int size;
int err;
@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
if (err < 0)
goto destroy_cmd;
mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
} while (err > 0);

@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
xoff, &port_buffer, &update_buffer);
err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
}

@ -1629,6 +1629,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
static void
mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
{
struct mlx5e_priv *priv;
if (!refcount_dec_and_test(&ft->refcount))
return;
@ -1638,6 +1640,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
priv = netdev_priv(ct_priv->netdev);
flush_workqueue(priv->wq);
mlx5_tc_ct_free_pre_ct_tables(ft);
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
kfree(ft);

@ -1210,6 +1210,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
if (err)
return err;
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
/*
* Align the driver state with the register state.
* Temporary state change is required to enable the app list reset.
*/
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
mlx5e_dcbnl_delete_app(priv);
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
}
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);

@ -2396,6 +2396,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
match.mask->vlan_eth_type &&
MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
ft_field_support.outer_second_vid,
fs_type)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_cvlan_tag, 1);
spec->match_criteria_enable |=
MLX5_MATCH_MISC_PARAMETERS;
}
}
} else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and

@ -134,14 +134,19 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
del_timer_sync(&fw_reset->timer);
}
static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
mlx5_core_warn(dev, "Reset request was already cleared\n");
return -EALREADY;
}
mlx5_stop_sync_reset_poll(dev);
clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
if (poll_health)
mlx5_start_health_poll(dev);
return 0;
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@ -185,13 +190,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
}
static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
mlx5_core_warn(dev, "Reset request was already set\n");
return -EALREADY;
}
mlx5_stop_health_poll(dev, true);
set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
mlx5_start_sync_reset_poll(dev);
return 0;
}
static void mlx5_fw_live_patch_event(struct work_struct *work)
@ -225,7 +234,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
err ? "Failed" : "Sent");
return;
}
mlx5_sync_reset_set_reset_requested(dev);
if (mlx5_sync_reset_set_reset_requested(dev))
return;
err = mlx5_fw_reset_set_reset_sync_ack(dev);
if (err)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
@ -325,7 +336,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
mlx5_sync_reset_clear_reset_requested(dev, false);
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
@ -354,10 +366,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}

@ -2422,7 +2422,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (irq == -EPROBE_DEFER) {
retval = -EPROBE_DEFER;
goto out_0;
} else if (irq <= 0) {
} else if (irq < 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;

@ -243,6 +243,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
plat->sph_disable = 1;
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

@ -895,6 +895,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
&gmac->mux_handle, priv, priv->mii);
of_node_put(mdio_mux);
return ret;
}

@ -5046,7 +5046,7 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
if (priv->dma_cap.sphen) {
if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph = true;
dev_info(priv->device, "SPH feature enabled\n");

@ -1255,8 +1255,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
sizeof(struct cpsw_slave_data),
GFP_KERNEL);
if (!data->slave_data)
if (!data->slave_data) {
of_node_put(tmp_node);
return -ENOMEM;
}
/* Populate all the child nodes here...
*/
@ -1353,6 +1355,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
err_node_put:
of_node_put(port_np);
of_node_put(tmp_node);
return ret;
}

@ -820,10 +820,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
int rc, ret;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
@ -833,8 +833,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
return -ENODEV;
}
npp = of_get_parent(np);
of_address_to_resource(npp, 0, &res);
ret = of_address_to_resource(npp, 0, &res);
of_node_put(npp);
if (ret) {
dev_err(dev, "%s resource error!\n",
dev->of_node->full_name);
of_node_put(np);
return ret;
}
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
phydev = of_phy_find_device(lp->phy_node);
@ -843,6 +849,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
"MDIO of the phy is not registered yet\n");
else
put_device(&phydev->mdio.dev);
of_node_put(np);
return 0;
}
@ -855,6 +862,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
of_node_put(np);
return -ENOMEM;
}
@ -867,6 +875,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->parent = dev;
rc = of_mdiobus_register(bus, np);
of_node_put(np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;

@ -194,6 +194,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
{
struct nci_dev *ndev = priv->ndev;
nci_unregister_device(ndev);
if (priv->ndev->nfc_dev->fw_download_in_progress)
nfcmrvl_fw_dnld_abort(priv);
@ -202,7 +203,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
if (gpio_is_valid(priv->config.reset_n_io))
gpio_free(priv->config.reset_n_io);
nci_unregister_device(ndev);
nci_free_device(ndev);
kfree(priv);
}

@ -114,6 +114,7 @@
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
@ -577,6 +578,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
/* Clear all interrupts */
advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
@ -589,7 +591,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
/* Unmask all MSIs */
advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@ -1386,23 +1388,19 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
u32 msi_val, msi_mask, msi_status, msi_idx;
u16 msi_data;
int virq;
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
msi_status = msi_val & ~msi_mask;
msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
if (!(BIT(msi_idx) & msi_status))
continue;
/*
* msi_idx contains bits [4:0] of the msi_data and msi_data
* contains 16bit MSI interrupt number
*/
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
generic_handle_irq(msi_data);
virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
generic_handle_irq(virq);
}
advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,

@ -1462,6 +1462,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
if (!cqr->lpm)
cqr->lpm = dasd_path_get_opm(device);
}
/*
* remember the number of formatted tracks to prevent double format on
* ESE devices
*/
if (cqr->block)
cqr->trkcount = atomic_read(&cqr->block->trkcount);
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
@ -1680,6 +1687,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
struct request *req;
u8 *sense = NULL;
int expires;
@ -1780,7 +1788,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
if (dasd_ese_needs_format(cqr->block, irb)) {
if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
req = dasd_get_callback_data(cqr);
if (!req) {
cqr->status = DASD_CQR_ERROR;
return;
}
if (rq_data_dir(req) == READ) {
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
@ -2799,8 +2812,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
* complete a request partially.
*/
if (proc_bytes) {
blk_update_request(req, BLK_STS_OK,
blk_rq_bytes(req) - proc_bytes);
blk_update_request(req, BLK_STS_OK, proc_bytes);
blk_mq_requeue_request(req, true);
} else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);

@ -3026,13 +3026,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
struct dasd_block *block)
struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
if (cqr->trkcount != atomic_read(&block->trkcount)) {
/*
* The number of formatted tracks has changed after request
* start and we cannot tell if the current track was involved.
* To avoid data corruption, treat it as if the current track is
* involved.
*/
rc = true;
goto out;
}
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
@ -3052,6 +3063,7 @@ static void clear_format_track(struct dasd_format_entry *format,
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
@ -3088,7 +3100,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
sector_t curr_trk;
int rc;
req = cqr->callback_data;
req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
@ -3113,8 +3125,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
}
format->track = curr_trk;
/* test if track is already in formatting by another thread */
if (test_and_set_format_track(format, block))
if (test_and_set_format_track(format, cqr)) {
/* this is not a real error, so do not count down retries */
cqr->retries++;
return ERR_PTR(-EEXIST);
}
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
@ -3213,12 +3228,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
cqr->proc_bytes = blk_count * blksize;
return 0;
}
if (dst && !skip_block) {
dst += off;
if (dst && !skip_block)
memset(dst, 0, blksize);
} else {
else
skip_block--;
}
dst += blksize;
blk_count++;
}
}

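The new trkcount fields implement a simple generation counter. A stripped-down sketch of the scheme (hypothetical types, not the dasd structures):

	struct blk {
		atomic_t trkcount;	/* bumped whenever a track format completes */
	};

	struct req {
		unsigned int trkcount;	/* snapshot taken when the request starts */
	};

	static bool track_may_be_involved(const struct req *r, struct blk *b)
	{
		/*
		 * If any track was formatted since the snapshot, we cannot
		 * prove it was not ours -- err on the side of caution and
		 * report a conflict.
		 */
		return r->trkcount != atomic_read(&b->trkcount);
	}
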
@ -188,6 +188,7 @@ struct dasd_ccw_req {
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
unsigned int proc_bytes; /* bytes for partial completion */
unsigned int trkcount; /* count formatted tracks */
};
/*
@ -575,6 +576,7 @@ struct dasd_block {
struct list_head format_list;
spinlock_t format_lock;
atomic_t trkcount;
};
struct dasd_attention_data {
@ -723,6 +725,18 @@ dasd_check_blocksize(int bsize)
return 0;
}
/*
* return the callback data of the original request in case there are
* ERP requests build on top of it
*/
static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
{
while (cqr->refers)
cqr = cqr->refers;
return cqr->callback_data;
}
/* externals in dasd.c */
#define DASD_PROFILE_OFF 0
#define DASD_PROFILE_ON 1

@ -5333,6 +5333,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
mutex_lock(&inode->log_mutex);
}
/*
* For symlinks, we must always log their content, which is stored in an
* inline extent, otherwise we could end up with an empty symlink after
* log replay, which is invalid on Linux (symlink(2) returns -ENOENT if
* one attempts to create an empty symlink).
* We don't need to worry about flushing delalloc, because the inline
* extent is created when the symlink is created (we never have delalloc
* for symlinks).
*/
if (S_ISLNK(inode->vfs_inode.i_mode))
inode_only = LOG_INODE_ALL;
/*
* a brute force approach to making sure we get the most uptodate
* copies of everything.
@ -5723,7 +5735,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
if (type == BTRFS_FT_DIR)
log_mode = LOG_INODE_ALL;
ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
log_mode, ctx);

@ -369,6 +369,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
kunmap_atomic(start);
}
static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
fattr->pre_change_attr = version;
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
}
static void nfs4_test_and_free_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
const struct cred *cred)
@ -6464,7 +6472,9 @@ static void nfs4_delegreturn_release(void *calldata)
pnfs_roc_release(&data->lr.arg, &data->lr.res,
data->res.lr_ret);
if (inode) {
nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
nfs4_fattr_set_prechange(&data->fattr,
inode_peek_iversion_raw(inode));
nfs_refresh_inode(inode, &data->fattr);
nfs_iput_and_deactive(inode);
}
kfree(calldata);

@ -203,5 +203,6 @@ struct plat_stmmacenet_data {
bool vlan_fail_q_en;
u8 vlan_fail_q;
unsigned int eee_usecs_rate;
bool sph_disable;
};
#endif

@ -29,12 +29,14 @@ extern struct irqaction chained_action;
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
* IRQTF_FORCED_THREAD - irq action is force threaded
* IRQTF_READY - signals that irq thread is ready
*/
enum {
IRQTF_RUNTHREAD,
IRQTF_WARNED,
IRQTF_AFFINITY,
IRQTF_FORCED_THREAD,
IRQTF_READY,
};
/*

@ -405,6 +405,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
mutex_init(&desc->request_mutex);
init_rcu_head(&desc->rcu);
init_waitqueue_head(&desc->wait_for_threads);
desc_set_defaults(irq, desc, node, affinity, owner);
irqd_set(&desc->irq_data, flags);
@ -573,6 +574,7 @@ int __init early_irq_init(void)
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
mutex_init(&desc[i].request_mutex);
init_waitqueue_head(&desc[i].wait_for_threads);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
}
return arch_early_irq_init();

@ -1149,6 +1149,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
raw_spin_unlock_irq(&desc->lock);
}
/*
* Internal function to notify that an interrupt thread is ready.
*/
static void irq_thread_set_ready(struct irq_desc *desc,
struct irqaction *action)
{
set_bit(IRQTF_READY, &action->thread_flags);
wake_up(&desc->wait_for_threads);
}
/*
* Internal function to wake up an interrupt thread and wait until it is
* ready.
*/
static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
struct irqaction *action)
{
if (!action || !action->thread)
return;
wake_up_process(action->thread);
wait_event(desc->wait_for_threads,
test_bit(IRQTF_READY, &action->thread_flags));
}
/*
* Interrupt handler thread
*/
@ -1160,6 +1185,8 @@ static int irq_thread(void *data)
irqreturn_t (*handler_fn)(struct irq_desc *desc,
struct irqaction *action);
irq_thread_set_ready(desc, action);
if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
&action->thread_flags))
handler_fn = irq_forced_thread_fn;
@ -1584,8 +1611,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
if (!shared) {
init_waitqueue_head(&desc->wait_for_threads);
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc,
@ -1675,14 +1700,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq_setup_timings(desc, new);
/*
* Strictly no need to wake it up, but hung_task complains
* when no hard interrupt wakes the thread up.
*/
if (new->thread)
wake_up_process(new->thread);
if (new->secondary)
wake_up_process(new->secondary->thread);
wake_up_and_wait_for_irq_thread_ready(desc, new);
wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
register_irq_proc(irq, desc);
new->dir = NULL;

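A hedged reading of the manage.c change: __setup_irq() now blocks until each handler thread has reached irq_thread_set_ready(), so an interrupt arriving right after registration can no longer race with a thread that has not finished starting up; the unconditional wake-up also keeps hung_task quiet, which the deleted comment used to justify separately.
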
@@ -2456,7 +2456,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
  div = READ_ONCE(rcu_divisor);
  div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
  bl = max(rdp->blimit, pending >> div);
- if (unlikely(bl > 100)) {
+ if (in_serving_softirq() && unlikely(bl > 100)) {
          long rrn = READ_ONCE(rcu_resched_ns);
          rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
@@ -2490,19 +2490,23 @@ static void rcu_do_batch(struct rcu_data *rdp)
   * Stop only if limit reached and CPU has something to do.
   * Note: The rcl structure counts down from zero.
   */
- if (-rcl.len >= bl && !offloaded &&
-     (need_resched() ||
-      (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
-         break;
- if (unlikely(tlimit)) {
-         /* only call local_clock() every 32 callbacks */
-         if (likely((-rcl.len & 31) || local_clock() < tlimit))
-                 continue;
-         /* Exceeded the time limit, so leave. */
-         break;
- }
- if (offloaded) {
-         WARN_ON_ONCE(in_serving_softirq());
+ if (in_serving_softirq()) {
+         if (-rcl.len >= bl && (need_resched() ||
+                 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+                 break;
+         /*
+          * Make sure we don't spend too much time here and deprive other
+          * softirq vectors of CPU cycles.
+          */
+         if (unlikely(tlimit)) {
+                 /* only call local_clock() every 32 callbacks */
+                 if (likely((-rcl.len & 31) || local_clock() < tlimit))
+                         continue;
+                 /* Exceeded the time limit, so leave. */
+                 break;
+         }
+ } else {
          local_bh_enable();
          lockdep_assert_irqs_enabled();
          cond_resched_tasks_rcu_qs();

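After this change the callback-count limit (bl) and the time limit (tlimit) apply only when rcu_do_batch() runs from softirq context, where it competes with other softirq vectors; the kthread path instead re-enables bottom halves and yields through cond_resched_tasks_rcu_qs(). The "sample the clock only every 32 callbacks" idiom keeps the cost of local_clock() out of the per-callback fast path. A stand-alone sketch of that idiom (now_ns() and the process callback are hypothetical, not kernel API):

    #include <stdint.h>
    #include <time.h>

    /* Hypothetical stand-in for local_clock(): monotonic nanoseconds. */
    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /*
     * Process up to n items but honor a time budget, sampling the clock
     * only every 32 items, the same trick as
     * "(-rcl.len & 31) || local_clock() < tlimit" above.
     */
    static int process_batch(void (*process)(int), int n, uint64_t budget_ns)
    {
            uint64_t deadline = now_ns() + budget_ns;
            int i;

            for (i = 0; i < n; i++) {
                    process(i);
                    if ((i & 31) == 0 && now_ns() >= deadline)
                            break;  /* budget spent: leave the rest for later */
            }
            return i;               /* items actually processed */
    }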

@@ -1144,6 +1144,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
  lock_sock(sk);
+ if (so->bound) {
+         err = -EINVAL;
+         goto out;
+ }
  /* do not register frame reception for functional addressing */
  if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
          do_rx_reg = 0;
@@ -1154,10 +1159,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
          goto out;
  }
- if (so->bound && addr->can_ifindex == so->ifindex &&
-     rx_id == so->rxid && tx_id == so->txid)
-         goto out;
  dev = dev_get_by_index(net, addr->can_ifindex);
  if (!dev) {
          err = -ENODEV;
@@ -1184,19 +1185,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
  dev_put(dev);
- if (so->bound && do_rx_reg) {
-         /* unregister old filter */
-         if (so->ifindex) {
-                 dev = dev_get_by_index(net, so->ifindex);
-                 if (dev) {
-                         can_rx_unregister(net, dev, so->rxid,
-                                           SINGLE_MASK(so->rxid),
-                                           isotp_rcv, sk);
-                         dev_put(dev);
-                 }
-         }
- }
  /* switch to new settings */
  so->ifindex = ifindex;
  so->rxid = rx_id;

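isotp_bind() now rejects any bind() on an already-bound socket with -EINVAL instead of silently re-binding, and the old-filter unregister path goes away with it. User space that used to re-bind must close the socket and open a fresh one. A minimal sketch of the new contract (assumes a can0 interface with ISO-TP support; error handling trimmed):

    #include <errno.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/can.h>
    #include <linux/can/isotp.h>

    int main(void)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

            addr.can_ifindex = if_nametoindex("can0");
            addr.can_addr.tp.rx_id = 0x7e8;
            addr.can_addr.tp.tx_id = 0x7e0;

            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    perror("bind");

            /* With this fix, a second bind() on the same socket fails with
             * EINVAL even if the parameters are identical. */
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
                errno == EINVAL)
                    fprintf(stderr, "re-bind rejected: open a new socket instead\n");

            close(s);
            return 0;
    }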

@@ -2394,9 +2394,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
                  newpsl->sl_addr[i] = psl->sl_addr[i];
          /* decrease mem now to avoid the memleak warning */
          atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
-         kfree_rcu(psl, rcu);
  }
  rcu_assign_pointer(pmc->sflist, newpsl);
+ if (psl)
+         kfree_rcu(psl, rcu);
  psl = newpsl;
  }
  rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2494,11 +2495,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
                     psl->sl_count, psl->sl_addr, 0);
          /* decrease mem now to avoid the memleak warning */
          atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
-         kfree_rcu(psl, rcu);
- } else
+ } else {
          (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
                               0, NULL, 0);
+ }
  rcu_assign_pointer(pmc->sflist, newpsl);
+ if (psl)
+         kfree_rcu(psl, rcu);
  pmc->sfmode = msf->imsf_fmode;
  err = 0;
  done:

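Both hunks fix the same RCU ordering bug: the old filter list was handed to kfree_rcu() while pmc->sflist still pointed at it, so a reader entering its read-side section after the kfree_rcu() call could fetch the stale pointer and race with the free. The rule is publish first, reclaim second. The generic shape of the pattern, as a hedged kernel-style sketch ('entry' and 'table' are hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry {
            int data;
            struct rcu_head rcu;
    };

    static struct entry __rcu *table;
    static DEFINE_SPINLOCK(table_lock);

    static void table_replace(struct entry *new)
    {
            struct entry *old;

            spin_lock(&table_lock);
            old = rcu_dereference_protected(table,
                                            lockdep_is_held(&table_lock));
            rcu_assign_pointer(table, new); /* publish the new list first... */
            spin_unlock(&table_lock);

            if (old)
                    kfree_rcu(old, rcu);    /* ...free only after a grace period */
    }

Once the new pointer is published, no new reader can pick up the old list, so the grace period started by kfree_rcu() is guaranteed to outlast every reader that could still hold it.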

@@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -206,7 +206,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -245,7 +245,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -290,7 +290,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -334,7 +334,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -400,7 +400,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -446,7 +446,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -493,7 +493,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          kfree_skb(skb);
          goto error;
@@ -550,7 +550,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -599,7 +599,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
  device_lock(&dev->dev);
- if (!device_is_registered(&dev->dev)) {
+ if (dev->shutting_down) {
          rc = -ENODEV;
          goto error;
  }
@@ -1126,6 +1126,7 @@ int nfc_register_device(struct nfc_dev *dev)
                  dev->rfkill = NULL;
          }
  }
+ dev->shutting_down = false;
  device_unlock(&dev->dev);
  rc = nfc_genl_device_added(dev);
@@ -1158,12 +1159,10 @@ void nfc_unregister_device(struct nfc_dev *dev)
          rfkill_unregister(dev->rfkill);
          rfkill_destroy(dev->rfkill);
  }
+ dev->shutting_down = true;
  device_unlock(&dev->dev);
  if (dev->ops->check_presence) {
-         device_lock(&dev->dev);
-         dev->shutting_down = true;
-         device_unlock(&dev->dev);
          del_timer_sync(&dev->check_pres_timer);
          cancel_work_sync(&dev->check_pres_work);
  }

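device_is_registered() only reflects driver-core kobject state, which nfc cannot order against its own entry points, so these checks raced with device teardown. The fix gives nfc its own dev->shutting_down flag, written and read strictly under device_lock(), and sets it unconditionally in nfc_unregister_device() rather than only on the check_presence path. The protocol, in sketch form:

    /* Teardown side (nfc_unregister_device): mark the device dead
     * while holding the device lock. */
    device_lock(&dev->dev);
    dev->shutting_down = true;
    device_unlock(&dev->dev);

    /* Every operation: test the flag under the same lock, so the
     * check and the teardown are totally ordered. */
    device_lock(&dev->dev);
    if (dev->shutting_down) {
            rc = -ENODEV;
            goto error;
    }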

@@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
  struct sk_buff *msg;
  void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
  if (!msg)
          return -ENOMEM;
@@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
  genlmsg_end(msg, hdr);
- genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
+ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
  return 0;

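nfc_genl_fw_download_done() can be reached from the firmware-download timeout path, which runs in atomic (timer) context where GFP_KERNEL may sleep; switching both the allocation and the multicast to GFP_ATOMIC makes them safe there. A minimal sketch of the rule (the callback name is hypothetical):

    #include <linux/skbuff.h>
    #include <linux/timer.h>

    /* A timer callback runs in atomic context, so any allocation in it
     * must be non-sleeping. */
    static void fw_timeout_cb(struct timer_list *t)
    {
            struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);

            if (!skb)
                    return;         /* atomic allocations may fail: just drop */
            /* fill and deliver the notification here */
            kfree_skb(skb);
    }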

@@ -2826,9 +2826,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
          }
          xprt_set_bound(xprt);
          xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
-         ret = ERR_PTR(xs_local_setup_socket(transport));
-         if (ret)
-                 goto out_err;
          break;
  default:
          ret = ERR_PTR(-EAFNOSUPPORT);


@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
  type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
  if (copy_to_user(buf, &type, sizeof(type)))
          return -EFAULT;
+ count += sizeof(type);
  remained -= sizeof(type);
  buf += sizeof(type);

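The count returned to user space missed the 4-byte event type copied first, so reads came back short by sizeof(type). The invariant: every copy_to_user() must be mirrored in the count/remained/buf bookkeeping. A hedged sketch of the accounting (emit_event is a hypothetical helper, not the driver's function):

    /* Every byte copied to the user buffer must show up in the returned
     * count, or read() under-reports what was written. */
    static long emit_event(char __user *buf, long remained, u32 type,
                           const void *payload, long len)
    {
            long count = 0;

            if (copy_to_user(buf, &type, sizeof(type)))
                    return -EFAULT;
            count += sizeof(type);          /* the previously missing 4 bytes */
            remained -= sizeof(type);
            buf += sizeof(type);

            if (len > remained || copy_to_user(buf, payload, len))
                    return -EFAULT;
            count += len;

            return count;                   /* what user space sees from read() */
    }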

@@ -8969,6 +8969,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
  SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
  SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
  SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
  SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
  SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
  SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),


@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
  struct soc_mixer_control *mixer_ctrl =
          (struct soc_mixer_control *) kcontrol->private_value;
  unsigned int reg = mixer_ctrl->reg;
- __le16 val;
+ __le16 val_new, val_old;
  int ret;
@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
   * Therefore we need to convert to little endian here to align with
   * HW registers.
   */
- val = cpu_to_le16(ucontrol->value.integer.value[0]);
+ val_new = cpu_to_le16(ucontrol->value.integer.value[0]);
  mutex_lock(&da7219->ctrl_lock);
- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
+ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old));
+ if (ret == 0 && (val_old != val_new))
+         ret = regmap_raw_write(da7219->regmap, reg,
+                                &val_new, sizeof(val_new));
  mutex_unlock(&da7219->ctrl_lock);
- return ret;
+ if (ret < 0)
+         return ret;
+ return val_old != val_new;
  }

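ALSA kcontrol put() callbacks must return 1 when the value actually changed and 0 when it did not, so the core only emits value-change notifications to listeners on real changes. da7219 now reads the old register value, writes only on change, and derives the return value from the comparison. The generic shape of such a handler (my_read_reg/my_write_reg are hypothetical helpers):

    static int my_ctl_put(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
    {
            unsigned int new = ucontrol->value.integer.value[0];
            unsigned int old;
            int ret;

            ret = my_read_reg(&old);        /* hypothetical helper */
            if (ret < 0)
                    return ret;
            if (old == new)
                    return 0;               /* unchanged: no event */

            ret = my_write_reg(new);        /* hypothetical helper */
            if (ret < 0)
                    return ret;

            return 1;                       /* changed: core notifies listeners */
    }

The wm8958 DSP controls and the meson AIU/g12a mux handlers in the hunks that follow receive the identical 0 -> 1 return fix.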

@@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
  wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]);
- return 0;
+ return 1;
  }
  #define WM8958_MBC_SWITCH(xname, xval) {\
@@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
  wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]);
- return 0;
+ return 1;
  }
@@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
  wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
  }
  #define WM8958_HPF_SWITCH(xname, xval) {\
@@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
  wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
  }
  #define WM8958_ENH_EQ_SWITCH(xname, xval) {\


@@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
  snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
  }
  static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL,


@@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
  snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
  }
  static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL,


@@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
  snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
  }
  static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,


@@ -83,10 +83,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
  memset(&slave_config, 0, sizeof(slave_config));
- if (pcm->config && pcm->config->prepare_slave_config)
-         prepare_slave_config = pcm->config->prepare_slave_config;
- else
+ if (!pcm->config)
          prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+ else
+         prepare_slave_config = pcm->config->prepare_slave_config;
  if (prepare_slave_config) {
          ret = prepare_slave_config(substream, params, &slave_config);

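This restores the original semantics: a driver-supplied config whose prepare_slave_config hook is NULL means "do not call dmaengine_slave_config() at all", while the generic snd_dmaengine_pcm_prepare_slave_config default applies only when the driver supplies no config struct whatsoever. A hedged sketch of a driver relying on the NULL opt-out (the config name is hypothetical):

    /* A platform whose DMA is fully configured out of band opts out of
     * slave configuration by leaving the hook NULL; with this fix that
     * once again skips dmaengine_slave_config() entirely. */
    static const struct snd_dmaengine_pcm_config my_pcm_config = {
            .prepare_slave_config = NULL,
            .prealloc_buffer_size = 64 * 1024,
    };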

@@ -185,7 +185,7 @@ setup_prepare()
  tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
          protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
-         action police rate 50mbit burst 64k \
+         action police rate 50mbit burst 64k conform-exceed drop/pipe \
          action goto chain $(IS2 1 0)
  }


@@ -61,9 +61,12 @@ setup_prepare()
  vrf_prepare
  mirror_gre_topo_create
+ # Avoid changing br1's PVID while it is operational as a L3 interface.
+ ip link set dev br1 down
  ip link set dev $swp3 master br1
  bridge vlan add dev br1 vid 555 pvid untagged self
+ ip link set dev br1 up
  ip address add dev br1 192.0.2.129/28
  ip address add dev br1 2001:db8:2::1/64


@@ -951,7 +951,7 @@ TEST(ERRNO_valid)
  ASSERT_EQ(0, ret);
  EXPECT_EQ(parent, syscall(__NR_getppid));
- EXPECT_EQ(-1, read(0, NULL, 0));
+ EXPECT_EQ(-1, read(-1, NULL, 0));
  EXPECT_EQ(E2BIG, errno);
  }
@@ -970,7 +970,7 @@ TEST(ERRNO_zero)
  EXPECT_EQ(parent, syscall(__NR_getppid));
  /* "errno" of 0 is ok. */
- EXPECT_EQ(0, read(0, NULL, 0));
+ EXPECT_EQ(0, read(-1, NULL, 0));
  }
  /*
@@ -991,7 +991,7 @@ TEST(ERRNO_capped)
  ASSERT_EQ(0, ret);
  EXPECT_EQ(parent, syscall(__NR_getppid));
- EXPECT_EQ(-1, read(0, NULL, 0));
+ EXPECT_EQ(-1, read(-1, NULL, 0));
  EXPECT_EQ(4095, errno);
  }
@@ -1022,7 +1022,7 @@ TEST(ERRNO_order)
  ASSERT_EQ(0, ret);
  EXPECT_EQ(parent, syscall(__NR_getppid));
- EXPECT_EQ(-1, read(0, NULL, 0));
+ EXPECT_EQ(-1, read(-1, NULL, 0));
  EXPECT_EQ(12, errno);
  }
@@ -2575,7 +2575,7 @@ void *tsync_sibling(void *data)
  ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
  if (!ret)
          return (void *)SIBLING_EXIT_NEWPRIVS;
- read(0, NULL, 0);
+ read(-1, NULL, 0);
  return (void *)SIBLING_EXIT_UNKILLED;
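read(0, ...) touches whatever fd 0 is; when the tests run in a background process group on a TTY, that read raises SIGTTIN and stops the whole job. read(-1, ...) exercises the same __NR_read syscall without ever reaching the terminal, and since seccomp filters run before the fd is even looked up, the filtered-errno assertions are unaffected. Compiled standalone (no filter installed), the call simply fails with EBADF:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* fd -1 is never a TTY: the call fails fast with EBADF and
             * cannot generate SIGTTIN, unlike read(0, ...) from a
             * background process group. Under a SECCOMP_RET_ERRNO filter
             * the syscall never reaches the fd lookup, so the tests see
             * the filter's errno instead. */
            ssize_t ret = read(-1, NULL, 0);

            printf("read(-1) = %zd, errno = %d (%s)\n", ret, errno,
                   errno == EBADF ? "EBADF" : "other");
            return 0;
    }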
}