This is the 5.10.226 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmbiry8ACgkQONu9yGCS
aT6N3g//bWYo3l5q543ygloK7UBTkuyJFWb0ENBbu0J9qlnYaSeKDvqjmMUPBHKi
ZCAzL5nHmAfPMSbjRnltsl54Z7X69264BNLv62M86VnGVDrD7Y93Sn+Ts9jJoTYR
k97HrSojKsqrC2MZLaLuDnoeReoHpeAn4rwIs8mAaApxv48NfRh65KRkipkkRi0N
S7UXC82a8NyjF9wQaB2+Cdt2S0SD2706074X/0jXBAM3YR+5lF3NFgXylmUULTWi
kmCCauGhvfsR9vGIXBAYfX/thF3FSuffJjrWQ3i3/v75PyfoLZ4CruRGXIKM5UBN
TDEqx1Fx+fDXjgH07DYjFLBxQnv9wTgAtflXJj4qfaULO3NonBZHW3xIBe8foO2b
6858JdPcSA2LJ1wUxTc8BuYzgiwz5aCbGa0cLJCyJKYhJXGToweFyDM1nS2V66MD
TF43J/8zv9OAbj6TIT8WisfCgDMIIeMg/RsoaduGZViEN2Sg46XHN4ciZ7eakJOq
j3JFaAan+WPDlYpBLv1tCz+e6IDexugnbP43+E+eY8Xl6UDKUaXd3NiT728W84ll
0KULycqOteiFy7KN6NJx0oLA3YarQciatRm99zA8pnBBvqy0yJXYxWxmmaSQiGo+
VvNTrz6uc+ISP9TJfuPm8KH7NwQVhrjsndXaW2HWgoQ+fWgSZjU=
=abQi
-----END PGP SIGNATURE-----

Merge 5.10.226 into android12-5.10-lts

Changes in 5.10.226
	drm: panel-orientation-quirks: Add quirk for OrangePi Neo
	ALSA: hda/generic: Add a helper to mute speakers at suspend/shutdown
	ALSA: hda/conexant: Mute speakers at suspend / shutdown
	i2c: Fix conditional for substituting empty ACPI functions
	dma-debug: avoid deadlock between dma debug vs printk and netconsole
	net: usb: qmi_wwan: add MeiG Smart SRM825L
	drm/amdgpu: Fix uninitialized variable warning in amdgpu_afmt_acr
	drm/amdgpu: fix overflowed array index read warning
	drm/amd/pm: fix uninitialized variable warning for smu8_hwmgr
	drm/amd/pm: fix warning using uninitialized value of max_vid_step
	drm/amd/pm: fix the Out-of-bounds read warning
	drm/amdgpu: fix uninitialized scalar variable warning
	drm/amd/pm: fix uninitialized variable warnings for vega10_hwmgr
	drm/amdgpu: avoid reading vf2pf info size from FB
	drm/amd/display: Check gpio_id before used as array index
	drm/amd/display: Stop amdgpu_dm initialize when stream nums greater than 6
	drm/amd/display: Add array index check for hdcp ddc access
	drm/amd/display: Check num_valid_sets before accessing reader_wm_sets[]
	drm/amd/display: Check msg_id before processing transcation
	drm/amd/display: Fix Coverity INTEGER_OVERFLOW within dal_gpio_service_create
	drm/amdgpu/pm: Fix uninitialized variable agc_btc_response
	drm/amdgpu: Fix out-of-bounds write warning
	drm/amdgpu: Fix out-of-bounds read of df_v1_7_channel_number
	drm/amdgpu: fix ucode out-of-bounds read warning
	drm/amdgpu: fix mc_data out-of-bounds read warning
	drm/amdkfd: Reconcile the definition and use of oem_id in struct kfd_topology_device
	apparmor: fix possible NULL pointer dereference
	drm/amdgpu/pm: Check input value for CUSTOM profile mode setting on legacy SOCs
	drm/amdgpu: the warning dereferencing obj for nbio_v7_4
	drm/amd/pm: check negtive return for table entries
	wifi: iwlwifi: remove fw_running op
	PCI: al: Check IORESOURCE_BUS existence during probe
	hwspinlock: Introduce hwspin_lock_bust()
	ionic: fix potential irq name truncation
	usbip: Don't submit special requests twice
	usb: typec: ucsi: Fix null pointer dereference in trace
	fsnotify: clear PARENT_WATCHED flags lazily
	smack: tcp: ipv4, fix incorrect labeling
	drm/meson: plane: Add error handling
	wifi: cfg80211: make hash table duplicates more survivable
	block: remove the blk_flush_integrity call in blk_integrity_unregister
	drm/amd/display: Skip wbscl_set_scaler_filter if filter is null
	media: uvcvideo: Enforce alignment of frame and interval
	block: initialize integrity buffer to zero before writing it to media
	drm/amd/pm: Fix the null pointer dereference for vega10_hwmgr
	bpf, cgroups: Fix cgroup v2 fallback on v1/v2 mixed mode
	net: set SOCK_RCU_FREE before inserting socket into hashtable
	virtio_net: Fix napi_skb_cache_put warning
	rcu-tasks: Fix show_rcu_tasks_trace_gp_kthread buffer overflow
	udf: Limit file size to 4TB
	ext4: handle redirtying in ext4_bio_write_page()
	i2c: Use IS_REACHABLE() for substituting empty ACPI functions
	bpf, cgroup: Assign cgroup in cgroup_sk_alloc when called from interrupt
	sch/netem: fix use after free in netem_dequeue
	ASoC: dapm: Fix UAF for snd_soc_pcm_runtime object
	ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices
	ALSA: hda/realtek: add patch for internal mic in Lenovo V145
	ALSA: hda/realtek: Support mute LED on HP Laptop 14-dq2xxx
	ata: libata: Fix memory leak for error path in ata_host_alloc()
	irqchip/gic-v2m: Fix refcount leak in gicv2m_of_init()
	Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE"
	Bluetooth: MGMT: Ignore keys being loaded with invalid type
	mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K
	mmc: sdhci-of-aspeed: fix module autoloading
	fuse: update stats for pages in dropped aux writeback list
	fuse: use unsigned type for getxattr/listxattr size truncation
	clk: qcom: clk-alpha-pll: Fix the pll post div mask
	clk: qcom: clk-alpha-pll: Fix the trion pll postdiv set rate API
	can: mcp251x: fix deadlock if an interrupt occurs during mcp251x_open
	tracing: Avoid possible softlockup in tracing_iter_reset()
	ila: call nf_unregister_net_hooks() sooner
	sched: sch_cake: fix bulk flow accounting logic for host fairness
	nilfs2: fix missing cleanup on rollforward recovery error
	nilfs2: fix state management in error path of log writing function
	btrfs: fix use-after-free after failure to create a snapshot
	mptcp: pr_debug: add missing \n at the end
	mptcp: pm: avoid possible UaF when selecting endp
	nfsd: move reply cache initialization into nfsd startup
	nfsd: move init of percpu reply_cache_stats counters back to nfsd_init_net
	NFSD: Refactor nfsd_reply_cache_free_locked()
	NFSD: Rename nfsd_reply_cache_alloc()
	NFSD: Replace nfsd_prune_bucket()
	NFSD: Refactor the duplicate reply cache shrinker
	NFSD: simplify error paths in nfsd_svc()
	NFSD: Rewrite synopsis of nfsd_percpu_counters_init()
	NFSD: Fix frame size warning in svc_export_parse()
	sunrpc: don't change ->sv_stats if it doesn't exist
	nfsd: stop setting ->pg_stats for unused stats
	sunrpc: pass in the sv_stats struct through svc_create_pooled
	sunrpc: remove ->pg_stats from svc_program
	sunrpc: use the struct net as the svc proc private
	nfsd: rename NFSD_NET_* to NFSD_STATS_*
	nfsd: expose /proc/net/sunrpc/nfsd in net namespaces
	nfsd: make all of the nfsd stats per-network namespace
	nfsd: remove nfsd_stats, make th_cnt a global counter
	nfsd: make svc_stat per-network namespace instead of global
	ALSA: hda: Add input value sanity checks to HDMI channel map controls
	smack: unix sockets: fix accept()ed socket label
	irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1
	af_unix: Remove put_pid()/put_cred() in copy_peercred().
	iommu: sun50i: clear bypass register
	netfilter: nf_conncount: fix wrong variable type
	udf: Avoid excessive partition lengths
	media: vivid: fix wrong sizeimage value for mplane
	leds: spi-byte: Call of_node_put() on error path
	wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
	usb: uas: set host status byte on data completion error
	media: vivid: don't set HDMI TX controls if there are no HDMI outputs
	PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0)
	media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse
	pcmcia: Use resource_size function on resource object
	can: bcm: Remove proc entry when dev is unregistered.
	igb: Fix not clearing TimeSync interrupts for 82580
	svcrdma: Catch another Reply chunk overflow case
	platform/x86: dell-smbios: Fix error path in dell_smbios_init()
	tcp_bpf: fix return value of tcp_bpf_sendmsg()
	igc: Unlock on error in igc_io_resume()
	drivers/net/usb: Remove all strcpy() uses
	net: usb: don't write directly to netdev->dev_addr
	usbnet: modern method to get random MAC
	bareudp: Fix device stats updates.
	fou: remove sparse errors
	gro: remove rcu_read_lock/rcu_read_unlock from gro_receive handlers
	gro: remove rcu_read_lock/rcu_read_unlock from gro_complete handlers
	fou: Fix null-ptr-deref in GRO.
	net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
	net: dsa: vsc73xx: fix possible subblocks range of CAPT block
	ASoC: topology: Properly initialize soc_enum values
	dm init: Handle minors larger than 255
	iommu/vt-d: Handle volatile descriptor status read
	cgroup: Protect css->cgroup write under css_set_lock
	um: line: always fill *error_out in setup_one_line()
	devres: Initialize an uninitialized struct member
	pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv
	hwmon: (adc128d818) Fix underflows seen when writing limit attributes
	hwmon: (lm95234) Fix underflows seen when writing limit attributes
	hwmon: (nct6775-core) Fix underflows seen when writing limit attributes
	hwmon: (w83627ehf) Fix underflows seen when writing limit attributes
	libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
	wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id()
	smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu()
	btrfs: replace BUG_ON with ASSERT in walk_down_proc()
	btrfs: clean up our handling of refs == 0 in snapshot delete
	PCI: Add missing bridge lock to pci_bus_lock()
	net: dpaa: avoid on-stack arrays of NR_CPUS elements
	kselftests: dmabuf-heaps: Ensure the driver name is null-terminated
	btrfs: initialize location to fix -Wmaybe-uninitialized in btrfs_lookup_dentry()
	s390/vmlinux.lds.S: Move ro_after_init section behind rodata section
	HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup
	Input: uinput - reject requests with unreasonable number of slots
	usbnet: ipheth: race between ipheth_close and error handling
	Squashfs: sanity check symbolic link size
	of/irq: Prevent device address out-of-bounds read in interrupt map walk
	lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc()
	MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is installed
	ata: pata_macio: Use WARN instead of BUG
	NFSv4: Add missing rescheduling points in nfs_client_return_marked_delegations
	staging: iio: frequency: ad9834: Validate frequency parameter value
	iio: buffer-dmaengine: fix releasing dma channel on error
	iio: fix scale application in iio_convert_raw_to_processed_unlocked
	iio: adc: ad7124: fix chip ID mismatch
	binder: fix UAF caused by offsets overwrite
	nvmem: Fix return type of devm_nvmem_device_get() in kerneldoc
	uio_hv_generic: Fix kernel NULL pointer dereference in hv_uio_rescind
	Drivers: hv: vmbus: Fix rescind handling in uio_hv_generic
	VMCI: Fix use-after-free when removing resource in vmci_resource_remove()
	clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX
	clocksource/drivers/imx-tpm: Fix next event not taking effect sometime
	clocksource/drivers/timer-of: Remove percpu irq related code
	uprobes: Use kzalloc to allocate xol area
	perf/aux: Fix AUX buffer serialization
	nilfs2: replace snprintf in show functions with sysfs_emit
	nilfs2: protect references to superblock parameters exposed in sysfs
	ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add()
	ACPI: processor: Fix memory leaks in error paths of processor_add()
	arm64: acpi: Move get_cpu_for_acpi_id() to a header
	arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry
	nvmet-tcp: fix kernel crash if commands allocation fails
	drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused
	drm/i915/fence: Mark debug_fence_free() with __maybe_unused
	mmc: cqhci: Fix checking of CQHCI_HALT state
	rtmutex: Drop rt_mutex::wait_lock before scheduling
	x86/mm: Fix PTI for i386 some more
	net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
	memcg: protect concurrent access to mem_cgroup_idr
	Linux 5.10.226

Change-Id: I3c0afd32ba78775f67cde6d73b4dbf931bbc4770
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 7d3ca1ed3f
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 225
+SUBLEVEL = 226
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -97,6 +97,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
 	return	acpi_cpu_get_madt_gicc(cpu)->uid;
 }
 
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (acpi_cpu_get_madt_gicc(cpu) &&
+		    uid == get_acpi_id_for_cpu(cpu))
+			return cpu;
+
+	return -EINVAL;
+}
+
 static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
 int apei_claim_sea(struct pt_regs *regs);
@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
 	return acpi_early_node_map[cpu];
 }
 
-static inline int get_cpu_for_acpi_id(u32 uid)
-{
-	int cpu;
-
-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		if (uid == get_acpi_id_for_cpu(cpu))
-			return cpu;
-
-	return -EINVAL;
-}
-
 static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
 				      const unsigned long end)
 {
@@ -307,13 +307,6 @@ int r4k_clockevent_init(void)
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
-	/*
-	 * With vectored interrupts things are getting platform specific.
-	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of its liking.
-	 */
-	irq = get_c0_compare_int();
-
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
 	cd->name = "MIPS";
@@ -324,7 +317,6 @@ int r4k_clockevent_init(void)
 	min_delta = calculate_min_delta();
 
 	cd->rating = 300;
-	cd->irq = irq;
 	cd->cpumask = cpumask_of(cpu);
 	cd->set_next_event = mips_next_event;
 	cd->event_handler = mips_event_handler;
@@ -336,6 +328,13 @@ int r4k_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = get_c0_compare_int();
+
 	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
 			c0_compare_interrupt))
 		pr_err("Failed to request irq %d (timer)\n", irq);
@@ -69,6 +69,15 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
 
+	.data.rel.ro : {
+		*(.data.rel.ro .data.rel.ro.*)
+	}
+	.got : {
+		__got_start = .;
+		*(.got)
+		__got_end = .;
+	}
+
 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
 	BOOT_DATA_PRESERVED
 
@@ -378,6 +378,7 @@ int setup_one_line(struct line *lines, int n, char *init,
 			parse_chan_pair(NULL, line, n, opts, error_out);
 			err = 0;
 		}
+		*error_out = "configured as 'none'";
 	} else {
 		char *new = kstrdup(init, GFP_KERNEL);
 		if (!new) {
@@ -401,6 +402,7 @@ int setup_one_line(struct line *lines, int n, char *init,
 			}
 		}
 		if (err) {
+			*error_out = "failed to parse channel pair";
 			line->init_str = NULL;
 			line->valid = 0;
 			kfree(new);
@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pmd_t *pmd;
@@ -251,11 +251,16 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 	if (!pmd)
 		return NULL;
 
-	/* We can't do anything sensible if we hit a large mapping. */
+	/* Large PMD mapping found */
 	if (pmd_large(*pmd)) {
-		WARN_ON(1);
-		return NULL;
+		/* Clear the PMD if we hit a large mapping from the first round */
+		if (late_text) {
+			set_pmd(pmd, __pmd(0));
+		} else {
+			WARN_ON_ONCE(1);
+			return NULL;
+		}
 	}
 
 	if (pmd_none(*pmd)) {
 		unsigned long new_pte_page = __get_free_page(gfp);
@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
 		return;
 
-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
 	if (WARN_ON(!target_pte))
 		return;
 
@@ -301,7 +306,7 @@ enum pti_clone_level {
 
 static void
 pti_clone_pgtable(unsigned long start, unsigned long end,
-		  enum pti_clone_level level)
+		  enum pti_clone_level level, bool late_text)
 {
 	unsigned long addr;
 
@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 				return;
 
 			/* Allocate PTE in the user page-table */
-			target_pte = pti_user_pagetable_walk_pte(addr);
+			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
 			if (WARN_ON(!target_pte))
 				return;
 
@@ -453,7 +458,7 @@ static void __init pti_clone_user_shared(void)
 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
 		pte_t *target_pte;
 
-		target_pte = pti_user_pagetable_walk_pte(va);
+		target_pte = pti_user_pagetable_walk_pte(va, false);
 		if (WARN_ON(!target_pte))
 			return;
 
@@ -476,7 +481,7 @@ static void __init pti_clone_user_shared(void)
 	start = CPU_ENTRY_AREA_BASE;
 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
 
-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
 }
 #endif /* CONFIG_X86_64 */
 
@@ -493,11 +498,11 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry text and force it RO.
  */
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_LEVEL_KERNEL_IMAGE);
+			  PTI_LEVEL_KERNEL_IMAGE, late);
 
 	/*
 	 * If CFI is enabled, also map jump tables, so the entry code can
@@ -581,7 +586,7 @@ static void pti_clone_kernel_text(void)
 	 * pti_set_kernel_image_nonglobal() did to clear the
 	 * global bit.
 	 */
-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
 
 	/*
 	 * pti_clone_pgtable() will set the global bit in any PMDs
@@ -648,8 +653,15 @@ void __init pti_init(void)
 
 	/* Undo all global bits from the init pagetables in head_64.S: */
 	pti_set_kernel_image_nonglobal();
 
-	/* Replace some of the global bits just for shared entry text: */
-	pti_clone_entry_text();
+	/*
+	 * This is very early in boot. Device and Late initcalls can do
+	 * modprobe before free_initmem() and mark_readonly(). This
+	 * pti_clone_entry_text() allows those user-mode-helpers to function,
+	 * but notably the text is still RW.
+	 */
+	pti_clone_entry_text(false);
 	pti_setup_espfix64();
 	pti_setup_vsyscall();
 }
@@ -666,10 +678,11 @@ void pti_finalize(void)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 	/*
-	 * We need to clone everything (again) that maps parts of the
-	 * kernel image.
+	 * This is after free_initmem() (all initcalls are done) and we've done
+	 * mark_readonly(). Text is now NX which might've split some PMDs
+	 * relative to the early clone.
 	 */
-	pti_clone_entry_text();
+	pti_clone_entry_text(true);
 	pti_clone_kernel_text();
 
 	debug_checkwx_user();
@@ -387,7 +387,7 @@ static int acpi_processor_add(struct acpi_device *device,
 
 	result = acpi_processor_get_info(device);
 	if (result) /* Processor is not physically present or unavailable */
-		return 0;
+		goto err_clear_driver_data;
 
 	BUG_ON(pr->id >= nr_cpu_ids);
 
@@ -402,7 +402,7 @@ static int acpi_processor_add(struct acpi_device *device,
 			"BIOS reported wrong ACPI id %d for the processor\n",
 			pr->id);
 		/* Give up, but do not abort the namespace scan. */
-		goto err;
+		goto err_clear_driver_data;
 	}
 	/*
 	 * processor_device_array is not cleared on errors to allow buggy BIOS
@@ -414,12 +414,12 @@ static int acpi_processor_add(struct acpi_device *device,
 	dev = get_cpu_device(pr->id);
 	if (!dev) {
 		result = -ENODEV;
-		goto err;
+		goto err_clear_per_cpu;
 	}
 
 	result = acpi_bind_one(dev, device);
 	if (result)
-		goto err;
+		goto err_clear_per_cpu;
 
 	pr->dev = dev;
 
@@ -430,10 +430,11 @@ static int acpi_processor_add(struct acpi_device *device,
 		dev_err(dev, "Processor driver could not be attached\n");
 	acpi_unbind_one(dev);
 
- err:
-	free_cpumask_var(pr->throttling.shared_cpu_map);
-	device->driver_data = NULL;
+ err_clear_per_cpu:
 	per_cpu(processors, pr->id) = NULL;
+ err_clear_driver_data:
+	device->driver_data = NULL;
+	free_cpumask_var(pr->throttling.shared_cpu_map);
  err_free_pr:
 	kfree(pr);
 	return result;
@@ -540,7 +540,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 
 		while (sg_len) {
 			/* table overflow should never happen */
-			BUG_ON (pi++ >= MAX_DCMDS);
+			if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+				return AC_ERR_SYSTEM;
 
 			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -552,11 +553,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 			addr += len;
 			sg_len -= len;
 			++table;
+			++pi;
 		}
 	}
 
 	/* Should never happen according to Tejun */
-	BUG_ON(!pi);
+	if (WARN_ON_ONCE(!pi))
+		return AC_ERR_SYSTEM;
 
 	/* Convert the last command to an input/output */
 	table--;
@@ -577,6 +577,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
 	grp->id = grp;
 	if (id)
 		grp->id = id;
+	grp->color = 0;
 
 	spin_lock_irqsave(&dev->devres_lock, flags);
 	add_dr(dev, &grp->node[0]);
@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
 static int tpm_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
-	unsigned long next, now;
+	unsigned long next, prev, now;
 
-	next = tpm_read_counter();
-	next += delta;
+	prev = tpm_read_counter();
+	next = prev + delta;
 	writel(next, timer_base + TPM_C0V);
 	now = tpm_read_counter();
 
+	/*
+	 * Need to wait CNT increase at least 1 cycle to make sure
+	 * the C0V has been updated into HW.
+	 */
+	if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+		while (now == tpm_read_counter())
+			;
+
 	/*
 	 * NOTE: We observed in a very small probability, the bus fabric
 	 * contention between GPU and A7 may results a few cycles delay
 	 * of writing CNT registers which may cause the min_delta event got
 	 * missed, so we need add a ETIME check here in case it happened.
 	 */
-	return (int)(next - now) <= 0 ? -ETIME : 0;
+	return (now - prev) >= delta ? -ETIME : 0;
 }
 
 static int tpm_set_state_oneshot(struct clock_event_device *evt)
@@ -25,9 +25,6 @@ static void timer_of_irq_exit(struct of_timer_irq *of_irq)
 
 	struct clock_event_device *clkevt = &to->clkevt;
 
-	if (of_irq->percpu)
-		free_percpu_irq(of_irq->irq, clkevt);
-	else
-		free_irq(of_irq->irq, clkevt);
+	free_irq(of_irq->irq, clkevt);
 }
 
@@ -42,9 +39,6 @@ static void timer_of_irq_exit(struct of_timer_irq *of_irq)
 * - Get interrupt number by name
 * - Get interrupt number by index
 *
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
 * Returns 0 on success, < 0 otherwise
 */
 static int timer_of_irq_init(struct device_node *np,
@@ -69,10 +63,7 @@ static int timer_of_irq_init(struct device_node *np,
 		return -EINVAL;
 	}
 
-	ret = of_irq->percpu ?
-		request_percpu_irq(of_irq->irq, of_irq->handler,
-				   np->full_name, clkevt) :
-		request_irq(of_irq->irq, of_irq->handler,
+	ret = request_irq(of_irq->irq, of_irq->handler,
 			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
 			  np->full_name, clkevt);
 	if (ret) {
@@ -11,7 +11,6 @@
 struct of_timer_irq {
 	int		irq;
 	int		index;
-	int		percpu;
 	const char	*name;
 	unsigned long	flags;
 	irq_handler_t	handler;
@@ -44,7 +44,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 	debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
 }
@@ -70,7 +70,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 	debug_object_free(fence, &i915_sw_fence_debug_descr);
 	smp_wmb(); /* flush the change in state before reallocation */
@@ -87,7 +87,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 }
 
@@ -108,7 +108,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 }
 
@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
 static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 				 unsigned int *rsize)
 {
-	if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+	if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
 	    (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
 		hid_info(hdev,
 			 "usage count exceeds max: fixing up report descriptor\n");
@@ -1965,6 +1965,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vmbus_device_unregister);
 
 /*
  * vmbus_remove_channel_attr_group - remove the channel's attribute group
@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
 
 	mutex_lock(&data->update_lock);
 	/* 10 mV LSB on limit registers */
-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+	regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
 	data->in[index][nr] = regval << 4;
 	reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
 	i2c_smbus_write_byte_data(data->client, reg, regval);
@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
 		return err;
 
 	mutex_lock(&data->update_lock);
-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 	data->temp[index] = regval << 1;
 	i2c_smbus_write_byte_data(data->client,
 				  index == 1 ? ADC128_REG_TEMP_MAX
@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
 	if (ret < 0)
 		return ret;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
+				1000);
 
 	mutex_lock(&data->update_lock);
 	data->tcrit2[index] = val;
@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
 	if (ret < 0)
 		return ret;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->tcrit1[index] = val;
@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 
-	val = DIV_ROUND_CLOSEST(val, 1000);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
 	val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
 
 	mutex_lock(&data->update_lock);
@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
 		return ret;
 
 	/* Accuracy is 1/2 degrees C */
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
 
 	mutex_lock(&data->update_lock);
 	data->toffset[index] = val;
@@ -2374,7 +2374,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->temp_offset[nr] = val;
@@ -897,7 +897,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->target_temp[nr] = val;
@@ -922,7 +922,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
 		return err;
 
 	/* Limit the temp to 0C - 15C */
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
 
 	mutex_lock(&data->update_lock);
 	reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
@@ -500,6 +500,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
 	if (ret < 0)
 		return ret;
 
+	fsleep(200);
 	timeout = 100;
 	do {
 		ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
@@ -180,7 +180,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
 	ret = dma_get_slave_caps(chan, &caps);
 	if (ret < 0)
-		goto err_free;
+		goto err_release;
 
 	/* Needs to be aligned to the maximum of the minimums */
 	if (caps.src_addr_widths)
@@ -207,6 +207,8 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
 	return &dmaengine_buffer->queue.buffer;
 
+err_release:
+	dma_release_channel(chan);
 err_free:
 	kfree(dmaengine_buffer);
 	return ERR_PTR(ret);
@@ -610,17 +610,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 		break;
 	case IIO_VAL_INT_PLUS_MICRO:
 		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val;
+			*processed = -raw64 * scale_val * scale;
 		else
-			*processed = raw64 * scale_val;
+			*processed = raw64 * scale_val * scale;
 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
 				      1000000LL);
 		break;
 	case IIO_VAL_INT_PLUS_NANO:
 		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val;
+			*processed = -raw64 * scale_val * scale;
 		else
-			*processed = raw64 * scale_val;
+			*processed = raw64 * scale_val * scale;
 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
 				      1000000000LL);
 		break;
@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 		return -EINVAL;
 	}
 
+	/*
+	 * Limit number of contacts to a reasonable value (100). This
+	 * ensures that we need less than 2 pages for struct input_mt
+	 * (we are not using in-kernel slot assignment so not going to
+	 * allocate memory for the "red" table), and we should have no
+	 * trouble getting this much memory.
+	 */
+	if (code == ABS_MT_SLOT && max > 99) {
+		printk(KERN_DEBUG
+		       "%s: unreasonably large number of slots requested: %d\n",
+		       UINPUT_NAME, max);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -1333,7 +1333,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
 	 */
 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
 
-	while (qi->desc_status[wait_index] != QI_DONE) {
+	while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
 		/*
 		 * We will leave the interrupts disabled, to prevent interrupt
 		 * context to queue another cmd while a cmd is already submitted
@@ -380,6 +380,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
+	iommu_write(iommu, IOMMU_BYPASS_REG, 0);
 	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
 	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
 		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
@@ -546,6 +546,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 				      unsigned int virq, irq_hw_number_t hw)
 {
+	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
+	if (hw <= 1)
+		return -EINVAL;
+
 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
 	if (!is_percpu_irq(hw))
 		writel(hw, per_cpu_int_base +
@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
 		dev_err(dev, "Device must have exactly one LED sub-node.");
 		return -EINVAL;
 	}
-	child = of_get_next_available_child(dev_of_node(dev), NULL);
 
 	led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
 	if (!led)
@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi)
 	led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
 	led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
 
+	child = of_get_next_available_child(dev_of_node(dev), NULL);
 	state = of_get_property(child, "default-state", NULL);
 	if (state) {
 		if (!strcmp(state, "on")) {
 			led->ldev.brightness = led->ldev.max_brightness;
 		} else if (strcmp(state, "off")) {
+			of_node_put(child);
 			/* all other cases except "off" */
 			dev_err(dev, "default-state can only be 'on' or 'off'");
 			return -EINVAL;
@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi)
 
 	ret = devm_led_classdev_register(&spi->dev, &led->ldev);
 	if (ret) {
+		of_node_put(child);
 		mutex_destroy(&led->mutex);
 		return ret;
 	}
+
+	of_node_put(child);
 	spi_set_drvdata(spi, led);
 
 	return 0;
@@ -207,8 +207,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
 	strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
 	/* minor */
 	if (strlen(field[2])) {
-		if (kstrtoull(field[2], 0, &dev->dmi.dev))
+		if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
+		    dev->dmi.dev >= (1 << MINORBITS))
 			return ERR_PTR(-EINVAL);
+		dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
 		dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
 	}
 	/* flags */
@@ -431,8 +431,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
 	struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2;
 	struct v4l2_fwnode_endpoint vep = { { 0 } };
 	unsigned int i;
+	int ret;
 
-	v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+	if (ret)
+		return ret;
 
 	csd->interface.csiphy_id = vep.base.port;
 
@@ -113,7 +113,8 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
 		if (*nplanes != buffers)
 			return -EINVAL;
 		for (p = 0; p < buffers; p++) {
-			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
+			    dev->fmt_cap->vdownsampling[p] +
 			    dev->fmt_cap->data_offset[p])
 				return -EINVAL;
 		}
@@ -1801,8 +1802,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
 		return -EINVAL;
 	if (edid->blocks == 0) {
 		dev->edid_blocks = 0;
-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
+		if (dev->num_outputs) {
+			v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+			v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
+		}
 		phys_addr = CEC_PHYS_ADDR_INVALID;
 		goto set_phys_addr;
 	}
@@ -1826,8 +1829,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
 			display_present |=
 				dev->display_present[i] << j++;
 
-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
+	if (dev->num_outputs) {
+		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
+	}
 
 set_phys_addr:
 	/* TODO: a proper hotplug detect cycle should be emulated here */
@@ -63,13 +63,15 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
 		if (sizes[0] < size)
 			return -EINVAL;
 		for (p = 1; p < planes; p++) {
-			if (sizes[p] < dev->bytesperline_out[p] * h +
+			if (sizes[p] < dev->bytesperline_out[p] * h /
+			    vfmt->vdownsampling[p] +
 			    vfmt->data_offset[p])
 				return -EINVAL;
 		}
 	} else {
 		for (p = 0; p < planes; p++)
-			sizes[p] = p ? dev->bytesperline_out[p] * h +
+			sizes[p] = p ? dev->bytesperline_out[p] * h /
+				   vfmt->vdownsampling[p] +
 				   vfmt->data_offset[p] : size;
 	}
 
@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
 
 	for (p = 0; p < planes; p++) {
 		if (p)
-			size = dev->bytesperline_out[p] * h;
+			size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
 		size += vb->planes[p].data_offset;
 
 		if (vb2_get_plane_payload(vb, p) < size) {
@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
 	for (p = 0; p < mp->num_planes; p++) {
 		mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
 		mp->plane_fmt[p].sizeimage =
-			mp->plane_fmt[p].bytesperline * mp->height +
-			fmt->data_offset[p];
+			mp->plane_fmt[p].bytesperline * mp->height /
+			fmt->vdownsampling[p] + fmt->data_offset[p];
 	}
 	for (p = fmt->buffers; p < fmt->planes; p++) {
 		unsigned stride = dev->bytesperline_out[p];
 
@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
 	spin_lock(&vmci_resource_table.lock);
 
 	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
-		if (vmci_handle_is_equal(r->handle, resource->handle)) {
+		if (vmci_handle_is_equal(r->handle, resource->handle) &&
+		    resource->type == r->type) {
 			hlist_del_init_rcu(&r->node);
 			break;
 		}
@@ -607,7 +607,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		cqhci_writel(cq_host, 0, CQHCI_CTL);
 		mmc->cqe_on = true;
 		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
-		if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
 			pr_err("%s: cqhci: CQE failed to exit halt state\n",
 			       mmc_hostname(mmc));
 		}
@@ -75,7 +75,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
 				  sizeof(ipversion))) {
-			bareudp->dev->stats.rx_dropped++;
+			DEV_STATS_INC(bareudp->dev, rx_dropped);
 			goto drop;
 		}
 		ipversion >>= 4;
@@ -85,7 +85,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
 			proto = htons(ETH_P_IPV6);
 		} else {
-			bareudp->dev->stats.rx_dropped++;
+			DEV_STATS_INC(bareudp->dev, rx_dropped);
 			goto drop;
 		}
 	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -99,7 +99,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 			    ipv4_is_multicast(tunnel_hdr->daddr)) {
 				proto = htons(ETH_P_MPLS_MC);
 			} else {
-				bareudp->dev->stats.rx_dropped++;
+				DEV_STATS_INC(bareudp->dev, rx_dropped);
 				goto drop;
 			}
 		} else {
@@ -115,7 +115,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 			    (addr_type & IPV6_ADDR_MULTICAST)) {
 				proto = htons(ETH_P_MPLS_MC);
 			} else {
-				bareudp->dev->stats.rx_dropped++;
+				DEV_STATS_INC(bareudp->dev, rx_dropped);
 				goto drop;
 			}
 		}
@@ -127,12 +127,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 				 proto,
 				 !net_eq(bareudp->net,
 				 dev_net(bareudp->dev)))) {
-		bareudp->dev->stats.rx_dropped++;
+		DEV_STATS_INC(bareudp->dev, rx_dropped);
 		goto drop;
 	}
 	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
 	if (!tun_dst) {
-		bareudp->dev->stats.rx_dropped++;
+		DEV_STATS_INC(bareudp->dev, rx_dropped);
 		goto drop;
 	}
 	skb_dst_set(skb, &tun_dst->dst);
@@ -157,8 +157,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 					    &((struct ipv6hdr *)oiph)->saddr);
 		}
 		if (err > 1) {
-			++bareudp->dev->stats.rx_frame_errors;
-			++bareudp->dev->stats.rx_errors;
+			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
+			DEV_STATS_INC(bareudp->dev, rx_errors);
 			goto drop;
 		}
 	}
@@ -453,11 +453,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev_kfree_skb(skb);
 
 	if (err == -ELOOP)
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 	else if (err == -ENETUNREACH)
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
 
@@ -35,7 +35,7 @@
 #define VSC73XX_BLOCK_ANALYZER	0x2 /* Only subblock 0 */
 #define VSC73XX_BLOCK_MII	0x3 /* Subblocks 0 and 1 */
 #define VSC73XX_BLOCK_MEMINIT	0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE	0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE	0x4 /* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER	0x5 /* Only subblock 0 */
 #define VSC73XX_BLOCK_SYSTEM	0x7 /* Only subblock 0 */
 
@@ -371,13 +371,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
 		break;
 
 	case VSC73XX_BLOCK_MII:
-	case VSC73XX_BLOCK_CAPTURE:
 	case VSC73XX_BLOCK_ARBITER:
 		switch (subblock) {
 		case 0 ... 1:
 			return 1;
 		}
 		break;
+	case VSC73XX_BLOCK_CAPTURE:
+		switch (subblock) {
+		case 0 ... 4:
+		case 6 ... 7:
+			return 1;
+		}
+		break;
 	}
 
 	return 0;
@@ -908,14 +908,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
 	}
 }
 
-static void dpaa_fq_setup(struct dpaa_priv *priv,
-			  const struct dpaa_fq_cbs *fq_cbs,
-			  struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+			 const struct dpaa_fq_cbs *fq_cbs,
+			 struct fman_port *tx_port)
 {
 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
 	const cpumask_t *affine_cpus = qman_affine_cpus();
-	u16 channels[NR_CPUS];
 	struct dpaa_fq *fq;
+	u16 *channels;
+
+	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
 
 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 		channels[num_portals++] = qman_affine_channel(cpu);
@@ -974,6 +978,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 			break;
 		}
 	}
+
+	kfree(channels);
+
+	return 0;
 }
 
 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -3015,7 +3023,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	 */
 	dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
-	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	if (err)
+		goto free_dpaa_bps;
 
 	/* Create a congestion group for this netdev, with
 	 * dynamically-allocated CGR ID.
@@ -537,12 +537,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
 			     struct ethtool_coalesce *c)
 {
 	const cpumask_t *cpus = qman_affine_cpus();
-	bool needs_revert[NR_CPUS] = {false};
 	struct qman_portal *portal;
 	u32 period, prev_period;
 	u8 thresh, prev_thresh;
+	bool *needs_revert;
 	int cpu, res;
 
+	needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+	if (!needs_revert)
+		return -ENOMEM;
+
 	period = c->rx_coalesce_usecs;
 	thresh = c->rx_max_coalesced_frames;
 
@@ -565,6 +569,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
 		needs_revert[cpu] = true;
 	}
 
+	kfree(needs_revert);
+
 	return 0;
 
 revert_values:
@@ -578,6 +584,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
 		qman_dqrr_set_ithresh(portal, prev_thresh);
 	}
 
+	kfree(needs_revert);
+
 	return res;
 }
 
@@ -6805,10 +6805,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
 
 static void igb_tsync_interrupt(struct igb_adapter *adapter)
 {
+	const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
+			  TSINTR_TT0 | TSINTR_TT1 |
+			  TSINTR_AUTT0 | TSINTR_AUTT1);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tsicr = rd32(E1000_TSICR);
 	struct ptp_clock_event event;
 
+	if (hw->mac.type == e1000_82580) {
+		/* 82580 has a hardware bug that requires an explicit
+		 * write to clear the TimeSync interrupt cause.
+		 */
+		wr32(E1000_TSICR, tsicr & mask);
+	}
+
 	if (tsicr & TSINTR_SYS_WRAP) {
 		event.type = PTP_CLOCK_PPS;
 		if (adapter->ptp_caps.pps)
@@ -5740,6 +5740,7 @@ static void igc_io_resume(struct pci_dev *pdev)
 	rtnl_lock();
 	if (netif_running(netdev)) {
 		if (igc_open(netdev)) {
+			rtnl_unlock();
 			netdev_err(netdev, "igc_open failed after reset\n");
 			return;
 		}
@@ -514,18 +514,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
 
 	type = gh->proto_type;
 
-	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
 	if (!ptype)
-		goto out_unlock;
+		goto out;
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -545,13 +542,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
 	gh_len = geneve_hlen(gh);
 	type = gh->proto_type;
 
-	rcu_read_lock();
 	ptype = gro_find_complete_by_type(type);
 	if (ptype)
 		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
-	rcu_read_unlock();
-
 	skb_set_inner_mac_header(skb, nhoff + gh_len);
 
 	return err;
@@ -336,6 +336,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	int retval = 0;
 	unsigned char data[2];
+	u8 addr[ETH_ALEN];
 
 	retval = usbnet_get_endpoints(dev, intf);
 	if (retval)
@@ -383,7 +384,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
 	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
 			       CONTROL_TIMEOUT_MS);
 
-	retval = get_mac_address(dev, dev->net->dev_addr);
+	retval = get_mac_address(dev, addr);
+	eth_hw_addr_set(dev->net, addr);
 
 	return retval;
 }
@@ -146,6 +146,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	u8 link[3];
 	int timeout = 50;
 	struct cx82310_priv *priv;
+	u8 addr[ETH_ALEN];
 
 	/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
 	if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -202,12 +203,12 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 		goto err;
 
 	/* get the MAC address */
-	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
-			  dev->net->dev_addr, ETH_ALEN);
+	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN);
 	if (ret) {
 		netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
 		goto err;
 	}
+	eth_hw_addr_set(dev->net, addr);
 
 	/* start (does not seem to have any effect?) */
 	ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
@@ -353,8 +353,8 @@ static int ipheth_close(struct net_device *net)
 {
 	struct ipheth_device *dev = netdev_priv(net);
 
-	cancel_delayed_work_sync(&dev->carrier_work);
 	netif_stop_queue(net);
+	cancel_delayed_work_sync(&dev->carrier_work);
 	return 0;
 }
 
@@ -443,7 +443,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
 	netdev->netdev_ops = &ipheth_netdev_ops;
 	netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-	strcpy(netdev->name, "eth%d");
+	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
 
 	dev = netdev_priv(netdev);
 	dev->udev = udev;
@@ -1044,8 +1044,7 @@ static int kaweth_probe(
 		goto err_all_but_rxbuf;
 
 	memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
-	memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
-	       sizeof(kaweth->configuration.hw_addr));
+	eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);
 
 	netdev->netdev_ops = &kaweth_netdev_ops;
 	netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
@@ -480,17 +480,19 @@ static const struct net_device_ops mcs7830_netdev_ops = {
 static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
 {
 	struct net_device *net = dev->net;
+	u8 addr[ETH_ALEN];
 	int ret;
 	int retry;
 
 	/* Initial startup: Gather MAC address setting from EEPROM */
 	ret = -EINVAL;
 	for (retry = 0; retry < 5 && ret; retry++)
-		ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+		ret = mcs7830_hif_get_mac_address(dev, addr);
 	if (ret) {
 		dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
 		goto out;
 	}
+	eth_hw_addr_set(net, addr);
 
 	mcs7830_data_set_multicast(net);
 
@@ -669,6 +669,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
 		0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
 	static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
 		0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+	u8 mod[2];
 
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 
@@ -698,8 +699,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->net->netdev_ops = &sierra_net_device_ops;
 
 	/* change MAC addr to include, ifacenum, and to be unique */
-	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
-	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+	mod[0] = atomic_inc_return(&iface_counter);
+	mod[1] = ifacenum;
+	dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2);
 
 	/* prepare shutdown message template */
 	memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
@@ -326,6 +326,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct net_device *netdev;
 	struct mii_if_info *mii;
+	u8 addr[ETH_ALEN];
 	int ret;
 
 	ret = usbnet_get_endpoints(dev, intf);
@@ -356,11 +357,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
 	 * EEPROM automatically to PAR. In case there is no EEPROM externally,
 	 * a default MAC address is stored in PAR for making chip work properly.
 	 */
-	if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+	if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) {
 		netdev_err(netdev, "Error reading MAC address\n");
 		ret = -ENODEV;
 		goto out;
 	}
+	eth_hw_addr_set(netdev, addr);
 
 	/* power up and reset phy */
 	sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
@@ -731,6 +731,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
 	struct sr_data *data = (struct sr_data *)&dev->data;
 	u16 led01_mux, led23_mux;
 	int ret, embd_phy;
+	u8 addr[ETH_ALEN];
 	u32 phyid;
 	u16 rx_ctl;
 
@@ -756,12 +757,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
 	}
 
 	/* Get the MAC address */
-	ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
-			  dev->net->dev_addr);
+	ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr);
 	if (ret < 0) {
 		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
 		return ret;
 	}
+	eth_hw_addr_set(dev->net, addr);
 	netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
 
 	/* Initialize MII structure */
@@ -64,9 +64,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-// randomly generated ethernet address
-static u8	node_id [ETH_ALEN];
-
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
 module_param (msg_level, int, 0);
@@ -148,12 +145,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
 
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
+	u8 addr[ETH_ALEN];
 	int tmp = -1, ret;
 	unsigned char buf [13];
 
 	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
 	if (ret == 12)
-		tmp = hex2bin(dev->net->dev_addr, buf, 6);
+		tmp = hex2bin(addr, buf, 6);
 	if (tmp < 0) {
 		dev_dbg(&dev->udev->dev,
 			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@ -161,6 +159,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 		ret = -EINVAL;
 		return ret;
 	}
+	eth_hw_addr_set(dev->net, addr);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -1693,8 +1692,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->interrupt_count = 0;
 
 	dev->net = net;
-	strcpy (net->name, "usb%d");
-	memcpy (net->dev_addr, node_id, sizeof node_id);
+	strscpy(net->name, "usb%d", sizeof(net->name));
 
 	/* rx and tx sides can use different message sizes;
 	 * bind() should set rx_urb_size in that case.
@@ -1720,13 +1718,13 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
 		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
 		     (net->dev_addr [0] & 0x02) == 0))
-			strcpy (net->name, "eth%d");
+			strscpy(net->name, "eth%d", sizeof(net->name));
 		/* WLAN devices should always be named "wlan%d" */
 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
-			strcpy(net->name, "wlan%d");
+			strscpy(net->name, "wlan%d", sizeof(net->name));
 		/* WWAN devices should always be named "wwan%d" */
 		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
-			strcpy(net->name, "wwan%d");
+			strscpy(net->name, "wwan%d", sizeof(net->name));
 
 		/* devices that cannot do ARP */
 		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
@@ -1768,9 +1766,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		goto out4;
 	}
 
-	/* let userspace know we have a random address */
-	if (ether_addr_equal(net->dev_addr, node_id))
-		net->addr_assign_type = NET_ADDR_RANDOM;
+	/* this flags the device for user space */
+	if (!is_valid_ether_addr(net->dev_addr))
+		eth_hw_addr_random(net);
 
 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
 		SET_NETDEV_DEVTYPE(net, &wlan_type);
@@ -2180,7 +2178,6 @@ static int __init usbnet_init(void)
 	BUILD_BUG_ON(
 		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
 
-	eth_random_addr(node_id);
 	return 0;
 }
 module_init(usbnet_init);
@@ -1089,6 +1089,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ieee80211_hw_set(hw, MFP_CAPABLE);

hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;

@@ -1310,6 +1310,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,

for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
+if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+continue;
+
if ((adapter->priv[i]->bss_num == bss_num) &&
(adapter->priv[i]->bss_type == bss_type))
break;
@@ -1787,8 +1787,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
}

queue->nr_cmds = sq->size * 2;
-if (nvmet_tcp_alloc_cmds(queue))
+if (nvmet_tcp_alloc_cmds(queue)) {
+queue->nr_cmds = 0;
return NVME_SC_INTERNAL;
+}
return 0;
}

@@ -967,13 +967,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
-* devm_nvmem_device_get() - Get nvmem cell of device form a given id
+* devm_nvmem_device_get() - Get nvmem device of device form a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
-* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
-* on success. The nvmem_cell will be freed by the automatically once the
+* Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+* on success. The nvmem_device will be freed by the automatically once the
* device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
@@ -288,7 +288,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
struct device_node *p;
const __be32 *addr;
u32 intsize;
-int i, res;
+int i, res, addr_len;
+__be32 addr_buf[3] = { 0 };

pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);

@@ -297,13 +298,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
return of_irq_parse_oldworld(device, index, out_irq);

/* Get the reg property (if any) */
-addr = of_get_property(device, "reg", NULL);
+addr = of_get_property(device, "reg", &addr_len);
+
+/* Prevent out-of-bounds read in case of longer interrupt parent address size */
+if (addr_len > (3 * sizeof(__be32)))
+addr_len = 3 * sizeof(__be32);
+if (addr)
+memcpy(addr_buf, addr, addr_len);

/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
if (!res)
-return of_irq_parse_raw(addr, out_irq);
+return of_irq_parse_raw(addr_buf, out_irq);

/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
@@ -333,7 +340,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar


/* Check if there are any interrupt-map translations to process */
-res = of_irq_parse_raw(addr, out_irq);
+res = of_irq_parse_raw(addr_buf, out_irq);
out:
of_node_put(p);
return res;
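The of_irq_parse_one() hunks above bound the "reg" property copy to a fixed three-cell scratch buffer before handing it to of_irq_parse_raw(). As an illustration only — this standalone userspace C sketch is not part of the patch, and parse_addr/MAX_CELLS are hypothetical names — the same clamp-then-copy pattern looks like this:

/* Minimal sketch of the clamp-then-copy pattern: a property longer
 * than the scratch buffer can no longer overrun it. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CELLS 3

static void parse_addr(const uint32_t *prop, size_t prop_len_bytes)
{
	uint32_t addr_buf[MAX_CELLS] = { 0 };

	/* Clamp the source length before copying, as the fix does. */
	if (prop_len_bytes > sizeof(addr_buf))
		prop_len_bytes = sizeof(addr_buf);
	if (prop)
		memcpy(addr_buf, prop, prop_len_bytes);

	printf("cells: %u %u %u\n", (unsigned)addr_buf[0],
	       (unsigned)addr_buf[1], (unsigned)addr_buf[2]);
}

int main(void)
{
	uint32_t oversized[5] = { 1, 2, 3, 4, 5 };	/* longer than 3 cells */

	parse_addr(oversized, sizeof(oversized));
	return 0;
}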
@@ -35,6 +35,11 @@
#define PCIE_DEVICEID_SHIFT 16

/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -105,6 +110,8 @@

#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)

+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -537,7 +544,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+struct keystone_pcie *ks_pcie;
+struct device *bridge_dev;
struct pci_dev *bridge;
+u32 val;

static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
@@ -549,6 +560,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
+static const struct pci_device_id am6_pci_devids[] = {
+{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+{ 0, },
+};

if (pci_is_root_bus(bus))
bridge = dev;
@@ -570,10 +586,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
-dev_info(&dev->dev, "limiting MRRS to 256\n");
+dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+/*
+* Memory transactions fail with PCI controller in AM654 PG1.0
+* when MRRS is set to more than 128 bytes. Force the MRRS to
+* 128 bytes in all downstream devices.
+*/
+if (pci_match_id(am6_pci_devids, bridge)) {
+bridge_dev = pci_get_host_bridge_device(dev);
+if (!bridge_dev && !bridge_dev->parent)
+return;
+
+ks_pcie = dev_get_drvdata(bridge_dev->parent);
+if (!ks_pcie)
+return;
+
+val = ks_pcie_app_readl(ks_pcie, PID);
+val &= RTL;
+val >>= RTL_SHIFT;
+if (val != AM6_PCI_PG1_RTL_VER)
+return;
+
+if (pcie_get_readrq(dev) > 128) {
+dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+pcie_set_readrq(dev, 128);
+}
+}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

@@ -38,7 +38,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
bool disable_device)
{
struct pci_dev *pdev = php_slot->pdev;
-int irq = php_slot->irq;
u16 ctrl;

if (php_slot->irq > 0) {
@@ -57,7 +56,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->wq = NULL;
}

-if (disable_device || irq > 0) {
+if (disable_device) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
@@ -5269,10 +5269,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;

+pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
-pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+else
+pci_dev_lock(dev);
}
}

@@ -5284,8 +5286,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
+else
pci_dev_unlock(dev);
}
+pci_dev_unlock(bus->self);
}

/* Return 1 on successful lock, 0 on contention */
@@ -5293,15 +5297,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;

+if (!pci_dev_trylock(bus->self))
+return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
-if (!pci_dev_trylock(dev))
-goto unlock;
if (dev->subordinate) {
-if (!pci_bus_trylock(dev->subordinate)) {
-pci_dev_unlock(dev);
+if (!pci_bus_trylock(dev->subordinate))
goto unlock;
+} else if (!pci_dev_trylock(dev))
+goto unlock;
-}
-}
}
return 1;

@@ -5309,8 +5313,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
+else
pci_dev_unlock(dev);
}
+pci_dev_unlock(bus->self);
return 0;
}

@@ -5342,9 +5348,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
-pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+else
+pci_dev_lock(dev);
}
}

@@ -5370,14 +5377,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
-if (!pci_dev_trylock(dev))
-goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
-}
+} else if (!pci_dev_trylock(dev))
+goto unlock;
}
return 1;

@@ -5388,6 +5394,7 @@ static int pci_slot_trylock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
+else
pci_dev_unlock(dev);
}
return 0;
@@ -636,11 +636,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
-unsigned long avail = root->end - root->start;
+unsigned long avail = resource_size(root);
int i;
size = BRIDGE_MEM_MAX;
-if (size > avail/8) {
-size = (avail+1)/8;
+if (size > (avail - 1) / 8) {
+size = avail / 8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
@@ -610,7 +610,10 @@ static int __init dell_smbios_init(void)
return 0;

fail_sysfs:
-free_group(platform_device);
+if (!wmi)
+exit_dell_smbios_wmi();
+if (!smm)
+exit_dell_smbios_smm();

fail_create_group:
platform_device_del(platform_device);
@@ -115,7 +115,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,

clk_freq = clk_get_rate(st->mclk);

-if (fout > (clk_freq / 2))
+if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;

regval = ad9834_calc_freqreg(clk_freq, fout);
@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)

/*
* Callback from vmbus_event when channel is rescinded.
+* It is meant for rescind of primary channels only.
*/
static void hv_uio_rescind(struct vmbus_channel *channel)
{
-struct hv_device *hv_dev = channel->primary_channel->device_obj;
+struct hv_device *hv_dev = channel->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);

/*
@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)

/* Wake up reader */
uio_event_notify(&pdata->info);
+
+/*
+* With rescind callback registered, rescind path will not unregister the device
+* from vmbus when the primary channel is rescinded.
+* Without it, rescind handling is incomplete and next onoffer msg does not come.
+* Unregister the device from vmbus here.
+*/
+vmbus_device_unregister(channel->device_obj);
}

/* Sysfs API to allow mmap of the ring buffers
@@ -424,6 +424,7 @@ static void uas_data_cmplt(struct urb *urb)
uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transfered */
scsi_set_resid(cmnd, sdb->length);
+set_host_byte(cmnd, DID_ERROR);
} else {
scsi_set_resid(cmnd, sdb->length - urb->actual_length);
}
@@ -4806,7 +4806,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
/* We don't care about errors in readahead. */
if (ret < 0)
continue;
-BUG_ON(refs == 0);
+
+/*
+* This could be racey, it's conceivable that we raced and end
+* up with a bogus refs count, if that's the case just skip, if
+* we are actually corrupt we will notice when we look up
+* everything again with our locks.
+*/
+if (refs == 0)
+continue;

if (wc->stage == DROP_REFERENCE) {
if (refs == 1)
@@ -4865,7 +4873,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
if (lookup_info &&
((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
-BUG_ON(!path->locks[level]);
+ASSERT(path->locks[level]);
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
&wc->refs[level],
@@ -4873,7 +4881,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
BUG_ON(ret == -ENOMEM);
if (ret)
return ret;
-BUG_ON(wc->refs[level] == 0);
+if (unlikely(wc->refs[level] == 0)) {
+btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+eb->start);
+return -EUCLEAN;
+}
}

if (wc->stage == DROP_REFERENCE) {
@@ -4889,7 +4901,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,

/* wc->stage == UPDATE_BACKREF */
if (!(wc->flags[level] & flag)) {
-BUG_ON(!path->locks[level]);
+ASSERT(path->locks[level]);
ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0);
@@ -5006,8 +5018,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
goto out_unlock;

if (unlikely(wc->refs[level - 1] == 0)) {
-btrfs_err(fs_info, "Missing references.");
-ret = -EIO;
+btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+bytenr);
+ret = -EUCLEAN;
goto out_unlock;
}
*lookup_info = 0;
@@ -5209,7 +5222,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
return ret;
}
-BUG_ON(wc->refs[level] == 0);
+if (unlikely(wc->refs[level] == 0)) {
+btrfs_tree_unlock_rw(eb, path->locks[level]);
+btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+eb->start);
+return -EUCLEAN;
+}
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
@@ -5527,7 +5527,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
-struct btrfs_key location;
+struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;

@@ -47,6 +47,7 @@
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/in6.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/netdevice.h>
@@ -211,6 +212,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
ret = fn(server, data);
if (ret)
goto out;
+cond_resched();
rcu_read_lock();
}
rcu_read_unlock();
@@ -108,7 +108,7 @@ static ssize_t
nilfs_snapshot_inodes_count_show(struct nilfs_snapshot_attr *attr,
struct nilfs_root *root, char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%llu\n",
+return sysfs_emit(buf, "%llu\n",
(unsigned long long)atomic64_read(&root->inodes_count));
}

@@ -116,7 +116,7 @@ static ssize_t
nilfs_snapshot_blocks_count_show(struct nilfs_snapshot_attr *attr,
struct nilfs_root *root, char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%llu\n",
+return sysfs_emit(buf, "%llu\n",
(unsigned long long)atomic64_read(&root->blocks_count));
}

@@ -129,7 +129,7 @@ static ssize_t
nilfs_snapshot_README_show(struct nilfs_snapshot_attr *attr,
struct nilfs_root *root, char *buf)
{
-return snprintf(buf, PAGE_SIZE, snapshot_readme_str);
+return sysfs_emit(buf, snapshot_readme_str);
}

NILFS_SNAPSHOT_RO_ATTR(inodes_count);
@@ -230,7 +230,7 @@ static ssize_t
nilfs_mounted_snapshots_README_show(struct nilfs_mounted_snapshots_attr *attr,
struct the_nilfs *nilfs, char *buf)
{
-return snprintf(buf, PAGE_SIZE, mounted_snapshots_readme_str);
+return sysfs_emit(buf, mounted_snapshots_readme_str);
}

NILFS_MOUNTED_SNAPSHOTS_RO_ATTR(README);
@@ -268,7 +268,7 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,

ncheckpoints = cpstat.cs_ncps;

-return snprintf(buf, PAGE_SIZE, "%llu\n", ncheckpoints);
+return sysfs_emit(buf, "%llu\n", ncheckpoints);
}

static ssize_t
@@ -291,7 +291,7 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,

nsnapshots = cpstat.cs_nsss;

-return snprintf(buf, PAGE_SIZE, "%llu\n", nsnapshots);
+return sysfs_emit(buf, "%llu\n", nsnapshots);
}

static ssize_t
@@ -305,7 +305,7 @@ nilfs_checkpoints_last_seg_checkpoint_show(struct nilfs_checkpoints_attr *attr,
last_cno = nilfs->ns_last_cno;
spin_unlock(&nilfs->ns_last_segment_lock);

-return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
+return sysfs_emit(buf, "%llu\n", last_cno);
}

static ssize_t
@@ -319,7 +319,7 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr,
cno = nilfs->ns_cno;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
+return sysfs_emit(buf, "%llu\n", cno);
}

static const char checkpoints_readme_str[] =
@@ -335,7 +335,7 @@ static ssize_t
nilfs_checkpoints_README_show(struct nilfs_checkpoints_attr *attr,
struct the_nilfs *nilfs, char *buf)
{
-return snprintf(buf, PAGE_SIZE, checkpoints_readme_str);
+return sysfs_emit(buf, checkpoints_readme_str);
}

NILFS_CHECKPOINTS_RO_ATTR(checkpoints_number);
@@ -366,7 +366,7 @@ nilfs_segments_segments_number_show(struct nilfs_segments_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_nsegments);
+return sysfs_emit(buf, "%lu\n", nilfs->ns_nsegments);
}

static ssize_t
@@ -374,7 +374,7 @@ nilfs_segments_blocks_per_segment_show(struct nilfs_segments_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_blocks_per_segment);
+return sysfs_emit(buf, "%lu\n", nilfs->ns_blocks_per_segment);
}

static ssize_t
@@ -388,7 +388,7 @@ nilfs_segments_clean_segments_show(struct nilfs_segments_attr *attr,
ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

-return snprintf(buf, PAGE_SIZE, "%lu\n", ncleansegs);
+return sysfs_emit(buf, "%lu\n", ncleansegs);
}

static ssize_t
@@ -408,7 +408,7 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
return err;
}

-return snprintf(buf, PAGE_SIZE, "%llu\n", sustat.ss_ndirtysegs);
+return sysfs_emit(buf, "%llu\n", sustat.ss_ndirtysegs);
}

static const char segments_readme_str[] =
@@ -424,7 +424,7 @@ nilfs_segments_README_show(struct nilfs_segments_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, segments_readme_str);
+return sysfs_emit(buf, segments_readme_str);
}

NILFS_SEGMENTS_RO_ATTR(segments_number);
@@ -461,7 +461,7 @@ nilfs_segctor_last_pseg_block_show(struct nilfs_segctor_attr *attr,
last_pseg = nilfs->ns_last_pseg;
spin_unlock(&nilfs->ns_last_segment_lock);

-return snprintf(buf, PAGE_SIZE, "%llu\n",
+return sysfs_emit(buf, "%llu\n",
(unsigned long long)last_pseg);
}

@@ -476,7 +476,7 @@ nilfs_segctor_last_seg_sequence_show(struct nilfs_segctor_attr *attr,
last_seq = nilfs->ns_last_seq;
spin_unlock(&nilfs->ns_last_segment_lock);

-return snprintf(buf, PAGE_SIZE, "%llu\n", last_seq);
+return sysfs_emit(buf, "%llu\n", last_seq);
}

static ssize_t
@@ -490,7 +490,7 @@ nilfs_segctor_last_seg_checkpoint_show(struct nilfs_segctor_attr *attr,
last_cno = nilfs->ns_last_cno;
spin_unlock(&nilfs->ns_last_segment_lock);

-return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
+return sysfs_emit(buf, "%llu\n", last_cno);
}

static ssize_t
@@ -504,7 +504,7 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr,
seg_seq = nilfs->ns_seg_seq;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq);
+return sysfs_emit(buf, "%llu\n", seg_seq);
}

static ssize_t
@@ -518,7 +518,7 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr,
segnum = nilfs->ns_segnum;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", segnum);
+return sysfs_emit(buf, "%llu\n", segnum);
}

static ssize_t
@@ -532,7 +532,7 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr,
nextnum = nilfs->ns_nextnum;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum);
+return sysfs_emit(buf, "%llu\n", nextnum);
}

static ssize_t
@@ -546,7 +546,7 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr,
pseg_offset = nilfs->ns_pseg_offset;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset);
+return sysfs_emit(buf, "%lu\n", pseg_offset);
}

static ssize_t
@@ -560,7 +560,7 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr,
cno = nilfs->ns_cno;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
+return sysfs_emit(buf, "%llu\n", cno);
}

static ssize_t
@@ -588,7 +588,7 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
ctime = nilfs->ns_ctime;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", ctime);
+return sysfs_emit(buf, "%llu\n", ctime);
}

static ssize_t
@@ -616,7 +616,7 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
nongc_ctime = nilfs->ns_nongc_ctime;
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime);
+return sysfs_emit(buf, "%llu\n", nongc_ctime);
}

static ssize_t
@@ -630,7 +630,7 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr,
ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks);
up_read(&nilfs->ns_segctor_sem);

-return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks);
+return sysfs_emit(buf, "%u\n", ndirtyblks);
}

static const char segctor_readme_str[] =
@@ -667,7 +667,7 @@ static ssize_t
nilfs_segctor_README_show(struct nilfs_segctor_attr *attr,
struct the_nilfs *nilfs, char *buf)
{
-return snprintf(buf, PAGE_SIZE, segctor_readme_str);
+return sysfs_emit(buf, segctor_readme_str);
}

NILFS_SEGCTOR_RO_ATTR(last_pseg_block);
@@ -736,7 +736,7 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr,
sbwtime = nilfs->ns_sbwtime;
up_read(&nilfs->ns_sem);

-return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime);
+return sysfs_emit(buf, "%llu\n", sbwtime);
}

static ssize_t
@@ -750,7 +750,7 @@ nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr,
sbwcount = nilfs->ns_sbwcount;
up_read(&nilfs->ns_sem);

-return snprintf(buf, PAGE_SIZE, "%u\n", sbwcount);
+return sysfs_emit(buf, "%u\n", sbwcount);
}

static ssize_t
@@ -764,7 +764,7 @@ nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr,
sb_update_freq = nilfs->ns_sb_update_freq;
up_read(&nilfs->ns_sem);

-return snprintf(buf, PAGE_SIZE, "%u\n", sb_update_freq);
+return sysfs_emit(buf, "%u\n", sb_update_freq);
}

static ssize_t
@@ -812,7 +812,7 @@ static ssize_t
nilfs_superblock_README_show(struct nilfs_superblock_attr *attr,
struct the_nilfs *nilfs, char *buf)
{
-return snprintf(buf, PAGE_SIZE, sb_readme_str);
+return sysfs_emit(buf, sb_readme_str);
}

NILFS_SUPERBLOCK_RO_ATTR(sb_write_time);
@@ -843,11 +843,17 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-struct nilfs_super_block **sbp = nilfs->ns_sbp;
-u32 major = le32_to_cpu(sbp[0]->s_rev_level);
-u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
+struct nilfs_super_block *raw_sb;
+u32 major;
+u16 minor;

-return snprintf(buf, PAGE_SIZE, "%d.%d\n", major, minor);
+down_read(&nilfs->ns_sem);
+raw_sb = nilfs->ns_sbp[0];
+major = le32_to_cpu(raw_sb->s_rev_level);
+minor = le16_to_cpu(raw_sb->s_minor_rev_level);
+up_read(&nilfs->ns_sem);
+
+return sysfs_emit(buf, "%d.%d\n", major, minor);
}

static
@@ -855,7 +861,7 @@ ssize_t nilfs_dev_blocksize_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%u\n", nilfs->ns_blocksize);
+return sysfs_emit(buf, "%u\n", nilfs->ns_blocksize);
}

static
@@ -863,10 +869,15 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-struct nilfs_super_block **sbp = nilfs->ns_sbp;
-u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
+struct nilfs_super_block *raw_sb;
+u64 dev_size;

-return snprintf(buf, PAGE_SIZE, "%llu\n", dev_size);
+down_read(&nilfs->ns_sem);
+raw_sb = nilfs->ns_sbp[0];
+dev_size = le64_to_cpu(raw_sb->s_dev_size);
+up_read(&nilfs->ns_sem);
+
+return sysfs_emit(buf, "%llu\n", dev_size);
}

static
@@ -877,7 +888,7 @@ ssize_t nilfs_dev_free_blocks_show(struct nilfs_dev_attr *attr,
sector_t free_blocks = 0;

nilfs_count_free_blocks(nilfs, &free_blocks);
-return snprintf(buf, PAGE_SIZE, "%llu\n",
+return sysfs_emit(buf, "%llu\n",
(unsigned long long)free_blocks);
}

@@ -886,9 +897,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-struct nilfs_super_block **sbp = nilfs->ns_sbp;
+struct nilfs_super_block *raw_sb;
+ssize_t len;

-return snprintf(buf, PAGE_SIZE, "%pUb\n", sbp[0]->s_uuid);
+down_read(&nilfs->ns_sem);
+raw_sb = nilfs->ns_sbp[0];
+len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
+up_read(&nilfs->ns_sem);
+
+return len;
}

static
@@ -896,10 +913,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-struct nilfs_super_block **sbp = nilfs->ns_sbp;
+struct nilfs_super_block *raw_sb;
+ssize_t len;

-return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
-sbp[0]->s_volume_name);
+down_read(&nilfs->ns_sem);
+raw_sb = nilfs->ns_sbp[0];
+len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
+raw_sb->s_volume_name);
+up_read(&nilfs->ns_sem);
+
+return len;
}

static const char dev_readme_str[] =
@@ -916,7 +939,7 @@ static ssize_t nilfs_dev_README_show(struct nilfs_dev_attr *attr,
struct the_nilfs *nilfs,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, dev_readme_str);
+return sysfs_emit(buf, dev_readme_str);
}

NILFS_DEV_RO_ATTR(revision);
@@ -1060,7 +1083,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
-return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+return sysfs_emit(buf, "%d.%d\n",
NILFS_CURRENT_REV, NILFS_MINOR_REV);
}

@@ -1073,7 +1096,7 @@ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
-return snprintf(buf, PAGE_SIZE, features_readme_str);
+return sysfs_emit(buf, features_readme_str);
}

NILFS_FEATURE_RO_ATTR(revision);
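The nilfs2 conversion above swaps snprintf()/scnprintf() for sysfs_emit() in sysfs show() methods: sysfs_emit() knows the buffer is a full page and never reports more bytes than actually fit. As a rough userspace analogue (illustrative only — emit() here is a hypothetical stand-in; the real sysfs_emit() additionally sanity-checks that buf is page-aligned), the return-value difference can be sketched like this:

/* snprintf() returns the length it *would* have written, which can
 * exceed the buffer size; a sysfs_emit()-style helper never does. */
#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int emit(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE, fmt, args);
	va_end(args);

	/* Clamp: never report more than the buffer can hold. */
	return len >= PAGE_SIZE ? PAGE_SIZE - 1 : len;
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("emitted %d bytes\n", emit(page, "%llu\n", 123456789ULL));
	return 0;
}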
@@ -276,8 +276,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
if (err < 0)
goto failed_read;

-set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+if (inode->i_size > PAGE_SIZE) {
+ERROR("Corrupted symlink\n");
+return -EINVAL;
+}
+
+set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_op = &squashfs_symlink_inode_ops;
inode_nohighmem(inode);
inode->i_data.a_ops = &squashfs_symlink_aops;
@@ -1083,12 +1083,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
struct partitionHeaderDesc *phd;
+u32 sum;
int err;

map = &sbi->s_partmaps[p_index];

map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
+if (check_add_overflow(map->s_partition_root, map->s_partition_len,
+&sum)) {
+udf_err(sb, "Partition %d has invalid location %u + %u\n",
+p_index, map->s_partition_root, map->s_partition_len);
+return -EFSCORRUPTED;
+}

if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
@@ -1144,6 +1151,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+/* Check whether math over bitmap won't overflow. */
+if (check_add_overflow(map->s_partition_len,
+sizeof(struct spaceBitmapDesc) << 3,
+&sum)) {
+udf_err(sb, "Partition %d is too long (%u)\n", p_index,
+map->s_partition_len);
+return -EFSCORRUPTED;
+}
udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
p_index, bitmap->s_extPosition);
}
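Both udf hunks above reject on-disk metadata whose arithmetic would wrap by using check_add_overflow() before the values are used, rather than comparing after the fact. The kernel's check_add_overflow() wraps the compiler builtin used in this standalone sketch (userspace, illustrative only; the sample values are made up):

/* Reject a partition whose start + length wraps around u32,
 * mirroring the -EFSCORRUPTED path in the fix. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t root = 0xFFFFFF00u;	/* partition start (blocks) */
	uint32_t len  = 0x00000200u;	/* partition length (blocks) */
	uint32_t sum;

	if (__builtin_add_overflow(root, len, &sum)) {
		fprintf(stderr, "invalid location %u + %u\n", root, len);
		return 1;	/* the fix returns -EFSCORRUPTED here */
	}
	printf("partition end: %u\n", sum);
	return 0;
}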
@@ -1797,9 +1797,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
rcu_assign_pointer(dcgrp->subsys[ssid], css);
ss->root = dst_root;
-css->cgroup = dcgrp;

spin_lock_irq(&css_set_lock);
+css->cgroup = dcgrp;
WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
e_cset_node[ss->id]) {
@@ -1336,8 +1336,9 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::mutex
* perf_event::child_mutex;
* perf_event_context::lock
-* perf_event::mmap_mutex
* mmap_lock
+* perf_event::mmap_mutex
+* perf_buffer::aux_mutex
* perf_addr_filters_head::lock
*
* cpu_hotplug_lock
@@ -6026,12 +6027,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
event->pmu->event_unmapped(event, vma->vm_mm);

/*
-* rb->aux_mmap_count will always drop before rb->mmap_count and
-* event->mmap_count, so it is ok to use event->mmap_mutex to
-* serialize with perf_mmap here.
+* The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
+* to avoid complications.
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
-atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
/*
* Stop all AUX events that are writing to this buffer,
* so that we can free its AUX pages and corresponding PMU
@@ -6048,7 +6048,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
rb_free_aux(rb);
WARN_ON_ONCE(refcount_read(&rb->aux_refcount));

-mutex_unlock(&event->mmap_mutex);
+mutex_unlock(&rb->aux_mutex);
}

if (atomic_dec_and_test(&rb->mmap_count))
@@ -6136,6 +6136,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+struct mutex *aux_mutex = NULL;
struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
unsigned long vma_size;
@@ -6184,6 +6185,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!rb)
goto aux_unlock;

+aux_mutex = &rb->aux_mutex;
+mutex_lock(aux_mutex);
+
aux_offset = READ_ONCE(rb->user_page->aux_offset);
aux_size = READ_ONCE(rb->user_page->aux_size);

@@ -6335,6 +6339,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
atomic_dec(&rb->mmap_count);
}
aux_unlock:
+if (aux_mutex)
+mutex_unlock(aux_mutex);
mutex_unlock(&event->mmap_mutex);

/*

@@ -40,6 +40,7 @@ struct perf_buffer {
struct user_struct *mmap_user;

/* AUX area */
+struct mutex aux_mutex;
long aux_head;
unsigned int aux_nest;
long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */

@@ -329,6 +329,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
*/
if (!rb->nr_pages)
rb->paused = 1;
+
+mutex_init(&rb->aux_mutex);
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
@@ -1485,7 +1485,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
uprobe_opcode_t insn = UPROBE_SWBP_INSN;
struct xol_area *area;

-area = kmalloc(sizeof(*area), GFP_KERNEL);
+area = kzalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;

@@ -1495,7 +1495,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
goto free_area;

area->xol_mapping.name = "[uprobes]";
-area->xol_mapping.fault = NULL;
area->xol_mapping.pages = area->pages;
area->pages[0] = alloc_page(GFP_HIGHUSER);
if (!area->pages[0])
@@ -1205,6 +1205,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+struct rt_mutex *lock,
struct rt_mutex_waiter *w)
{
/*
@@ -1214,6 +1215,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
if (res != -EDEADLOCK || detect_deadlock)
return;

+raw_spin_unlock_irq(&lock->wait_lock);
/*
* Yell lowdly and stop the task right here.
*/
@@ -1269,7 +1271,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (unlikely(ret)) {
__set_current_state(TASK_RUNNING);
remove_waiter(lock, &waiter);
-rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
}

/*
@@ -1034,6 +1034,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)

queue_work_on(cpu, system_wq, &sscs.work);
wait_for_completion(&sscs.done);
+destroy_work_on_stack(&sscs.work);

return sscs.ret;
}

@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
v = new_root;
new_node = NULL;
+} else {
+new_node->children[0] = NULL;
}
}

@@ -5196,12 +5196,29 @@ static struct cftype mem_cgroup_legacy_files[] = {
*/

static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_SPINLOCK(memcg_idr_lock);
+
+static int mem_cgroup_alloc_id(void)
+{
+int ret;
+
+idr_preload(GFP_KERNEL);
+spin_lock(&memcg_idr_lock);
+ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
+GFP_NOWAIT);
+spin_unlock(&memcg_idr_lock);
+idr_preload_end();
+return ret;
+}

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
trace_android_vh_mem_cgroup_id_remove(memcg);
+spin_lock(&memcg_idr_lock);
idr_remove(&mem_cgroup_idr, memcg->id.id);
+spin_unlock(&memcg_idr_lock);

memcg->id.id = 0;
}
}
@@ -5333,9 +5350,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (!memcg)
return ERR_PTR(error);

-memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
-1, MEM_CGROUP_ID_MAX,
-GFP_KERNEL);
+memcg->id.id = mem_cgroup_alloc_id();
if (memcg->id.id < 0) {
error = memcg->id.id;
goto fail;
@@ -5381,7 +5396,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
memcg->deferred_split_queue.split_queue_len = 0;
#endif
+spin_lock(&memcg_idr_lock);
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+spin_unlock(&memcg_idr_lock);
trace_android_vh_mem_cgroup_alloc(memcg);
return memcg;
fail:
@@ -482,10 +482,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,

type = vhdr->h_vlan_encapsulated_proto;

-rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
-goto out_unlock;
+goto out;

flush = 0;

@@ -504,8 +503,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

-out_unlock:
-rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);

@@ -519,12 +516,10 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
struct packet_offload *ptype;
int err = -ENOENT;

-rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

-rcu_read_unlock();
return err;
}

@@ -1238,12 +1238,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}

-if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
/* Refresh entry */
fdb->used = jiffies;
-} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
-/* Take over SW learned entry */
-set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
} else {
modified = true;
}

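The bridge change above folds a separate test_bit()+set_bit() pair into a single test_and_set_bit(), closing the window where two learners could both observe the flag clear. A minimal userspace C11 sketch of the same idea (illustrative only — this local test_and_set_bit() merely mimics the kernel helper's semantics, and the bit number is reused from the driver for flavor):

/* fetch_or returns the old value, so the test and the set happen
 * in one atomic step instead of two racy ones. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BR_FDB_ADDED_BY_EXT_LEARN 0	/* bit number, as in the driver */

static atomic_ulong flags;

static bool test_and_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_or(addr, mask) & mask;
}

int main(void)
{
	if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags))
		printf("refresh existing entry\n");
	else
		printf("took over entry, flag now set\n");
	return 0;
}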
@@ -1423,6 +1423,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,

/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+#endif
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
@@ -430,19 +430,16 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)

type = eh->h_proto;

-rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (ptype == NULL) {
flush = 1;
-goto out_unlock;
+goto out;
}

skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

-out_unlock:
-rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);

@@ -460,13 +457,11 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff)
if (skb->encapsulation)
skb_set_inner_mac_header(skb, nhoff);

-rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype != NULL)
err = ptype->callbacks.gro_complete(skb, nhoff +
sizeof(struct ethhdr));

-rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(eth_gro_complete);
@@ -1473,19 +1473,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)

proto = iph->protocol;

-rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
-goto out_unlock;
+goto out;

if (*(u8 *)iph != 0x45)
-goto out_unlock;
+goto out;

if (ip_is_fragment(iph))
-goto out_unlock;
+goto out;

if (unlikely(ip_fast_csum((u8 *)iph, 5)))
-goto out_unlock;
+goto out;

id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
@@ -1562,9 +1561,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
ops->callbacks.gro_receive, head, skb);

-out_unlock:
-rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);

@@ -1640,10 +1636,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;

-rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
-goto out_unlock;
+goto out;

/* Only need to add sizeof(*iph) to get to the next hdr below
* because any hdr with option will have been flushed in
@@ -1653,9 +1648,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
tcp4_gro_complete, udp4_gro_complete,
skb, nhoff + sizeof(*iph));

-out_unlock:
-rcu_read_unlock();
-
+out:
return err;
}
EXPORT_SYMBOL(inet_gro_complete);
@@ -48,7 +48,7 @@ struct fou_net {

static inline struct fou *fou_from_sock(struct sock *sk)
{
-return sk->sk_user_data;
+return rcu_dereference_sk_user_data(sk);
}

static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
@@ -230,10 +230,16 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
-u8 proto = fou_from_sock(sk)->protocol;
-const struct net_offload **offloads;
+const struct net_offload __rcu **offloads;
+struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
+u8 proto;
+
+if (!fou)
+goto out;
+
+proto = fou->protocol;

/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
@@ -246,41 +252,45 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;

-rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
-goto out_unlock;
+goto out;

pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

-out_unlock:
-rcu_read_unlock();
-
out:
return pp;
}

static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
+const struct net_offload __rcu **offloads;
+struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
-u8 proto = fou_from_sock(sk)->protocol;
-int err = -ENOSYS;
-const struct net_offload **offloads;
+u8 proto;
+int err;
+
+if (!fou) {
+err = -ENOENT;
+goto out;
+}
+
+proto = fou->protocol;

-rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
-if (WARN_ON(!ops || !ops->callbacks.gro_complete))
-goto out_unlock;
+if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
+err = -ENOSYS;
+goto out;
+}

err = ops->callbacks.gro_complete(skb, nhoff);

skb_set_inner_mac_header(skb, nhoff);

-out_unlock:
-rcu_read_unlock();
-
+out:
return err;
}

@@ -311,7 +321,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
-const struct net_offload **offloads;
+const struct net_offload __rcu **offloads;
const struct net_offload *ops;
struct sk_buff *pp = NULL;
struct sk_buff *p;
@@ -324,6 +334,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct gro_remcsum grc;
u8 proto;

+if (!fou)
+goto out;
+
skb_gro_remcsum_init(&grc);

off = skb_gro_offset(skb);
@@ -438,17 +451,14 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;

-rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
-goto out_unlock;
+goto out;

pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;

-out_unlock:
-rcu_read_unlock();
out:
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

@@ -457,8 +467,8 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,

static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
-const struct net_offload **offloads;
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
+const struct net_offload __rcu **offloads;
const struct net_offload *ops;
unsigned int guehlen = 0;
u8 proto;
@@ -485,18 +495,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return err;
}

-rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
-goto out_unlock;
+goto out;

err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

skb_set_inner_mac_header(skb, nhoff + guehlen);

-out_unlock:
-rcu_read_unlock();
+out:
return err;
}

@@ -158,10 +158,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,

type = greh->protocol;

-rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
-goto out_unlock;
+goto out;

grehlen = GRE_HEADER_SECTION;

@@ -175,13 +174,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
if (skb_gro_header_hard(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
-goto out_unlock;
+goto out;
}

/* Don't bother verifying checksum if we're going to flush anyway. */
if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
if (skb_gro_checksum_simple_validate(skb))
-goto out_unlock;
+goto out;

skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
null_compute_pseudo);
@@ -225,8 +224,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;

-out_unlock:
-rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);

@@ -251,13 +248,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
if (greh->flags & GRE_CSUM)
grehlen += GRE_HEADER_SECTION;

-rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

-rcu_read_unlock();
-
skb_set_inner_mac_header(skb, nhoff + grehlen);

return err;
@@ -511,7 +511,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
err = sk_stream_error(sk, msg->msg_flags, err);
release_sock(sk);
sk_psock_put(sk, psock);
-return copied ? copied : err;
+return copied > 0 ? copied : err;
}

static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
@@ -149,8 +149,8 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6)
{
+const struct net_offload __rcu **offloads;
__be16 protocol = skb->protocol;
-const struct net_offload **offloads;
const struct net_offload *ops;
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
@@ -606,13 +606,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
inet_gro_compute_pseudo);
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 0;
-rcu_read_lock();

if (static_branch_unlikely(&udp_encap_needed_key))
sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);

pp = udp_gro_receive(head, skb, uh, sk);
-rcu_read_unlock();
return pp;

flush:
@@ -647,7 +645,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,

uh->len = newlen;

-rcu_read_lock();
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_complete) {
@@ -663,7 +660,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
} else {
err = udp_gro_complete_segment(skb);
}
-rcu_read_unlock();

if (skb->remcsum_offload)
skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
@@ -209,7 +209,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,

flush += ntohs(iph->payload_len) != skb_gro_len(skb);

-rcu_read_lock();
proto = iph->nexthdr;
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive) {
@@ -222,7 +221,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,

ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
-goto out_unlock;
+goto out;

iph = ipv6_hdr(skb);
}
@@ -280,9 +279,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
ops->callbacks.gro_receive, head, skb);

-out_unlock:
-rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);

@@ -332,18 +328,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)

iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

-rcu_read_lock();
-
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
-goto out_unlock;
+goto out;

err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
udp6_gro_complete, skb, nhoff);

-out_unlock:
-rcu_read_unlock();
-
+out:
return err;
}

@@ -144,13 +144,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)

skip:
NAPI_GRO_CB(skb)->is_ipv6 = 1;
-rcu_read_lock();

if (static_branch_unlikely(&udpv6_encap_needed_key))
sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);

pp = udp_gro_receive(head, skb, uh, sk);
-rcu_read_unlock();
return pp;

flush:
@@ -310,7 +310,6 @@ insert_tree(struct net *net,
 	struct nf_conncount_rb *rbconn;
 	struct nf_conncount_tuple *conn;
 	unsigned int count = 0, gc_count = 0;
-	u8 keylen = data->keylen;
 	bool do_gc = true;
 
 	spin_lock_bh(&nf_conncount_locks[hash]);
@@ -322,7 +321,7 @@ insert_tree(struct net *net,
 		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
 
 		parent = *rbnode;
-		diff = key_diff(key, rbconn->key, keylen);
+		diff = key_diff(key, rbconn->key, data->keylen);
 		if (diff < 0) {
 			rbnode = &((*rbnode)->rb_left);
 		} else if (diff > 0) {
@@ -367,7 +366,7 @@ insert_tree(struct net *net,
 
 	conn->tuple = *tuple;
 	conn->zone = *zone;
-	memcpy(rbconn->key, key, sizeof(u32) * keylen);
+	memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
 
 	nf_conncount_list_init(&rbconn->list);
 	list_add(&conn->node, &rbconn->list.head);
@@ -392,7 +391,6 @@ count_tree(struct net *net,
 	struct rb_node *parent;
 	struct nf_conncount_rb *rbconn;
 	unsigned int hash;
-	u8 keylen = data->keylen;
 
 	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
 	root = &data->root[hash];
@@ -403,7 +401,7 @@ count_tree(struct net *net,
 
 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
 
-		diff = key_diff(key, rbconn->key, keylen);
+		diff = key_diff(key, rbconn->key, data->keylen);
 		if (diff < 0) {
 			parent = rcu_dereference_raw(parent->rb_left);
 		} else if (diff > 0) {
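
These net/netfilter/nf_conncount.c hunks delete the `u8 keylen` local copy and pass `data->keylen` (an unsigned int) straight through, so a key length above 255 words can no longer be silently truncated on its way into key_diff() and memcpy(). For orientation, key_diff() is a word-wise key comparison; a sketch consistent with how it is called here (the exact body is not part of this diff):

/* Sketch: compare two conncount keys of klen 32-bit words. After the
 * fix, klen is the caller's unsigned int data->keylen, never a u8. */
static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}
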
@@ -460,6 +460,8 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
 		offset += info->wi_seg_off;
 
 		write_len = min(remaining, length - info->wi_seg_off);
+		if (!write_len)
+			goto out_overflow;
 		ctxt = svc_rdma_get_rw_ctxt(rdma,
 					    (write_len >> PAGE_SHIFT) + 2);
 		if (!ctxt)
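
The added guard in net/sunrpc/xprtrdma/svc_rdma_rw.c bails out before building a zero-length RDMA Write: when the segment list no longer advances, write_len computes to 0 and the loop would otherwise make no progress. A hypothetical standalone illustration of the same loop-guard pattern (names are mine, not from the kernel):

#include <errno.h>
#include <stddef.h>

/* Chunked walk that fails cleanly instead of looping forever when a
 * zero-length segment would make no progress. */
static int walk_chunks(size_t remaining, size_t seg_len, size_t seg_off)
{
	while (remaining) {
		size_t write_len = remaining < seg_len - seg_off ?
				   remaining : seg_len - seg_off;

		if (!write_len)
			return -EINVAL;	/* "goto out_overflow" upstream */
		remaining -= write_len;
		seg_off = 0;
	}
	return 0;
}
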
@@ -2314,6 +2314,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	case -EALREADY:
 		xprt_unlock_connect(xprt, transport);
 		return;
+	case -EPERM:
+		/* Happens, for instance, if a BPF program is preventing
+		 * the connect. Remap the error so upper layers can better
+		 * deal with it.
+		 */
+		status = -ECONNREFUSED;
+		fallthrough;
 	case -EINVAL:
 		/* Happens, for instance, if the user specified a link
 		 * local IPv6 address without a scope-id.
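
connect() can fail with -EPERM when, for example, a cgroup BPF hook rejects the connection; the RPC state machine in net/sunrpc/xprtsock.c had no case for it, so the hunk remaps it to -ECONNREFUSED, which the upper layers already know how to handle. A hypothetical userspace analogue of the same remapping idea (helper name is mine):

#include <errno.h>
#include <sys/socket.h>

/* Normalise a BPF-imposed EPERM from connect(2) into ECONNREFUSED so
 * generic retry/reporting logic can treat both the same way. */
static int normalised_connect(int fd, const struct sockaddr *sa,
			      socklen_t len)
{
	if (connect(fd, sa, len) == 0)
		return 0;
	if (errno == EPERM)	/* e.g. denied by a cgroup/connect hook */
		errno = ECONNREFUSED;
	return -1;
}
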
@@ -603,9 +603,6 @@ static void init_peercred(struct sock *sk)
 
 static void copy_peercred(struct sock *sk, struct sock *peersk)
 {
-	const struct cred *old_cred;
-	struct pid *old_pid;
-
 	if (sk < peersk) {
 		spin_lock(&sk->sk_peer_lock);
 		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
@@ -613,16 +610,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
 		spin_lock(&peersk->sk_peer_lock);
 		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
 	}
-	old_pid = sk->sk_peer_pid;
-	old_cred = sk->sk_peer_cred;
+
 	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
 
 	spin_unlock(&sk->sk_peer_lock);
 	spin_unlock(&peersk->sk_peer_lock);
-
-	put_pid(old_pid);
-	put_cred(old_cred);
 }
 
 static int unix_listen(struct socket *sock, int backlog)
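
copy_peercred() in net/unix/af_unix.c is only called on a freshly created socket whose sk_peer_pid and sk_peer_cred are still NULL, so snapshotting the "old" values and dropping their references afterwards was dead code; the hunks delete it while keeping the address-ordered double locking that prevents an ABBA deadlock between the two sk_peer_lock instances. That ordering trick in isolation, as a userspace pthread sketch (names are mine):

#include <pthread.h>

/* Lock two peer locks in a globally consistent (address) order so two
 * threads locking the same pair in opposite roles cannot deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}
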
@@ -3643,12 +3643,18 @@ static int smack_unix_stream_connect(struct sock *sock,
 		}
 	}
 
-	/*
-	 * Cross reference the peer labels for SO_PEERSEC.
-	 */
 	if (rc == 0) {
+		/*
+		 * Cross reference the peer labels for SO_PEERSEC.
+		 */
 		nsp->smk_packet = ssp->smk_out;
 		ssp->smk_packet = osp->smk_out;
+
+		/*
+		 * new/child/established socket must inherit listening socket labels
+		 */
+		nsp->smk_out = osp->smk_out;
+		nsp->smk_in = osp->smk_in;
 	}
 
 	return rc;
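
The security/smack/smack_lsm.c fix makes the accept()ed socket inherit the listener's smk_in/smk_out labels instead of keeping whatever it was created with, and moves the SO_PEERSEC cross-referencing under the same rc == 0 check. The smk_packet label set here is what a connected peer later reads back; for example, from userspace:

#include <stdio.h>
#include <sys/socket.h>

/* Read the peer's security label (fed by nsp->smk_packet above) from a
 * connected AF_UNIX stream socket. */
static void print_peer_label(int fd)
{
	char label[256] = "";
	socklen_t len = sizeof(label) - 1;

	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
		printf("peer label: %.*s\n", (int)len, label);
}
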
@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	return 0;
 }
 
+/* a simple sanity check for input values to chmap kcontrol */
+static int chmap_value_check(struct hdac_chmap *hchmap,
+			     const struct snd_ctl_elem_value *ucontrol)
+{
+	int i;
+
+	for (i = 0; i < hchmap->channels_max; i++) {
+		if (ucontrol->value.integer.value[i] < 0 ||
+		    ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
+			return -EINVAL;
+	}
+	return 0;
+}
+
 static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
 	unsigned char chmap[8], per_pin_chmap[8];
 	int i, err, ca, prepared = 0;
 
+	err = chmap_value_check(hchmap, ucontrol);
+	if (err < 0)
+		return err;
+
 	/* No monitor is connected in dyn_pcm_assign.
 	 * It's invalid to setup the chmap
 	 */
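
chmap_value_check() rejects user-supplied channel-map entries outside [0, SNDRV_CHMAP_LAST] before hdmi_chmap_ctl_put() uses them, closing an out-of-range lookup reachable from the ALSA control interface. The same bounds-check pattern as a standalone userspace sketch (names are mine):

#include <errno.h>

/* SNDRV_CHMAP_LAST comes from <sound/asound.h>; anything above it does
 * not name a real speaker position and must be rejected up front. */
static int validate_chmap(const long *vals, int nvals, long last_valid)
{
	for (int i = 0; i < nvals; i++)
		if (vals[i] < 0 || vals[i] > last_valid)
			return -EINVAL;
	return 0;
}
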
@@ -998,6 +998,8 @@ static int soc_tplg_denum_create_values(struct soc_enum *se,
 		se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
 	}
 
+	se->items = le32_to_cpu(ec->items);
+	se->values = (const unsigned int *)se->dobj.control.dvalues;
 	return 0;
 }
 
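
Without these two assignments in sound/soc/soc-topology.c, a topology-created value enum left se->items at 0 and se->values NULL, so the generic enum get/put helpers could not translate between item indices and the driver values just loaded into dvalues. A sketch of the lookup those fields enable (hypothetical helper mirroring the items/values relationship):

#include <errno.h>

/* Translate a control item index into its mapped value, the step that
 * requires both se->items and se->values to be populated. */
static int enum_item_to_value(const unsigned int *values,
			      unsigned int items, unsigned int item)
{
	if (item >= items)
		return -EINVAL;
	return values ? (int)values[item] : (int)item;
}
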
@@ -9005,7 +9005,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
 struct bpf_map *
 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
 {
-	if (prev == NULL)
+	if (prev == NULL && obj != NULL)
 		return obj->maps;
 
 	return __bpf_map__iter(prev, obj, 1);
@@ -9014,7 +9014,7 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
 struct bpf_map *
 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
 {
-	if (next == NULL) {
+	if (next == NULL && obj != NULL) {
 		if (!obj->nr_maps)
 			return NULL;
 		return obj->maps + obj->nr_maps - 1;
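
Both tools/lib/bpf/libbpf.c iterators dereferenced obj unconditionally when asked for the first (or last) map, so passing a NULL object, as callers easily do after a failed bpf_object__open(), crashed instead of ending iteration. Typical usage the guard protects (bpf_map__next() is the pre-1.0 libbpf iterator API):

#include <stdio.h>
#include <bpf/libbpf.h>

/* List every map in obj; with the fix, a NULL obj simply yields no
 * maps instead of a NULL-pointer dereference. */
static void list_maps(const struct bpf_object *obj)
{
	struct bpf_map *map = NULL;

	while ((map = bpf_map__next(map, obj)) != NULL)
		printf("map: %s\n", bpf_map__name(map));
}
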
@@ -29,9 +29,11 @@ static int check_vgem(int fd)
 	version.name = name;
 
 	ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
-	if (ret)
+	if (ret || version.name_len != 4)
 		return 0;
 
+	name[4] = '\0';
+
 	return !strcmp(name, "vgem");
 }
 
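
DRM_IOCTL_VERSION copies back at most name_len bytes and updates name_len to the real driver-name length, so the old selftest helper strcmp()ed a buffer the kernel may not have filled or NUL-terminated, and "vgem" could also prefix-match a longer name. Consolidated, the fixed check_vgem() reads roughly as below (the declarations before the hunk context are assumptions based on the visible lines):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Return 1 iff fd is a DRM node driven by vgem. */
static int check_vgem(int fd)
{
	struct drm_version version = { 0 };
	char name[5];

	version.name_len = 4;
	version.name = name;

	/* name_len != 4 after the ioctl means the driver is not "vgem" */
	if (ioctl(fd, DRM_IOCTL_VERSION, &version) || version.name_len != 4)
		return 0;

	name[4] = '\0';
	return !strcmp(name, "vgem");
}
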