Merge 5.4-rc8 into android-mainline

Linux 5.4-rc8

-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl3RzgkeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGN18H/0JZbfIpy8/4Irol
0va7Aj2fBi1a5oxfqYsMKN0u3GKbN3OV9tQ+7w1eBNGvL72TGadgVTzTY+Im7A9U
UjboAc7jDPCG+YhIwXFufMiIAq5jDIj6h0LDas7ALsMfsnI/RhTwgNtLTAkyI3dH
YV/6ljFULwueJHCxzmrYbd1x39PScj3kCNL2pOe6On7rXMKOemY/nbbYYISxY30E
GMgKApSS+li7VuSqgrKoq5Qaox26LyR2wrXB1ij4pqEJ9xgbnKRLdHuvXZnE+/5p
46EMirt+yeSkltW3d2/9MoCHaA76ESzWMMDijLx7tPgoTc3RB3/3ZLsm3rYVH+cR
cRlNNSk=
=0+Cg
-----END PGP SIGNATURE-----

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I1f55e5d34dc78ddb064910ce1e1b7a7b5b39aaba

commit ad5859c6ae
@@ -3261,7 +3261,6 @@ S: Maintained
F:  drivers/cpufreq/bmips-cpufreq.c

BROADCOM BMIPS MIPS ARCHITECTURE
M:  Kevin Cernekee <cernekee@gmail.com>
M:  Florian Fainelli <f.fainelli@gmail.com>
L:  bcm-kernel-feedback-list@broadcom.com
L:  linux-mips@vger.kernel.org

@@ -8299,11 +8298,14 @@ F: drivers/hid/intel-ish-hid/

INTEL IOMMU (VT-d)
M:  David Woodhouse <dwmw2@infradead.org>
M:  Lu Baolu <baolu.lu@linux.intel.com>
L:  iommu@lists.linux-foundation.org
T:  git git://git.infradead.org/iommu-2.6.git
T:  git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S:  Supported
F:  drivers/iommu/intel-iommu.c
F:  drivers/iommu/dmar.c
F:  drivers/iommu/intel*.[ch]
F:  include/linux/intel-iommu.h
F:  include/linux/intel-svm.h

INTEL IOP-ADMA DMA DRIVER
R:  Dan Williams <dan.j.williams@intel.com>
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*
@@ -921,6 +921,9 @@ ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif

# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)

# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
      Say Y here to enable replicating the kernel text across multiple
      nodes in a NUMA cluster. This trades memory for speed.

config REPLICATE_EXHANDLERS
    bool "Exception handler replication support"
    depends on SGI_IP27
    help
      Say Y here to enable replicating the kernel exception handlers
      across multiple nodes in a NUMA cluster. This trades memory for
      speed.
@@ -69,23 +69,14 @@ static void per_hub_init(cnodeid_t cnode)

    hub_rtc_init(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
    /*
     * If this is not a headless node initialization,
     * copy over the caliased exception handlers.
     */
    if (get_compact_nodeid() == cnode) {
        extern char except_vec2_generic, except_vec3_generic;
        extern void build_tlb_refill_handler(void);

        memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
        memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
        build_tlb_refill_handler();
        memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
        memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
    if (nasid) {
        /* copy exception handlers from first node to current node */
        memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
               (void *)CKSEG0, 0x200);
        __flush_cache_all();
        /* switch to node local exception handlers */
        REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
    }
#endif
}

void per_cpu_init(void)
@@ -332,11 +332,7 @@ static void __init mlreset(void)
     * thinks it is a node 0 address.
     */
    REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
    REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
    REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
    /*
@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg
CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg

$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1

#This makes sure the $(obj) subdirectory exists even though vdso32/
@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
    }

    rdtgrp = rdtgroup_kn_lock_live(of->kn);
    rdt_last_cmd_clear();
    if (!rdtgrp) {
        ret = -ENOENT;
        rdt_last_cmd_puts("Directory was removed\n");
        goto unlock;
    }

@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
    int ret;

    prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
    rdt_last_cmd_clear();
    if (!prdtgrp) {
        ret = -ENODEV;
        rdt_last_cmd_puts("Directory was removed\n");
        goto out_unlock;
    }

@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
     */
    { PCI_VENDOR_ID_INTEL, 0x0f00,
      PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
    { PCI_VENDOR_ID_INTEL, 0x3ec4,
      PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
    { PCI_VENDOR_ID_BROADCOM, 0x4331,
      PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
    {}
@@ -51,7 +51,12 @@

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
@@ -6280,14 +6285,13 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)

    if (new_val != old_val) {
        struct kvm *kvm;
        int idx;

        mutex_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
            idx = srcu_read_lock(&kvm->srcu);
            mutex_lock(&kvm->slots_lock);
            kvm_mmu_zap_all_fast(kvm);
            srcu_read_unlock(&kvm->srcu, idx);
            mutex_unlock(&kvm->slots_lock);

            wake_up_process(kvm->arch.nx_lpage_recovery_thread);
        }
@@ -5130,6 +5130,10 @@ static void kvm_init_msr_list(void)

    perf_get_x86_pmu_capability(&x86_pmu);

    num_msrs_to_save = 0;
    num_emulated_msrs = 0;
    num_msr_based_features = 0;

    for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
        if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
            continue;
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
    }
}

static
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
    /*
     * To prevent bfqq's service guarantees from being violated,
     * bfqq may be left busy, i.e., queued for service, even if
     * empty (see comments in __bfq_bfqq_expire() for
     * details). But, if no process will send requests to bfqq any
     * longer, then there is no point in keeping bfqq queued for
     * service. In addition, keeping bfqq queued for service, but
     * with no process ref any longer, may have caused bfqq to be
     * freed when dequeued from service. But this is assumed to
     * never happen.
     */
    if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
        bfqq != bfqd->in_service_queue)
        bfq_del_bfqq_busy(bfqd, bfqq, false);

    bfq_put_queue(bfqq);
}

static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
        struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
     */
    new_bfqq->pid = -1;
    bfqq->bic = NULL;
    /* release process reference to bfqq */
    bfq_put_queue(bfqq);
    bfq_release_process_ref(bfqd, bfqq);
}

static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)

    bfq_put_cooperator(bfqq);

    bfq_put_queue(bfqq); /* release process reference */
    bfq_release_process_ref(bfqd, bfqq);
}

static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)

    bfqq = bic_to_bfqq(bic, false);
    if (bfqq) {
        /* release process reference on this queue */
        bfq_put_queue(bfqq);
        bfq_release_process_ref(bfqd, bfqq);
        bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
        bic_set_bfqq(bic, bfqq, false);
    }
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)

    bfq_put_cooperator(bfqq);

    bfq_put_queue(bfqq);
    bfq_release_process_ref(bfqq->bfqd, bfqq);
    return NULL;
}

@@ -753,7 +753,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
    if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
        return false;

    if (bio->bi_vcnt > 0) {
    if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
        struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

        if (page_is_mergeable(bv, page, len, off, same_page)) {
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
    atomic64_set(&iocg->active_period, cur_period);

    /* already activated or breaking leaf-only constraint? */
    for (i = iocg->level; i > 0; i--)
        if (!list_empty(&iocg->active_list))
            goto succeed_unlock;
    for (i = iocg->level - 1; i > 0; i--)
        if (!list_empty(&iocg->ancestors[i]->active_list))
            goto fail_unlock;

    if (iocg->child_active_sum)
        goto fail_unlock;

@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
        ioc_start_period(ioc, now);
    }

succeed_unlock:
    spin_unlock_irq(&ioc->lock);
    return true;
@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
    }
    return ret;
}

struct for_each_memory_block_cb_data {
    walk_memory_blocks_func_t func;
    void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
    struct memory_block *mem = to_memory_block(dev);
    struct for_each_memory_block_cb_data *cb_data = data;

    return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
    struct for_each_memory_block_cb_data cb_data = {
        .func = func,
        .arg = arg,
    };

    return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
                for_each_memory_block_cb);
}
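
A hedged usage sketch of the new helper (the caller names here are hypothetical; the callback signature follows the walk_memory_blocks_func_t type used above):

/* Hypothetical caller: count present memory blocks via the new helper. */
static int count_one_block(struct memory_block *mem, void *arg)
{
    (*(unsigned int *)arg)++;
    return 0;   /* a non-zero return would abort the walk and be returned */
}

static unsigned int count_memory_blocks(void)
{
    unsigned int n = 0;

    for_each_memory_block(&n, count_one_block);
    return n;
}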
@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
    struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
    struct ceph_osd_data *osd_data;
    u64 objno;
    u8 state, new_state, current_state;
    u8 state, new_state, uninitialized_var(current_state);
    bool has_current_state;
    void *p;

@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)

    cancel_work_sync(&card->event_work);

    destroy_workqueue(card->event_wq);
    rsxx_destroy_dev(card);
    rsxx_dma_destroy(card);
    destroy_workqueue(card->creg_ctrl.creg_wq);

    spin_lock_irqsave(&card->irq_lock, flags);
    rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
@@ -422,9 +421,7 @@ static int hwrng_fillfn(void *unused)
{
    long rc;

    set_freezable();

    while (!kthread_freezable_should_stop(NULL)) {
    while (!kthread_should_stop()) {
        struct hwrng *rng;

        rng = get_current_rng();
@@ -327,7 +327,6 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
     * We'll be woken up again once below random_write_wakeup_thresh,
     * or when the calling thread is about to terminate.
     */
    wait_event_freezable(random_write_wait,
            kthread_should_stop() ||
    wait_event_interruptible(random_write_wait, kthread_should_stop() ||
            ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
    mix_pool_bytes(poolp, buffer, count);
    credit_entropy_bits(poolp, entropy);
@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
                 struct amdgpu_firmware_info *ucode)
{
    struct amdgpu_device *adev = psp->adev;
    const struct sdma_firmware_header_v1_0 *sdma_hdr =
        (const struct sdma_firmware_header_v1_0 *)
        adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
    const struct gfx_firmware_header_v1_0 *ce_hdr =
        (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
    const struct gfx_firmware_header_v1_0 *pfp_hdr =
        (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
    const struct gfx_firmware_header_v1_0 *me_hdr =
        (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
    const struct gfx_firmware_header_v1_0 *mec_hdr =
        (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
    const struct rlc_firmware_header_v2_0 *rlc_hdr =
        (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
    const struct smc_firmware_header_v1_0 *smc_hdr =
        (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
    struct common_firmware_header *hdr;

    switch (ucode->ucode_id) {
    case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
    case AMDGPU_UCODE_ID_SDMA5:
    case AMDGPU_UCODE_ID_SDMA6:
    case AMDGPU_UCODE_ID_SDMA7:
        amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
        hdr = (struct common_firmware_header *)
            adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
        amdgpu_ucode_print_sdma_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_CP_CE:
        amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
        hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
        amdgpu_ucode_print_gfx_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_CP_PFP:
        amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
        hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
        amdgpu_ucode_print_gfx_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_CP_ME:
        amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
        hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
        amdgpu_ucode_print_gfx_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_CP_MEC1:
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
        hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_RLC_G:
        amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
        hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(hdr);
        break;
    case AMDGPU_UCODE_ID_SMC:
        amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
        hdr = (struct common_firmware_header *)adev->pm.fw->data;
        amdgpu_ucode_print_smc_hdr(hdr);
        break;
    default:
        break;
@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)

    power_domains->initializing = true;

    /* Must happen before power domain init on VLV/CHV */
    intel_update_rawclk(i915);

    if (INTEL_GEN(i915) >= 11) {
        icl_display_core_init(i915, resume);
    } else if (IS_CANNONLAKE(i915)) {
@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
    MOCS_ENTRY(15, \
           LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
           L3_3_WB), \
    /* Bypass LLC - Uncached (EHL+) */ \
    MOCS_ENTRY(16, \
           LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
           L3_1_UC), \
    /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
    MOCS_ENTRY(17, \
           LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
           L3_3_WB), \
    /* Self-Snoop - L3 + LLC */ \
    MOCS_ENTRY(18, \
           LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
           L3_1_UC),
    /* HW Special Case (Displayable) */
    MOCS_ENTRY(61,
           LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
           LE_1_UC | LE_TC_1_LLC,
           L3_3_WB),
};

@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
        goto out_free_gem;
    }

    i915_gem_object_put(obj);

    ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
    if (ret < 0) {
        gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
           file_count(dmabuf->file),
           kref_read(&obj->base.refcount));

    i915_gem_object_put(obj);

    return dmabuf_fd;

out_free_dmabuf:
@@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
    if (ret)
        goto cleanup_vga_client;

    /* must happen before intel_power_domains_init_hw() on VLV/CHV */
    intel_update_rawclk(dev_priv);

    intel_power_domains_init_hw(dev_priv, false);

    intel_csr_ucode_init(dev_priv);
@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,

    WARN_ON(!tcon->quirks->has_channel_0);

    tcon->dclk_min_div = 6;
    tcon->dclk_min_div = 1;
    tcon->dclk_max_div = 127;
    sun4i_tcon0_mode_set_common(tcon, mode);

@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
    int index;
    u32 speed;
    u32 min_speed;
    u32 force_speed;
};

/**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
    return acpi_match_device(matches, &client->dev);
}

static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
    /*
     * These Silead touchscreen controllers only work at 400KHz, for
     * some reason they do not work at 100KHz. On some devices the ACPI
     * tables list another device at their bus as only being capable
     * of 100KHz, testing has shown that these other devices work fine
     * at 400KHz (as can be expected of any recent i2c hw) so we force
     * the speed of the bus to 400 KHz if a Silead device is present.
     */
    { "MSSL1680", 0 },
    {}
};

static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
                     void *data, void **return_value)
{
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
    if (lookup->speed <= lookup->min_speed)
        lookup->min_speed = lookup->speed;

    if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
        lookup->force_speed = 400000;

    return AE_OK;
}

@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
        return 0;
    }

    return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
    if (lookup.force_speed) {
        if (lookup.force_speed != lookup.min_speed)
            dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
                 lookup.min_speed, lookup.force_speed);
        return lookup.force_speed;
    } else if (lookup.min_speed != UINT_MAX) {
        return lookup.min_speed;
    } else {
        return 0;
    }
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
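
Concretely (an illustrative scenario, not taken from the commit): if a DSDT lists an MSSL1680 touchscreen at 400 kHz alongside another device on the same bus at 100 kHz, the scan above ends with lookup.min_speed = 100000 and lookup.force_speed = 400000; the mismatch triggers the FW_BUG warning and the bus runs at 400 kHz instead of the advertised minimum.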
@@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
        }

        client = of_i2c_register_device(adap, rd->dn);
        put_device(&adap->dev);

        if (IS_ERR(client)) {
            dev_err(&adap->dev, "failed to create client for '%pOF'\n",
                rd->dn);
            put_device(&adap->dev);
            of_node_clear_flag(rd->dn, OF_POPULATED);
            return notifier_from_errno(PTR_ERR(client));
        }
        put_device(&adap->dev);
        break;
    case OF_RECONFIG_CHANGE_REMOVE:
        /* already depopulated? */
@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
        goto bail_dev;
    }

    hfi1_compute_tid_rdma_flow_wt();
    /*
     * These must be called before the driver is registered with
     * the PCI subsystem.
@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
    /*
     * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
     */
    if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
    if (parent &&
        (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
         dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
        dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
        dd->link_gen3_capable = 0;
    }
@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
        if (qp->s_flags & RVT_S_WAIT_RNR)
            goto bail_stop;
        rdi = ib_to_rvt(qp->ibqp.device);
        if (qp->s_rnr_retry == 0 &&
            !((rdi->post_parms[wqe->wr.opcode].flags &
               RVT_OPERATION_IGN_RNR_CNT) &&
              qp->s_rnr_retry_cnt == 0)) {
            status = IB_WC_RNR_RETRY_EXC_ERR;
            goto class_b;
        if (!(rdi->post_parms[wqe->wr.opcode].flags &
              RVT_OPERATION_IGN_RNR_CNT)) {
            if (qp->s_rnr_retry == 0) {
                status = IB_WC_RNR_RETRY_EXC_ERR;
                goto class_b;
            }
            if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
                qp->s_rnr_retry--;
        }
        if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
            qp->s_rnr_retry--;

        /*
         * The last valid PSN is the previous PSN. For TID RDMA WRITE
@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
 * C - Capcode
 */

static u32 tid_rdma_flow_wt;

static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
                   struct tid_rdma_flow *flow,
                   bool fecn);

static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
{
    if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
        priv->r_tid_ack = priv->r_tid_tail;
}

static void tid_rdma_schedule_ack(struct rvt_qp *qp)
{
    struct hfi1_qp_priv *priv = qp->priv;

    priv->s_flags |= RVT_S_ACK_PENDING;
    hfi1_schedule_tid_send(qp);
}

static void tid_rdma_trigger_ack(struct rvt_qp *qp)
{
    validate_r_tid_ack(qp->priv);
    tid_rdma_schedule_ack(qp);
}

static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
    return
@@ -3005,10 +3023,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
            qpriv->s_nak_state = IB_NAK_PSN_ERROR;
            /* We are NAK'ing the next expected PSN */
            qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
            qpriv->s_flags |= RVT_S_ACK_PENDING;
            if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
                qpriv->r_tid_ack = qpriv->r_tid_tail;
            hfi1_schedule_tid_send(qp);
            tid_rdma_trigger_ack(qp);
        }
        goto unlock;
    }
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
    return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
}

void hfi1_compute_tid_rdma_flow_wt(void)
static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
{
    /*
     * Heuristic for computing the RNR timeout when waiting on the flow
     * queue. Rather than a computationaly expensive exact estimate of when
     * a flow will be available, we assume that if a QP is at position N in
     * the flow queue it has to wait approximately (N + 1) * (number of
     * segments between two sync points), assuming PMTU of 4K. The rationale
     * for this is that flows are released and recycled at each sync point.
     * segments between two sync points). The rationale for this is that
     * flows are released and recycled at each sync point.
     */
    tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
        TID_RDMA_MAX_SEGMENT_SIZE;
    return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
}
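
A quick standalone check of the reworked heuristic (illustrative only, not part of the commit; TID_RDMA_SEGMENT_SHIFT of 18, i.e. 256 KiB segments, matches the tid_rdma.h hunk below, while the MAX_TID_FLOW_PSN value of 2048 is an assumption): at the previously hard-coded 4 KiB PMTU the per-QP computation still yields a flow weight of 32 segments, while smaller PMTUs now get a proportionally smaller wait.

#include <stdio.h>

/* Assumed values for illustration only. */
#define MAX_TID_FLOW_PSN        2048u
#define TID_RDMA_SEGMENT_SHIFT  18

int main(void)
{
    unsigned int pmtu;

    /* mirrors: (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT */
    for (pmtu = 1024; pmtu <= 4096; pmtu <<= 1)
        printf("pmtu %4u -> flow weight %u segments\n",
               pmtu, (MAX_TID_FLOW_PSN * pmtu) >> TID_RDMA_SEGMENT_SHIFT);
    return 0;   /* prints 8, 16, 32 */
}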
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
        if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
            ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
            if (ret) {
                to_seg = tid_rdma_flow_wt *
                to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
                    position_in_queue(qpriv,
                              &rcd->flow_queue);
                break;
@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
        /*
         * If overtaking req->acked_tail, send an RNR NAK. Because the
         * QP is not queued in this case, and the issue can only be
         * caused due a delay in scheduling the second leg which we
         * caused by a delay in scheduling the second leg which we
         * cannot estimate, we use a rather arbitrary RNR timeout of
         * (MAX_FLOWS / 2) segments
         */
@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
                MAX_FLOWS)) {
            ret = -EAGAIN;
            to_seg = MAX_FLOWS >> 1;
            qpriv->s_flags |= RVT_S_ACK_PENDING;
            hfi1_schedule_tid_send(qp);
            tid_rdma_trigger_ack(qp);
            break;
        }

@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
    trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
                      req);
    trace_hfi1_tid_write_rsp_rcv_data(qp);
    if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
        priv->r_tid_ack = priv->r_tid_tail;
    validate_r_tid_ack(priv);

    if (opcode == TID_OP(WRITE_DATA_LAST)) {
        release_rdma_sge_mr(e);
@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
    }

done:
    priv->s_flags |= RVT_S_ACK_PENDING;
    hfi1_schedule_tid_send(qp);
    tid_rdma_schedule_ack(qp);
exit:
    priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
    if (fecn)
@@ -4388,10 +4399,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
    if (!priv->s_nak_state) {
        priv->s_nak_state = IB_NAK_PSN_ERROR;
        priv->s_nak_psn = flow->flow_state.r_next_psn;
        priv->s_flags |= RVT_S_ACK_PENDING;
        if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
            priv->r_tid_ack = priv->r_tid_tail;
        hfi1_schedule_tid_send(qp);
        tid_rdma_trigger_ack(qp);
    }
    goto done;
}
@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
    qpriv->resync = true;
    /* RESYNC request always gets a TID RDMA ACK. */
    qpriv->s_nak_state = 0;
    qpriv->s_flags |= RVT_S_ACK_PENDING;
    hfi1_schedule_tid_send(qp);
    tid_rdma_trigger_ack(qp);
bail:
    if (fecn)
        qp->s_flags |= RVT_S_ECN;
@@ -17,6 +17,7 @@
#define TID_RDMA_MIN_SEGMENT_SIZE   BIT(18)   /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE   BIT(18)   /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES          (BIT(18) >> PAGE_SHIFT)
#define TID_RDMA_SEGMENT_SHIFT      18

/*
 * Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
                  struct ib_other_headers *ohdr,
                  u32 *bth1, u32 *bth2, u32 *len);

void hfi1_compute_tid_rdma_flow_wt(void);

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
@@ -59,7 +59,7 @@ enum {

#define HNS_ROCE_HEM_CHUNK_LEN \
    ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
    (sizeof(struct scatterlist)))
    (sizeof(struct scatterlist) + sizeof(void *)))

#define check_whether_bt_num_3(type, hop_num) \
    (type < HEM_TYPE_MTT && hop_num == 2)
|
||||
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
|
||||
srq->max_gs = srq_init_attr->attr.max_sge;
|
||||
|
||||
srq_desc_size = max(16, 16 * srq->max_gs);
|
||||
srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
|
||||
|
||||
srq->wqe_shift = ilog2(srq_desc_size);
|
||||
|
||||
|
@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
{
    struct ml_device *ml = ff->private;

    /*
     * Even though we stop all playing effects when tearing down
     * an input device (via input_device_flush() that calls into
     * input_ff_flush() that stops and erases all effects), we
     * do not actually stop the timer, and therefore we should
     * do it here.
     */
    del_timer_sync(&ml->timer);

    kfree(ml->private);
}

@@ -177,6 +177,7 @@ static const char * const smbus_pnp_ids[] = {
    "LEN0096", /* X280 */
    "LEN0097", /* X280 -> ALPS trackpoint */
    "LEN009b", /* T580 */
    "LEN0402", /* X1 Extreme 2nd Generation */
    "LEN200f", /* T450s */
    "LEN2054", /* E480 */
    "LEN2055", /* E580 */
@@ -510,7 +510,6 @@ struct f11_data {
    struct rmi_2d_sensor_platform_data sensor_pdata;
    unsigned long *abs_mask;
    unsigned long *rel_mask;
    unsigned long *result_bits;
};

enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
    /*
    ** init instance data, fill in values and create any sysfs files
    */
    f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
    f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
               GFP_KERNEL);
    if (!f11)
        return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
            + sizeof(struct f11_data));
    f11->rel_mask = (unsigned long *)((char *)f11
            + sizeof(struct f11_data) + mask_size);
    f11->result_bits = (unsigned long *)((char *)f11
            + sizeof(struct f11_data) + mask_size * 2);

    set_bit(fn->irq_pos, f11->abs_mask);
    set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
        valid_bytes = f11->sensor.attn_size;
        memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
               valid_bytes);
        drvdata->attn_data.data += f11->sensor.attn_size;
        drvdata->attn_data.size -= f11->sensor.attn_size;
        drvdata->attn_data.data += valid_bytes;
        drvdata->attn_data.size -= valid_bytes;
    } else {
        error = rmi_read_block(rmi_dev,
                data_base_addr, f11->sensor.data_pkt,
@@ -55,6 +55,9 @@ struct f12_data {

    const struct rmi_register_desc_item *data15;
    u16 data15_offset;

    unsigned long *abs_mask;
    unsigned long *rel_mask;
};

static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
        valid_bytes = sensor->attn_size;
        memcpy(sensor->data_pkt, drvdata->attn_data.data,
               valid_bytes);
        drvdata->attn_data.data += sensor->attn_size;
        drvdata->attn_data.size -= sensor->attn_size;
        drvdata->attn_data.data += valid_bytes;
        drvdata->attn_data.size -= valid_bytes;
    } else {
        retval = rmi_read_block(rmi_dev, f12->data_addr,
                    sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
static int rmi_f12_config(struct rmi_function *fn)
{
    struct rmi_driver *drv = fn->rmi_dev->driver;
    struct f12_data *f12 = dev_get_drvdata(&fn->dev);
    struct rmi_2d_sensor *sensor;
    int ret;

    drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
    sensor = &f12->sensor;

    if (!sensor->report_abs)
        drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
    else
        drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);

    drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);

    ret = rmi_f12_write_control_regs(fn);
    if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
    struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
    struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
    u16 data_offset = 0;
    int mask_size;

    rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);

    mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);

    ret = rmi_read(fn->rmi_dev, query_addr, &buf);
    if (ret < 0) {
        dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
        return -ENODEV;
    }

    f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
    f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
               GFP_KERNEL);
    if (!f12)
        return -ENOMEM;

    f12->abs_mask = (unsigned long *)((char *)f12
            + sizeof(struct f12_data));
    f12->rel_mask = (unsigned long *)((char *)f12
            + sizeof(struct f12_data) + mask_size);

    set_bit(fn->irq_pos, f12->abs_mask);
    set_bit(fn->irq_pos + 1, f12->rel_mask);

    f12->has_dribble = !!(buf & BIT(3));

    if (fn->dev.of_node) {
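
The f11 and f12 hunks share one allocation pattern: the per-function IRQ masks live as trailing storage in the same zeroed block as the struct, so they are sized at probe time and freed together with it. A minimal userspace sketch of that layout (hypothetical names; calloc stands in for devm_kzalloc):

#include <stdlib.h>

struct demo_fn {
    int state;
    unsigned long *abs_mask;
    unsigned long *rel_mask;
};

static struct demo_fn *demo_fn_alloc(size_t mask_size)
{
    /* one block: the struct, then two mask_size-byte bitmaps */
    struct demo_fn *d = calloc(1, sizeof(*d) + mask_size * 2);

    if (!d)
        return NULL;
    d->abs_mask = (unsigned long *)((char *)d + sizeof(*d));
    d->rel_mask = (unsigned long *)((char *)d + sizeof(*d) + mask_size);
    return d;
}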
@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
static const struct vb2_queue rmi_f54_queue = {
    .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
    .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
    .buf_struct_size = sizeof(struct vb2_buffer),
    .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
    .ops = &rmi_f54_queue_ops,
    .mem_ops = &vb2_vmalloc_memops,
    .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
{
    struct rmi_driver *drv = fn->rmi_dev->driver;

    drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
    drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);

    return 0;
}
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)

    video_unregister_device(&f54->vdev);
    v4l2_device_unregister(&f54->v4l2);
    destroy_workqueue(f54->workqueue);
}

struct rmi_function_handler rmi_f54_handler = {
@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)

    /* get sysinfo */
    md->si = &cd->sysinfo;
    if (!md->si) {
        dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
            __func__, md->si);
        goto error_get_sysinfo;
    }

    rc = cyttsp4_setup_input_device(cd);
    if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)

error_init_input:
    input_free_device(md->input);
error_get_sysinfo:
    input_set_drvdata(md->input, NULL);
error_alloc_failed:
    dev_err(dev, "%s failed.\n", __func__);
    return rc;
@@ -358,7 +358,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
    pm_runtime_use_autosuspend(&pdev->dev);

    /* HS200 is broken at this moment */
    host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
    host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;

    ret = sdhci_add_host(host);
    if (ret)
@@ -617,6 +617,7 @@ static int slcan_open(struct tty_struct *tty)
    sl->tty = NULL;
    tty->disc_data = NULL;
    clear_bit(SLF_INUSE, &sl->flags);
    free_netdev(sl->dev);

err_exit:
    rtnl_unlock();
@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
    int pin;
    int err;

    /* Reject requests with unsupported flags */
    if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                PTP_RISING_EDGE |
                PTP_FALLING_EDGE |
                PTP_STRICT_FLAGS))
        return -EOPNOTSUPP;

    /* Reject requests to enable time stamping on both edges. */
    if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
        (rq->extts.flags & PTP_ENABLE_FEATURE) &&
        (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
        return -EOPNOTSUPP;

    pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);

    if (pin < 0)
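
The same pair of checks recurs, with only the edge test varying, in the igb, mlx5, dp83640 and ravb hunks below: hardware that stamps a single edge rejects strict requests for both edges (== PTP_EXTTS_EDGES), while igb, which only stamps both edges, rejects the opposite (!= PTP_EXTTS_EDGES). A hedged sketch of the single-edge variant as one helper (hypothetical name, not part of this commit):

#include <linux/ptp_clock.h>    /* PTP_* flag bits, PTP_EXTTS_EDGES */

/* Hypothetical consolidation of the open-coded per-driver check. */
static int extts_flags_supported(unsigned int flags)
{
    /* only these request flags are understood at all */
    if (flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
              PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
        return 0;
    /* strict requests for time stamping on both edges are unsupported */
    if ((flags & PTP_STRICT_FLAGS) && (flags & PTP_ENABLE_FEATURE) &&
        (flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
        return 0;
    return 1;
}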
@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,

    switch (rq->type) {
    case PTP_CLK_REQ_PEROUT:
        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
            return -EOPNOTSUPP;

        if (rq->perout.index != 0)
            return -EINVAL;

@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
    struct net_device *dev;
    struct ep93xx_priv *ep;
    struct resource *mem;

    dev = platform_get_drvdata(pdev);
    if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
    iounmap(ep->base_addr);

    if (ep->res != NULL) {
        release_resource(ep->res);
        kfree(ep->res);
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(mem->start, resource_size(mem));
    }

    free_netdev(dev);
@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
    struct gemini_ethernet_port *port = platform_get_drvdata(pdev);

    gemini_port_remove(port);
    free_netdev(port->netdev);
    return 0;
}

@@ -2232,8 +2232,16 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
err_service_reg:
    free_channel(priv, channel);
err_alloc_ch:
    if (err == -EPROBE_DEFER)
    if (err == -EPROBE_DEFER) {
        for (i = 0; i < priv->num_channels; i++) {
            channel = priv->channel[i];
            nctx = &channel->nctx;
            dpaa2_io_service_deregister(channel->dpio, nctx, dev);
            free_channel(priv, channel);
        }
        priv->num_channels = 0;
        return err;
    }

    if (cpumask_empty(&priv->dpio_cpumask)) {
        dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3

struct hns3_link_mode_mapping {
    u32 hns3_link_mode;
    u32 ethtool_link_mode;
};

static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
    struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
    if (ret)
        return ret;

    for (i = 0; i < HNAE3_MAX_TC; i++) {
    for (i = 0; i < hdev->tc_max; i++) {
        switch (ets->tc_tsa[i]) {
        case IEEE_8021QAZ_TSA_STRICT:
            if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
    struct net_device *netdev = h->kinfo.netdev;
    struct hclge_dev *hdev = vport->back;
    u8 i, j, pfc_map, *prio_tc;
    int ret;

    if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
        hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)

    hclge_tm_pfc_info_update(hdev);

    return hclge_pause_setup_hw(hdev, false);
    ret = hclge_pause_setup_hw(hdev, false);
    if (ret)
        return ret;

    ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
    if (ret)
        return ret;

    ret = hclge_buffer_alloc(hdev);
    if (ret) {
        hclge_notify_client(hdev, HNAE3_UP_CLIENT);
        return ret;
    }

    return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
@@ -6263,11 +6263,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,

    func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
    req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

    /* read current config parameter */
    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
                   false);
                   true);
    req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
    req->func_id = cpu_to_le32(func_id);
    req->switch_param = switch_param;

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    if (ret) {
        dev_err(&hdev->pdev->dev,
            "read mac vlan switch parameter fail, ret = %d\n", ret);
        return ret;
    }

    /* modify and write new config parameter */
    hclge_cmd_reuse_desc(&desc, false);
    req->switch_param = (req->switch_param & param_mask) | switch_param;
    req->param_mask = param_mask;

    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,

    switch (rq->type) {
    case PTP_CLK_REQ_EXTTS:
        /* Reject requests with unsupported flags */
        if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                    PTP_RISING_EDGE |
                    PTP_FALLING_EDGE |
                    PTP_STRICT_FLAGS))
            return -EOPNOTSUPP;

        /* Reject requests failing to enable both edges. */
        if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
            (rq->extts.flags & PTP_ENABLE_FEATURE) &&
            (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
            return -EOPNOTSUPP;

        if (on) {
            pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
                       rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
        return 0;

    case PTP_CLK_REQ_PEROUT:
        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
            return -EOPNOTSUPP;

        if (on) {
            pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
                       rq->perout.index);
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 CGX driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 CGX driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
@@ -4010,6 +4010,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        goto err_params_unregister;

    devlink_params_publish(devlink);
    devlink_reload_enable(devlink);
    pci_save_state(pdev);
    return 0;

@@ -4121,6 +4122,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
    struct devlink *devlink = priv_to_devlink(priv);
    int active_vfs = 0;

    devlink_reload_disable(devlink);

    if (mlx4_is_slave(dev))
        persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;

@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
    if (!MLX5_PPS_CAP(mdev))
        return -EOPNOTSUPP;

    /* Reject requests with unsupported flags */
    if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                PTP_RISING_EDGE |
                PTP_FALLING_EDGE |
                PTP_STRICT_FLAGS))
        return -EOPNOTSUPP;

    /* Reject requests to enable time stamping on both edges. */
    if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
        (rq->extts.flags & PTP_ENABLE_FEATURE) &&
        (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
        return -EOPNOTSUPP;

    if (rq->extts.index >= clock->ptp_info.n_pins)
        return -EINVAL;

@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
    if (!MLX5_PPS_CAP(mdev))
        return -EOPNOTSUPP;

    /* Reject requests with unsupported flags */
    if (rq->perout.flags)
        return -EOPNOTSUPP;

    if (rq->perout.index >= clock->ptp_info.n_pins)
        return -EINVAL;

@@ -1189,6 +1189,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
    if (mlxsw_driver->params_register)
        devlink_params_publish(devlink);

    if (!reload)
        devlink_reload_enable(devlink);

    return 0;

err_thermal_init:
@@ -1249,6 +1252,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
    struct devlink *devlink = priv_to_devlink(mlxsw_core);

    if (!reload)
        devlink_reload_disable(devlink);
    if (devlink_is_reload_failed(devlink)) {
        if (!reload)
            /* Only the parts that were not de-initialized in the
@@ -429,6 +429,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
    int pulse_width = 0;
    int perout_bit = 0;

    /* Reject requests with unsupported flags */
    if (perout->flags)
        return -EOPNOTSUPP;

    if (!on) {
        lan743x_ptp_perout_off(adapter);
        return 0;
@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
#define NUM_RX_QUEUE    2
#define NUM_TX_QUEUE    2

#define RX_BUF_SZ   (2048 - ETH_FCS_LEN + sizeof(__sum16))

/* TX descriptors per packet */
#define NUM_TX_DESC_GEN2    2
#define NUM_TX_DESC_GEN3    1
@@ -1018,7 +1020,6 @@ struct ravb_private {
    u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
    u32 cur_tx[NUM_TX_QUEUE];
    u32 dirty_tx[NUM_TX_QUEUE];
    u32 rx_buf_sz;          /* Based on MTU+slack. */
    struct napi_struct napi[NUM_RX_QUEUE];
    struct work_struct work;
    /* MII transceiver section. */
@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                           le32_to_cpu(desc->dptr)))
                dma_unmap_single(ndev->dev.parent,
                         le32_to_cpu(desc->dptr),
                         priv->rx_buf_sz,
                         RX_BUF_SZ,
                         DMA_FROM_DEVICE);
        }
        ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
    for (i = 0; i < priv->num_rx_ring[q]; i++) {
        /* RX descriptor */
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
        rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
        dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                      priv->rx_buf_sz,
                      RX_BUF_SZ,
                      DMA_FROM_DEVICE);
        /* We just set the data size to 0 for a failed mapping which
         * should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
    int ring_size;
    int i;

    priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
        ETH_HLEN + VLAN_HLEN + sizeof(__sum16);

    /* Allocate RX and TX skb rings */
    priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        goto error;

    for (i = 0; i < priv->num_rx_ring[q]; i++) {
        skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
        skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
        if (!skb)
            goto error;
        ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        skb = priv->rx_skb[q][entry];
        priv->rx_skb[q][entry] = NULL;
        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                 priv->rx_buf_sz,
                 RX_BUF_SZ,
                 DMA_FROM_DEVICE);
        get_ts &= (q == RAVB_NC) ?
            RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
    for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
        entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
        desc = &priv->rx_ring[q][entry];
        desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
        desc->ds_cc = cpu_to_le16(RX_BUF_SZ);

        if (!priv->rx_skb[q][entry]) {
            skb = netdev_alloc_skb(ndev,
                           priv->rx_buf_sz +
                           RX_BUF_SZ +
                           RAVB_ALIGN - 1);
            if (!skb)
                break;  /* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)

static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
    if (netif_running(ndev))
        return -EBUSY;
    struct ravb_private *priv = netdev_priv(ndev);

    ndev->mtu = new_mtu;

    if (netif_running(ndev)) {
        synchronize_irq(priv->emac_irq);
        ravb_emac_init(ndev);
    }

    netdev_update_features(ndev);

    return 0;
@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
    struct net_device *ndev = priv->ndev;
    unsigned long flags;

    /* Reject requests with unsupported flags */
    if (req->flags & ~(PTP_ENABLE_FEATURE |
               PTP_RISING_EDGE |
               PTP_FALLING_EDGE |
               PTP_STRICT_FLAGS))
        return -EOPNOTSUPP;

    if (req->index)
        return -EINVAL;

@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
    unsigned long flags;
    int error = 0;

    /* Reject requests with unsupported flags */
    if (req->flags)
        return -EOPNOTSUPP;

    if (req->index)
        return -EINVAL;

@@ -1226,7 +1226,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
dwmac_mux:
    sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
    sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
    stmmac_pltfr_remove(pdev);
    return ret;
}

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
// stmmac Support for 5.xx Ethernet QoS cores

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC definitions.

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
// stmmac HW Interface Callbacks

@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,

    switch (rq->type) {
    case PTP_CLK_REQ_PEROUT:
        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
            return -EOPNOTSUPP;

        cfg = &priv->pps[rq->perout.index];

        cfg->start.tv_sec = rq->perout.start.sec;
@@ -708,6 +708,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
        goto err_debugfs_exit;

    devlink_params_publish(devlink);
    devlink_reload_enable(devlink);
    return nsim_dev;

err_debugfs_exit:
@@ -732,6 +733,7 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
{
    struct devlink *devlink = priv_to_devlink(nsim_dev);

    devlink_reload_disable(devlink);
    nsim_bpf_dev_exit(nsim_dev);
    nsim_dev_debugfs_exit(nsim_dev);
    nsim_dev_traps_exit(devlink);

@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,

 	switch (rq->type) {
 	case PTP_CLK_REQ_EXTTS:
+		/* Reject requests with unsupported flags */
+		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+					PTP_RISING_EDGE |
+					PTP_FALLING_EDGE |
+					PTP_STRICT_FLAGS))
+			return -EOPNOTSUPP;
+
+		/* Reject requests to enable time stamping on both edges. */
+		if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+		    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+		    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+			return -EOPNOTSUPP;
+
 		index = rq->extts.index;
 		if (index >= N_EXT_TS)
 			return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 		return 0;

 	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags)
+			return -EOPNOTSUPP;
 		if (rq->perout.index >= N_PER_OUT)
 			return -EINVAL;
 		return periodic_output(clock, rq, on, rq->perout.index);
@@ -64,11 +64,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
 	if (mdiodev->dev.of_node)
 		reset = devm_reset_control_get_exclusive(&mdiodev->dev,
							 "phy");
-	if (PTR_ERR(reset) == -ENOENT ||
-	    PTR_ERR(reset) == -ENOTSUPP)
-		reset = NULL;
-	else if (IS_ERR(reset))
-		return PTR_ERR(reset);
+	if (IS_ERR(reset)) {
+		if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+			reset = NULL;
+		else
+			return PTR_ERR(reset);
+	}

 	mdiodev->reset_ctrl = reset;

@@ -855,6 +855,7 @@ static int slip_open(struct tty_struct *tty)
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	free_netdev(sl->dev);

 err_exit:
 	rtnl_unlock();
@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)

 	/* Get the MAC address */
 	ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
-	if (ret < 0) {
+	if (ret < ETH_ALEN) {
 		netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
 		goto free;
 	}
@@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
 	err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
			      0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
-	if (err < sizeof(max_datagram_size)) {
+	if (err != sizeof(max_datagram_size)) {
 		dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
 		goto out;
 	}
@@ -1371,6 +1371,8 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
 	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
+	{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},	/* Foxconn T77W968 LTE */
+	{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},	/* Foxconn T77W968 LTE with eSIM support*/

 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-	u16 length, iv_len, amsdu_pad;
+	u16 length, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
 	struct page **page_ptr;
 	struct tso_t tso;

-	/* if the packet is protected, then it must be CCMP or GCMP */
-	iv_len = ieee80211_has_protected(hdr->frame_control) ?
-		IEEE80211_CCMP_HDR_LEN : 0;
-
 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
-	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
 	amsdu_pad = 0;

 	/* total amount of header we may need for this A-MSDU */
 	hdr_room = DIV_ROUND_UP(total_len, mss) *
-		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

 	/* Our device supports 9 segments at most, it will fit in 1 page */
 	hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	start_hdr = hdr_page->pos;
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
 	*page_ptr = hdr_page->page;
-	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
-	hdr_page->pos += iv_len;

 	/*
-	 * Pull the ieee80211 header + IV to be able to use TSO core,
+	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
-	skb_pull(skb, hdr_len + iv_len);
+	skb_pull(skb, hdr_len);

 	/*
	 * Remove the length of all the headers that we don't actually
@@ -364,8 +358,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 		}
 	}

-	/* re -add the WiFi header and IV */
-	skb_push(skb, hdr_len + iv_len);
+	/* re -add the WiFi header */
+	skb_push(skb, hdr_len);

 	return 0;

@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)

 	if (r == -EREMOTEIO) {
 		phy->hard_fault = r;
-		skb = NULL;
-	} else if (r < 0) {
+		if (info->mode == NXP_NCI_MODE_FW)
+			nxp_nci_fw_recv_frame(phy->ndev, NULL);
+	}
+	if (r < 0) {
 		nfc_err(&client->dev, "Read failed with error %d\n", r);
 		goto exit_irq_handled;
 	}
@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 			err = -EFAULT;
 			break;
 		}
-		if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
-			req.extts.rsv[0] || req.extts.rsv[1]) &&
-			cmd == PTP_EXTTS_REQUEST2) {
-			err = -EINVAL;
-			break;
+		if (cmd == PTP_EXTTS_REQUEST2) {
+			/* Tell the drivers to check the flags carefully. */
+			req.extts.flags |= PTP_STRICT_FLAGS;
+			/* Make sure no reserved bit is set. */
+			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+			    req.extts.rsv[0] || req.extts.rsv[1]) {
+				err = -EINVAL;
+				break;
+			}
+			/* Ensure one of the rising/falling edge bits is set. */
+			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+				err = -EINVAL;
+				break;
+			}
 		} else if (cmd == PTP_EXTTS_REQUEST) {
 			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
 			req.extts.rsv[0] = 0;
@@ -803,7 +803,12 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 			continue;

 		if (cookie->inodes[i]) {
-			afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+			struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+			if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+				continue;
+
+			afs_vnode_commit_status(&fc, iv,
						scb->cb_break, NULL, scb);
 			continue;
 		}
fs/aio.c

@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
 #ifdef CONFIG_COMPAT

 struct __compat_aio_sigset {
-	compat_sigset_t __user	*sigmask;
+	compat_uptr_t		sigmask;
 	compat_size_t		sigsetsize;
 };

@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		struct old_timespec32 __user *, timeout,
		const struct __compat_aio_sigset __user *, usig)
 {
-	struct __compat_aio_sigset ksig = { NULL, };
+	struct __compat_aio_sigset ksig = { 0, };
 	struct timespec64 t;
 	bool interrupted;
 	int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
 	if (ret)
 		return ret;

@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		struct __kernel_timespec __user *, timeout,
		const struct __compat_aio_sigset __user *, usig)
 {
-	struct __compat_aio_sigset ksig = { NULL, };
+	struct __compat_aio_sigset ksig = { 0, };
 	struct timespec64 t;
 	bool interrupted;
 	int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
 	if (ret)
 		return ret;

@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
		 */
		how &= ~AUTOFS_EXP_LEAVES;
		found = should_expire(expired, mnt, timeout, how);
-		if (!found || found != expired)
-			/* Something has changed, continue */
+		if (found != expired) { // something has changed, continue
+			dput(found);
			goto next;
+		}

		if (expired != dentry)
			dput(dentry);
@@ -9744,6 +9744,18 @@ static int btrfs_rename_exchange(struct inode *old_dir,
			commit_transaction = true;
	}
	if (commit_transaction) {
+		/*
+		 * We may have set commit_transaction when logging the new name
+		 * in the destination root, in which case we left the source
+		 * root context in the list of log contextes. So make sure we
+		 * remove it to avoid invalid memory accesses, since the context
+		 * was allocated in our stack frame.
+		 */
+		if (sync_log_root) {
+			mutex_lock(&root->log_mutex);
+			list_del_init(&ctx_root.list);
+			mutex_unlock(&root->log_mutex);
+		}
		ret = btrfs_commit_transaction(trans);
	} else {
		int ret2;
@@ -9757,6 +9769,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

+	ASSERT(list_empty(&ctx_root.list));
+	ASSERT(list_empty(&ctx_dest.list));
+
	return ret;
 }

@@ -753,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

+	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+		inode_dio_end(inode);
+
	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;
@@ -1091,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
					      CEPH_CAP_FILE_RD);

	list_splice(&aio_req->osd_reqs, &osd_reqs);
+	inode_dio_begin(inode);
	while (!list_empty(&osd_reqs)) {
		req = list_first_entry(&osd_reqs,
				       struct ceph_osd_request,
@@ -1264,14 +1268,24 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

+	if (iocb->ki_flags & IOCB_DIRECT)
+		ceph_start_io_direct(inode);
+	else
+		ceph_start_io_read(inode);
+
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
-	if (ret < 0)
+	if (ret < 0) {
+		if (iocb->ki_flags & IOCB_DIRECT)
+			ceph_end_io_direct(inode);
+		else
+			ceph_end_io_read(inode);
		return ret;
+	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
@@ -1283,16 +1297,12 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
-				ceph_start_io_direct(inode);
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
-				ceph_end_io_direct(inode);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
-				ceph_start_io_read(inode);
				ret = ceph_sync_read(iocb, to, &retry_op);
-				ceph_end_io_read(inode);
			}
		} else {
			retry_op = READ_INLINE;
@@ -1303,11 +1313,10 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
-		ceph_start_io_read(inode);
		ret = generic_file_read_iter(iocb, to);
-		ceph_end_io_read(inode);
		ceph_del_rw_context(fi, &rw_ctx);
	}
+
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
@@ -1315,6 +1324,12 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
+
+	if (iocb->ki_flags & IOCB_DIRECT)
+		ceph_end_io_direct(inode);
+	else
+		ceph_end_io_read(inode);
+
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
@@ -128,13 +128,20 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
				  struct inode *inode)
 {
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
	struct dentry *lower_dir_dentry;
+	struct inode *lower_dir_inode;
	int rc;

-	dget(lower_dentry);
-	lower_dir_dentry = lock_parent(lower_dentry);
-	rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+	lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+	lower_dir_inode = d_inode(lower_dir_dentry);
+	inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+	dget(lower_dentry);	// don't even try to make the lower negative
+	if (lower_dentry->d_parent != lower_dir_dentry)
+		rc = -EINVAL;
+	else if (d_unhashed(lower_dentry))
+		rc = -EINVAL;
+	else
+		rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
	if (rc) {
		printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
		goto out_unlock;
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
	fsstack_copy_attr_times(dir, lower_dir_inode);
	set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
	inode->i_ctime = dir->i_ctime;
-	d_drop(dentry);
 out_unlock:
-	unlock_dir(lower_dir_dentry);
	dput(lower_dentry);
+	inode_unlock(lower_dir_inode);
+	if (!rc)
+		d_drop(dentry);
	return rc;
 }

@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
 static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
				     struct dentry *lower_dentry)
 {
-	struct inode *inode, *lower_inode = d_inode(lower_dentry);
+	struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+	struct inode *inode, *lower_inode;
	struct ecryptfs_dentry_info *dentry_info;
-	struct vfsmount *lower_mnt;
	int rc = 0;

	dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
		return ERR_PTR(-ENOMEM);
	}

-	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
	fsstack_copy_attr_atime(d_inode(dentry->d_parent),
-				d_inode(lower_dentry->d_parent));
+				d_inode(path->dentry));
	BUG_ON(!d_count(lower_dentry));

	ecryptfs_set_dentry_private(dentry, dentry_info);
-	dentry_info->lower_path.mnt = lower_mnt;
+	dentry_info->lower_path.mnt = mntget(path->mnt);
	dentry_info->lower_path.dentry = lower_dentry;

-	if (d_really_is_negative(lower_dentry)) {
+	/*
+	 * negative dentry can go positive under us here - its parent is not
+	 * locked. That's OK and that could happen just as we return from
+	 * ecryptfs_lookup() anyway. Just need to be careful and fetch
+	 * ->d_inode only once - it's not stable here.
+	 */
+	lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+	if (!lower_inode) {
		/* We want to add because we couldn't find in lower */
		d_add(dentry, NULL);
		return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
	struct dentry *lower_dentry;
	struct dentry *lower_dir_dentry;
+	struct inode *lower_dir_inode;
	int rc;

	lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	dget(dentry);
-	lower_dir_dentry = lock_parent(lower_dentry);
-	dget(lower_dentry);
-	rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
-	dput(lower_dentry);
-	if (!rc && d_really_is_positive(dentry))
+	lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+	lower_dir_inode = d_inode(lower_dir_dentry);
+
+	inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+	dget(lower_dentry);	// don't even try to make the lower negative
+	if (lower_dentry->d_parent != lower_dir_dentry)
+		rc = -EINVAL;
+	else if (d_unhashed(lower_dentry))
+		rc = -EINVAL;
+	else
+		rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+	if (!rc) {
		clear_nlink(d_inode(dentry));
-	fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
-	set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
-	unlock_dir(lower_dir_dentry);
+		fsstack_copy_attr_times(dir, lower_dir_inode);
+		set_nlink(dir, lower_dir_inode->i_nlink);
+	}
+	dput(lower_dentry);
+	inode_unlock(lower_dir_inode);
	if (!rc)
		d_drop(dentry);
-	dput(dentry);
	return rc;
 }

@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	struct dentry *lower_new_dentry;
	struct dentry *lower_old_dir_dentry;
	struct dentry *lower_new_dir_dentry;
-	struct dentry *trap = NULL;
+	struct dentry *trap;
	struct inode *target_inode;

	if (flags)
		return -EINVAL;

+	lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+	lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
	lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
	lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
-	dget(lower_old_dentry);
-	dget(lower_new_dentry);
-	lower_old_dir_dentry = dget_parent(lower_old_dentry);
-	lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
	target_inode = d_inode(new_dentry);
+
	trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+	dget(lower_new_dentry);
	rc = -EINVAL;
	if (lower_old_dentry->d_parent != lower_old_dir_dentry)
		goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	if (new_dir != old_dir)
		fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
 out_lock:
-	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
-	dput(lower_new_dir_dentry);
-	dput(lower_old_dir_dentry);
	dput(lower_new_dentry);
-	dput(lower_old_dentry);
+	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
	return rc;
 }

@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
		 * inode is actually connected to the parent.
		 */
		err = exportfs_get_name(mnt, target_dir, nbuf, result);
-		if (!err) {
-			inode_lock(target_dir->d_inode);
-			nresult = lookup_one_len(nbuf, target_dir,
-						 strlen(nbuf));
-			inode_unlock(target_dir->d_inode);
-			if (!IS_ERR(nresult)) {
-				if (nresult->d_inode) {
-					dput(result);
-					result = nresult;
-				} else
-					dput(nresult);
-			}
+		if (err) {
+			dput(target_dir);
+			goto err_result;
		}

+		inode_lock(target_dir->d_inode);
+		nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+		if (!IS_ERR(nresult)) {
+			if (unlikely(nresult->d_inode != result->d_inode)) {
+				dput(nresult);
+				nresult = ERR_PTR(-ESTALE);
+			}
+		}
+		inode_unlock(target_dir->d_inode);
		/*
		 * At this point we are done with the parent, but it's pinned
		 * by the child dentry anyway.
		 */
		dput(target_dir);

+		if (IS_ERR(nresult)) {
+			err = PTR_ERR(nresult);
+			goto err_result;
+		}
+		dput(result);
+		result = nresult;
+
		/*
		 * And finally make sure the dentry is actually acceptable
		 * to NFSD.
@@ -326,6 +326,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT		1024	/* timeout request */
 #define REQ_F_ISREG		2048	/* regular file */
 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
	u64		user_data;
	u32		result;
	u32		sequence;
@@ -453,9 +454,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(ctx, req)) {
-		list_del_init(&req->list);
-		return req;
+	if (req) {
+		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+			return NULL;
+		if (!__io_sequence_defer(ctx, req)) {
+			list_del_init(&req->list);
+			return req;
+		}
	}

	return NULL;
@@ -1225,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
		}
	}

-	return 0;
+	return len;
 }

 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
@@ -1941,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

-	req->flags |= REQ_F_TIMEOUT;
-
	/*
	 * sqe->off holds how many events that need to occur for this
-	 * timeout event to be satisfied.
+	 * timeout event to be satisfied. If it isn't set, then this is
+	 * a pure timeout request, sequence isn't used.
	 */
	count = READ_ONCE(sqe->off);
-	if (!count)
-		count = 1;
+	if (!count) {
+		req->flags |= REQ_F_TIMEOUT_NOSEQ;
+		spin_lock_irq(&ctx->completion_lock);
+		entry = ctx->timeout_list.prev;
+		goto add;
+	}

	req->sequence = ctx->cached_sq_head + count - 1;
	/* reuse it to store the count */
	req->submit.sequence = count;
+	req->flags |= REQ_F_TIMEOUT;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
		unsigned nxt_sq_head;
		long long tmp, tmp_nxt;

+		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+			continue;
+
		/*
		 * Since cached_sq_head + count - 1 can overflow, use type long
		 * long to store it.
@@ -1990,6 +2004,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
		nxt->sequence++;
	}
	req->sequence -= span;
+add:
	list_add(&req->list, entry);
	spin_unlock_irq(&ctx->completion_lock);

@@ -2283,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
	switch (op) {
	case IORING_OP_NOP:
	case IORING_OP_POLL_REMOVE:
+	case IORING_OP_TIMEOUT:
		return false;
	default:
		return true;
@@ -2500,8 +2500,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *

	time64_to_tm(sb->s_time_max, 0, &tm);

-	pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n",
-		sb->s_type->name, mntpath,
+	pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+		sb->s_type->name,
+		is_mounted(mnt) ? "remounted" : "mounted",
+		mntpath,
		tm.tm_year+1900, (unsigned long long)sb->s_time_max);

	free_page((unsigned long)buf);
@@ -2794,14 +2796,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

-	error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
-	if (error < 0) {
-		mntput(mnt);
-		return error;
-	}
-
	mnt_warn_timestamp_expiry(mountpoint, mnt);

+	error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+	if (error < 0)
+		mntput(mnt);
	return error;
 }

@@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
			      void *data);

 extern int can_send(struct sk_buff *skb, int loop);
+void can_sock_destruct(struct sock *sk);

 #endif /* !_CAN_CORE_H */
@@ -336,7 +336,8 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				   ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE	1
 #define QI_DEV_IOTLB_MAX_INVS	32

@@ -360,7 +361,8 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				    ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32

 /* Page group response descriptor QW0 */
@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
			      void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
 #define CONFIG_MEM_BLOCK_SIZE	(PAGES_PER_SECTION<<PAGE_SHIFT)
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

@@ -38,7 +38,8 @@ struct devlink {
	struct device *dev;
	possible_net_t _net;
	struct mutex lock;
-	bool reload_failed;
+	u8 reload_failed:1,
+	   reload_enabled:1;
	char priv[0] __aligned(NETDEV_ALIGN);
 };

@@ -774,6 +775,8 @@ struct ib_device;
 struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
 int devlink_register(struct devlink *devlink, struct device *dev);
 void devlink_unregister(struct devlink *devlink);
+void devlink_reload_enable(struct devlink *devlink);
+void devlink_reload_disable(struct devlink *devlink);
 void devlink_free(struct devlink *devlink);
 int devlink_port_register(struct devlink *devlink,
			  struct devlink_port *devlink_port,
@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
			      sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
	),

-	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
+	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
		  __entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
		  __entry->saddr_v6, __entry->daddr_v6,
		  show_tcp_state_name(__entry->state))
@@ -421,6 +421,7 @@ enum devlink_attr {

	DEVLINK_ATTR_RELOAD_FAILED,	/* u8 0 or 1 */

+	DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,	/* u64 */
	/* add new attributes above here, update the policy in devlink.c */

	__DEVLINK_ATTR_MAX,
@@ -31,13 +31,16 @@
 #define PTP_ENABLE_FEATURE (1<<0)
 #define PTP_RISING_EDGE    (1<<1)
 #define PTP_FALLING_EDGE   (1<<2)
+#define PTP_STRICT_FLAGS   (1<<3)
+#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

 /*
  * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
  */
 #define PTP_EXTTS_VALID_FLAGS	(PTP_ENABLE_FEATURE |	\
				 PTP_RISING_EDGE |	\
-				 PTP_FALLING_EDGE)
+				 PTP_FALLING_EDGE |	\
+				 PTP_STRICT_FLAGS)

 /*
  * flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
@@ -351,12 +351,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
	struct dentry *d = kern_path_locked(watch->path, parent);
	if (IS_ERR(d))
		return PTR_ERR(d);
-	inode_unlock(d_backing_inode(parent->dentry));
	if (d_is_positive(d)) {
		/* update watch filter fields */
		watch->dev = d->d_sb->s_dev;
		watch->ino = d_backing_inode(d)->i_ino;
	}
+	inode_unlock(d_backing_inode(parent->dentry));
	dput(d);
	return 0;
 }
@@ -2119,11 +2119,12 @@ int cgroup_do_get_tree(struct fs_context *fc)

		nsdentry = kernfs_node_dentry(cgrp->kn, sb);
		dput(fc->root);
-		fc->root = nsdentry;
		if (IS_ERR(nsdentry)) {
-			ret = PTR_ERR(nsdentry);
			deactivate_locked_super(sb);
+			ret = PTR_ERR(nsdentry);
+			nsdentry = NULL;
		}
+		fc->root = nsdentry;
	}

	if (!ctx->kfc.new_sb_created)
@@ -1036,7 +1036,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 {
 }

-void
+static inline void
 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
 {
 }
@@ -10540,6 +10540,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
		goto err_ns;
	}

+	/*
+	 * Disallow uncore-cgroup events, they don't make sense as the cgroup will
+	 * be different on other CPUs in the uncore mask.
+	 */
+	if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
+		err = -EINVAL;
+		goto err_pmu;
+	}
+
	if (event->attr.aux_output &&
	    !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
		err = -EOPNOTSUPP;
@@ -11331,8 +11340,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
	int err;

	/*
-	 * Get the target context (task or percpu):
+	 * Grouping is not supported for kernel events, neither is 'AUX',
+	 * make sure the caller's intentions are adjusted.
	 */
+	if (attr->aux_output)
+		return ERR_PTR(-EINVAL);

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context, -1);
@@ -11344,6 +11356,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
	/* Mark owner so we could distinguish it from user events. */
	event->owner = TASK_TOMBSTONE;

+	/*
+	 * Get the target context (task or percpu):
+	 */
	ctx = find_get_context(event->pmu, task, event);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
@@ -11795,7 +11810,7 @@ inherit_event(struct perf_event *parent_event,
						   GFP_KERNEL);
		if (!child_ctx->task_ctx_data) {
			free_event(child_event);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
		}
	}

@@ -11898,7 +11913,7 @@ static int inherit_group(struct perf_event *parent_event,
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);

-		if (sub->aux_event == parent_event &&
+		if (sub->aux_event == parent_event && child_ctr &&
		    !perf_get_aux_event(child_ctr, leader))
			return -EINVAL;
	}
Some files were not shown because too many files have changed in this diff.