Merge 6.1.65 into android14-6.1-lts

Changes in 6.1.65
	afs: Fix afs_server_list to be cleaned up with RCU
	afs: Make error on cell lookup failure consistent with OpenAFS
	drm/panel: boe-tv101wum-nl6: Fine tune the panel power sequence
	drm/panel: auo,b101uan08.3: Fine tune the panel power sequence
	drm/panel: simple: Fix Innolux G101ICE-L01 bus flags
	drm/panel: simple: Fix Innolux G101ICE-L01 timings
	wireguard: use DEV_STATS_INC()
	octeontx2-pf: Fix memory leak during interface down
	ata: pata_isapnp: Add missing error check for devm_ioport_map()
	drm/i915: do not clean GT table on error path
	drm/rockchip: vop: Fix color for RGB888/BGR888 format on VOP full
	HID: fix HID device resource race between HID core and debugging support
	ipv4: Correct/silence an endian warning in __ip_do_redirect
	net: usb: ax88179_178a: fix failed operations during ax88179_reset
	net/smc: avoid data corruption caused by decline
	arm/xen: fix xen_vcpu_info allocation alignment
	octeontx2-pf: Fix ntuple rule creation to direct packet to VF with higher Rx queue than its PF
	amd-xgbe: handle corner-case during sfp hotplug
	amd-xgbe: handle the corner-case during tx completion
	amd-xgbe: propagate the correct speed and duplex status
	net: axienet: Fix check for partial TX checksum
	afs: Return ENOENT if no cell DNS record can be found
	afs: Fix file locking on R/O volumes to operate in local mode
	mm,kfence: decouple kfence from page granularity mapping judgement
	arm64: mm: Fix "rodata=on" when CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
	i40e: use ERR_PTR error print in i40e messages
	i40e: Fix adding unsupported cloud filters
	nvmet: nul-terminate the NQNs passed in the connect command
	USB: dwc3: qcom: fix resource leaks on probe deferral
	USB: dwc3: qcom: fix ACPI platform device leak
	lockdep: Fix block chain corruption
	cifs: minor cleanup of some headers
	smb3: allow dumping session and tcon id to improve stats analysis and debugging
	cifs: print last update time for interface list
	cifs: distribute channels across interfaces based on speed
	cifs: account for primary channel in the interface list
	cifs: fix leak of iface for primary channel
	MIPS: KVM: Fix a build warning about variable set but not used
	media: camss: Split power domain management
	media: camss: Convert to platform remove callback returning void
	media: qcom: Initialise V4L2 async notifier later
	media: qcom: camss: Fix V4L2 async notifier error path
	media: qcom: camss: Fix genpd cleanup
	ext4: add a new helper to check if es must be kept
	ext4: factor out __es_alloc_extent() and __es_free_extent()
	ext4: use pre-allocated es in __es_insert_extent()
	ext4: use pre-allocated es in __es_remove_extent()
	ext4: using nofail preallocation in ext4_es_remove_extent()
	ext4: using nofail preallocation in ext4_es_insert_delayed_block()
	ext4: using nofail preallocation in ext4_es_insert_extent()
	ext4: fix slab-use-after-free in ext4_es_insert_extent()
	ext4: make sure allocate pending entry not fail
	NFSD: Fix "start of NFS reply" pointer passed to nfsd_cache_update()
	NFSD: Fix checksum mismatches in the duplicate reply cache
	arm64: dts: imx8mn-var-som: add 20ms delay to ethernet regulator enable
	ACPI: resource: Skip IRQ override on ASUS ExpertBook B1402CVA
	swiotlb-xen: provide the "max_mapping_size" method
	bcache: replace a mistaken IS_ERR() by IS_ERR_OR_NULL() in btree_gc_coalesce()
	md: fix bi_status reporting in md_end_clone_io
	bcache: fixup multi-threaded bch_sectors_dirty_init() wake-up race
	io_uring/fs: consider link->flags when getting path for LINKAT
	s390/dasd: protect device queue against concurrent access
	USB: serial: option: add Luat Air72*U series products
	hv_netvsc: fix race of netvsc and VF register_netdevice
	hv_netvsc: Fix race of register_netdevice_notifier and VF register
	hv_netvsc: Mark VF as slave before exposing it to user-mode
	dm-delay: fix a race between delay_presuspend and delay_bio
	bcache: check return value from btree_node_alloc_replacement()
	bcache: prevent potential division by zero error
	bcache: fixup init dirty data errors
	bcache: fixup lock c->root error
	usb: cdnsp: Fix deadlock issue during using NCM gadget
	USB: serial: option: add Fibocom L7xx modules
	USB: serial: option: fix FM101R-GL defines
	USB: serial: option: don't claim interface 4 for ZTE MF290
	usb: typec: tcpm: Skip hard reset when in error recovery
	USB: dwc2: write HCINT with INTMASK applied
	usb: dwc3: Fix default mode initialization
	usb: dwc3: set the dma max_seg_size
	USB: dwc3: qcom: fix software node leak on probe errors
	USB: dwc3: qcom: fix wakeup after probe deferral
	io_uring: fix off-by one bvec index
	Linux 6.1.65

Change-Id: Iea9267bee56905028a77d03c7fad8def8969246e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-12-14 12:20:03 +00:00
commit ee16988743
73 changed files with 1084 additions and 518 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 64
+SUBLEVEL = 65
EXTRAVERSION =
NAME = Curry Ramen


@ -487,7 +487,8 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
-xen_vcpu_info = alloc_percpu(struct vcpu_info);
+xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+			       1 << fls(sizeof(struct vcpu_info) - 1));
if (xen_vcpu_info == NULL)
return -ENOMEM;
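
Note: the new alignment argument rounds sizeof(struct vcpu_info) up to the next power of two, so a per-CPU vcpu_info can never straddle an alignment boundary of its own size. A minimal userspace sketch of that computation, not part of the patch (fls_demo() stands in for the kernel's fls(); assumes a GCC/Clang __builtin_clz):

#include <stdio.h>

/* fls() analogue: 1-based index of the most significant set bit. */
static int fls_demo(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* 1 << fls(n - 1) rounds n up to the next power of two. */
	unsigned int sizes[] = { 1, 24, 60, 64, 65 };
	for (int i = 0; i < 5; i++)
		printf("size %3u -> align %3u\n", sizes[i],
		       1u << fls_demo(sizes[i] - 1));
	return 0;
}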


@ -27,6 +27,7 @@ reg_eth_phy: regulator-eth-phy {
regulator-name = "eth_phy_pwr";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-enable-ramp-delay = <20000>;
gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>;
enable-active-high;
};


@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
extern bool rodata_enabled;
extern bool rodata_full;
-if (arg && !strcmp(arg, "full")) {
+if (!arg)
+	return false;
+if (!strcmp(arg, "full")) {
+	rodata_enabled = rodata_full = true;
+	return true;
+}
+if (!strcmp(arg, "off")) {
+	rodata_enabled = rodata_full = false;
+	return true;
+}
+if (!strcmp(arg, "on")) {
 	rodata_enabled = true;
-	rodata_full = true;
+	rodata_full = false;
 	return true;
 }
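
Note: after this change the three accepted rodata= values map onto the two flags as "full" -> enabled and full, "on" -> enabled but not full, "off" -> neither; previously only "full" was handled here, so "on" left rodata_full at its Kconfig default. A standalone sketch of the resulting decision table, mirroring the logic above:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool rodata_enabled, rodata_full;

static bool parse_rodata(const char *arg)
{
	if (!arg)
		return false;
	if (!strcmp(arg, "full")) {
		rodata_enabled = rodata_full = true;
		return true;
	}
	if (!strcmp(arg, "off")) {
		rodata_enabled = rodata_full = false;
		return true;
	}
	if (!strcmp(arg, "on")) {
		rodata_enabled = true;
		rodata_full = false;
		return true;
	}
	return false;
}

int main(void)
{
	const char *args[] = { "full", "on", "off", "bogus" };
	for (int i = 0; i < 4; i++) {
		bool ok = parse_rodata(args[i]);
		printf("rodata=%-5s ok=%d enabled=%d full=%d\n",
		       args[i], ok, rodata_enabled, rodata_full);
	}
	return 0;
}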


@ -29,8 +29,8 @@ bool can_set_direct_map(void)
*
* KFENCE pool requires page-granular mapping if initialized late.
*/
-return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-	arm64_kfence_can_set_direct_map();
+return rodata_full || debug_pagealloc_enabled() ||
+	arm64_kfence_can_set_direct_map();
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
* If we are manipulating read-only permissions, apply the same
* change to the linear mapping of the pages that back this VM area.
*/
-if (rodata_enabled &&
-    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
__change_memory_common((u64)page_address(area->pages[i]),


@ -593,7 +593,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
-pte_t *ptep, entry, old_pte;
+pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@ -665,7 +665,6 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
-old_pte = *ptep;
set_pte(ptep, entry);
err = 0;


@ -446,6 +446,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
},
},
{
/* Asus ExpertBook B1402CVA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
},
},
{
.ident = "Asus ExpertBook B2402CBA",
.matches = {


@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
if (!ctl_addr)
return -ENOMEM;
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;


@ -903,8 +903,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
err:
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
-intel_gt_release_all(i915);
return ret;
}
@ -923,15 +921,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
return 0;
}
-void intel_gt_release_all(struct drm_i915_private *i915)
-{
-	struct intel_gt *gt;
-	unsigned int id;
-	for_each_gt(gt, i915, id)
-		i915->gt[id] = NULL;
-}
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p)
{


@ -901,7 +901,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = i915_driver_mmio_probe(i915);
if (ret < 0)
-goto out_tiles_cleanup;
+goto out_runtime_pm_put;
ret = i915_driver_hw_probe(i915);
if (ret < 0)
@ -959,8 +959,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
-out_tiles_cleanup:
-	intel_gt_release_all(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_late_release(i915);


@ -36,6 +36,7 @@ struct panel_desc {
const struct panel_init_cmd *init_cmds;
unsigned int lanes;
bool discharge_on_disable;
bool lp11_before_reset;
};
struct boe_panel {
@ -1269,6 +1270,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
usleep_range(10000, 11000);
if (boe->desc->lp11_before_reset) {
mipi_dsi_dcs_nop(boe->dsi);
usleep_range(1000, 2000);
}
gpiod_set_value(boe->enable_gpio, 1);
usleep_range(1000, 2000);
gpiod_set_value(boe->enable_gpio, 0);
@ -1468,6 +1473,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = auo_b101uan08_3_init_cmd,
.lp11_before_reset = true,
};
static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
@ -1495,6 +1501,7 @@ static const struct panel_desc boe_tv105wum_nw0_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = boe_init_cmd,
.lp11_before_reset = true,
};
static int boe_panel_get_modes(struct drm_panel *panel,


@ -2205,13 +2205,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
-.hfront_porch = { 41, 80, 100 },
-.hback_porch = { 40, 79, 99 },
-.hsync_len = { 1, 1, 1 },
+.hfront_porch = { 30, 60, 70 },
+.hback_porch = { 30, 60, 70 },
+.hsync_len = { 22, 40, 60 },
 .vactive = { 800, 800, 800 },
-.vfront_porch = { 5, 11, 14 },
-.vback_porch = { 4, 11, 14 },
-.vsync_len = { 1, 1, 1 },
+.vfront_porch = { 3, 8, 14 },
+.vback_porch = { 3, 8, 14 },
+.vsync_len = { 4, 7, 12 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
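
Note: the corrected typical timings still work out to roughly 60 Hz; a quick standalone check using the typ values from the struct above:

#include <stdio.h>

int main(void)
{
	unsigned long pixelclock = 71100000;
	unsigned int htotal = 1280 + 60 + 40 + 60;	/* hactive + hfp + hsync + hbp */
	unsigned int vtotal = 800 + 8 + 7 + 8;		/* vactive + vfp + vsync + vbp */

	printf("refresh = %.2f Hz\n",
	       (double)pixelclock / ((double)htotal * vtotal));	/* ~60 Hz */
	return 0;
}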
@ -2228,6 +2228,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};


@ -248,14 +248,22 @@ static inline void vop_cfg_done(struct vop *vop)
VOP_REG_SET(vop, common, cfg_done, 1);
}
-static bool has_rb_swapped(uint32_t format)
+static bool has_rb_swapped(uint32_t version, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
-case DRM_FORMAT_BGR888:
case DRM_FORMAT_BGR565:
return true;
/*
* full framework (IP version 3.x) only need rb swapped for RGB888 and
* little framework (IP version 2.x) only need rb swapped for BGR888,
* check for 3.x to also only rb swap BGR888 for unknown vop version
*/
case DRM_FORMAT_RGB888:
return VOP_MAJOR(version) == 3;
case DRM_FORMAT_BGR888:
return VOP_MAJOR(version) != 3;
default:
return false;
}
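
Note: the comment above encodes a version-dependent red/blue swap decision. A standalone sketch of that decision table (the enum and the VOP_MAJOR() encoding here are illustrative stand-ins, not the kernel's actual definitions):

#include <stdbool.h>
#include <stdio.h>

enum fmt { RGB888, BGR888, BGR565 };
#define VOP_MAJOR(version) ((version) >> 8)	/* assumed encoding */

static bool rb_swap_demo(unsigned int version, enum fmt f)
{
	switch (f) {
	case BGR565:
		return true;
	case RGB888:	/* full framework (IP 3.x) swaps RGB888 */
		return VOP_MAJOR(version) == 3;
	case BGR888:	/* little framework (2.x and unknown) swaps BGR888 */
		return VOP_MAJOR(version) != 3;
	default:
		return false;
	}
}

int main(void)
{
	printf("v3.x: RGB888 swap=%d BGR888 swap=%d\n",
	       rb_swap_demo(3 << 8, RGB888), rb_swap_demo(3 << 8, BGR888));
	printf("v2.x: RGB888 swap=%d BGR888 swap=%d\n",
	       rb_swap_demo(2 << 8, RGB888), rb_swap_demo(2 << 8, BGR888));
	return 0;
}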
@ -1017,7 +1025,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, dsp_info, dsp_info);
VOP_WIN_SET(vop, win, dsp_st, dsp_st);
-rb_swap = has_rb_swapped(fb->format->format);
+rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
/*


@ -708,15 +708,22 @@ static void hid_close_report(struct hid_device *device)
* Free a device structure, all reports, and all fields.
*/
-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
 {
-	struct hid_device *hid = to_hid_device(dev);
+	struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid);
kfree(hid->dev_rdesc);
kfree(hid);
}
static void hid_device_release(struct device *dev)
{
struct hid_device *hid = to_hid_device(dev);
kref_put(&hid->ref, hiddev_free);
}
/*
* Fetch a report description item from the data stream. We support long
* items, though they are not used yet.
@ -2814,6 +2821,7 @@ struct hid_device *hid_allocate_device(void)
spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
kref_init(&hdev->ref);
return hdev;
}


@ -1132,6 +1132,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
list->hdev = (struct hid_device *) inode->i_private;
kref_get(&list->hdev->ref);
file->private_data = list;
mutex_init(&list->read_mutex);
@ -1224,6 +1225,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfifo_free(&list->hid_debug_fifo);
kref_put(&list->hdev->ref, hiddev_free);
kfree(list);
return 0;
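
Note: together these two hunks put the hid_device under reference counting: the core holds one reference from kref_init(), each open debugfs events file takes another, and whichever kref_put() drops the count to zero frees the device. A toy, single-threaded illustration of why the debug file can outlive device release (the kernel's kref uses atomics instead):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }

static void put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	o->refs = 1;	/* core's reference, cf. kref_init() */
	get(o);		/* debugfs open, cf. kref_get() */
	put(o);		/* device release: object survives */
	put(o);		/* debugfs release: now freed */
	return 0;
}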


@ -1342,7 +1342,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
-while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
@ -1506,6 +1506,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0;
n = btree_node_alloc_replacement(replace, NULL);
if (IS_ERR(n))
return 0;
/* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) {


@ -1103,7 +1103,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i];
if (n)
-do_div(sum, n);
+sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) /


@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
void bch_sectors_dirty_init(struct bcache_device *d)
{
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
+retry_lock:
+	b = c->root;
+	rw_lock(0, b, b->level);
+	if (b != c->root) {
+		rw_unlock(0, b);
+		goto retry_lock;
+	}
 /* Just count root keys if no leaf node */
-rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid)
k, &iter, bch_ptr_invalid) {
if (KEY_INODE(k) != op.inode)
continue;
sectors_dirty_init_fn(&op.op, c->root, k);
}
-rw_unlock(0, c->root);
+rw_unlock(0, b);
return;
}
@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
if (atomic_read(&state.enough))
break;
+atomic_inc(&state.started);
state.infos[i].state = &state;
state.infos[i].thread =
kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i);
if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
atomic_dec(&state.started);
for (--i; i >= 0; i--)
kthread_stop(state.infos[i].thread);
goto out;
}
-atomic_inc(&state.started);
}
out:
/* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0);
-rw_unlock(0, c->root);
+rw_unlock(0, b);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
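
Note: the retry_lock loop above handles the root node being replaced between reading c->root and acquiring its lock. A generic userspace sketch of the same lock-then-revalidate pattern (pthreads; illustrative only):

#include <pthread.h>

struct node { pthread_rwlock_t lock; };

/* Lock whatever *rootp points to, rechecking that it is still the root
 * once the lock is held; the root may have been replaced meanwhile. */
static struct node *lock_current_root(struct node *volatile *rootp)
{
	struct node *b;

	for (;;) {
		b = *rootp;
		pthread_rwlock_rdlock(&b->lock);
		if (b == *rootp)
			return b;	/* still the root: safe to use */
		pthread_rwlock_unlock(&b->lock);	/* raced: retry */
	}
}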


@ -30,7 +30,7 @@ struct delay_c {
struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
-atomic_t may_delay;
+bool may_delay;
struct delay_class read;
struct delay_class write;
@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
mutex_init(&dc->timer_lock);
-atomic_set(&dc->may_delay, 1);
+dc->may_delay = true;
dc->argc = argc;
ret = delay_class_ctr(ti, &dc->read, argv);
@ -246,7 +246,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
struct dm_delay_info *delayed;
unsigned long expires = 0;
-if (!c->delay || !atomic_read(&dc->may_delay))
+if (!c->delay)
return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@ -255,6 +255,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
mutex_lock(&delayed_bios_lock);
if (unlikely(!dc->may_delay)) {
mutex_unlock(&delayed_bios_lock);
return DM_MAPIO_REMAPPED;
}
c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
mutex_unlock(&delayed_bios_lock);
@ -268,7 +272,10 @@ static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
-atomic_set(&dc->may_delay, 0);
+mutex_lock(&delayed_bios_lock);
+dc->may_delay = false;
+mutex_unlock(&delayed_bios_lock);
del_timer_sync(&dc->delay_timer);
flush_bios(flush_delayed_bios(dc, 1));
}
@ -277,7 +284,7 @@ static void delay_resume(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
-atomic_set(&dc->may_delay, 1);
+dc->may_delay = true;
}
static int delay_map(struct dm_target *ti, struct bio *bio)
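
Note: the race is closed because delay_bio() now re-checks may_delay under the same mutex that protects the delayed list, so presuspend cannot flip the flag between the check and the list insertion. A standalone sketch of the pattern (pthreads; names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t delayed_lock = PTHREAD_MUTEX_INITIALIZER;
static bool may_delay = true;

/* Producer: only enqueue while holding the lock AND the flag is set,
 * so presuspend cannot miss an entry that is about to be queued. */
static bool try_enqueue(void)
{
	pthread_mutex_lock(&delayed_lock);
	if (!may_delay) {
		pthread_mutex_unlock(&delayed_lock);
		return false;	/* caller must handle it immediately */
	}
	/* ... add to the delayed list here ... */
	pthread_mutex_unlock(&delayed_lock);
	return true;
}

/* Presuspend: flip the flag under the same lock, then flush the list. */
static void presuspend(void)
{
	pthread_mutex_lock(&delayed_lock);
	may_delay = false;
	pthread_mutex_unlock(&delayed_lock);
	/* ... flush previously delayed entries ... */
}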


@ -8648,7 +8648,8 @@ static void md_end_io_acct(struct bio *bio)
struct md_io_acct *md_io_acct = bio->bi_private;
struct bio *orig_bio = md_io_acct->orig_bio;
-orig_bio->bi_status = bio->bi_status;
+if (bio->bi_status && !orig_bio->bi_status)
+	orig_bio->bi_status = bio->bi_status;
bio_end_io_acct(orig_bio, md_io_acct->start_time);
bio_put(bio);
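
Note: the new test implements first-error-wins across cloned bios: a later successful clone must not overwrite an error already recorded on the original bio. A standalone sketch:

#include <stdio.h>

/* First-error-wins: a later success must not clear an earlier failure. */
static void propagate_status(int *orig_status, int clone_status)
{
	if (clone_status && !*orig_status)
		*orig_status = clone_status;
}

int main(void)
{
	int orig = 0;
	propagate_status(&orig, 0);	/* success: orig stays 0 */
	propagate_status(&orig, 10);	/* first error is recorded */
	propagate_status(&orig, 0);	/* later success does not clear it */
	printf("final status %d\n", orig);	/* 10 */
	return 0;
}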


@ -671,7 +671,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
-	/* nop */
+	struct camss *camss = vfe->camss;
+
+	if (vfe->id >= camss->vfe_num)
+		return;
+
+	device_link_del(camss->genpd_link[vfe->id]);
}
/*
@ -680,6 +685,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
if (id >= camss->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id])
return -EINVAL;
return 0;
}


@ -478,7 +478,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
-	/* nop */
+	struct camss *camss = vfe->camss;
+
+	if (vfe->id >= camss->vfe_num)
+		return;
+
+	device_link_del(camss->genpd_link[vfe->id]);
}
/*
@ -487,6 +492,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
if (id >= camss->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id])
return -EINVAL;
return 0;
}


@ -1453,7 +1453,6 @@ static const struct media_device_ops camss_media_ops = {
static int camss_configure_pd(struct camss *camss)
{
struct device *dev = camss->dev;
-int last_pm_domain = 0;
int i;
int ret;
@ -1484,32 +1483,34 @@ static int camss_configure_pd(struct camss *camss)
if (!camss->genpd_link)
return -ENOMEM;
/*
* VFE power domains are in the beginning of the list, and while all
* power domains should be attached, only if TITAN_TOP power domain is
* found in the list, it should be linked over here.
*/
for (i = 0; i < camss->genpd_num; i++) {
camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
if (IS_ERR(camss->genpd[i])) {
ret = PTR_ERR(camss->genpd[i]);
goto fail_pm;
}
}
-	camss->genpd_link[i] = device_link_add(camss->dev, camss->genpd[i],
-		DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
-		DL_FLAG_RPM_ACTIVE);
-	if (!camss->genpd_link[i]) {
-		dev_pm_domain_detach(camss->genpd[i], true);
+if (i > camss->vfe_num) {
+	camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
+		DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+		DL_FLAG_RPM_ACTIVE);
+	if (!camss->genpd_link[i - 1]) {
 		ret = -EINVAL;
 		goto fail_pm;
 	}
-	last_pm_domain = i;
}
return 0;
fail_pm:
-for (i = 0; i < last_pm_domain; i++) {
-	device_link_del(camss->genpd_link[i]);
+for (--i ; i >= 0; i--)
 	dev_pm_domain_detach(camss->genpd[i], true);
-}
return ret;
}
@ -1537,6 +1538,20 @@ static int camss_icc_get(struct camss *camss)
return 0;
}
static void camss_genpd_cleanup(struct camss *camss)
{
int i;
if (camss->genpd_num == 1)
return;
if (camss->genpd_num > camss->vfe_num)
device_link_del(camss->genpd_link[camss->genpd_num - 1]);
for (i = 0; i < camss->genpd_num; i++)
dev_pm_domain_detach(camss->genpd[i], true);
}
/*
* camss_probe - Probe CAMSS platform device
* @pdev: Pointer to CAMSS platform device
@ -1612,31 +1627,23 @@ static int camss_probe(struct platform_device *pdev)
if (!camss->vfe)
return -ENOMEM;
-v4l2_async_nf_init(&camss->notifier);
-num_subdevs = camss_of_parse_ports(camss);
-if (num_subdevs < 0) {
-	ret = num_subdevs;
-	goto err_cleanup;
-}
ret = camss_icc_get(camss);
if (ret < 0)
-goto err_cleanup;
+return ret;
ret = camss_configure_pd(camss);
if (ret < 0) {
dev_err(dev, "Failed to configure power domains: %d\n", ret);
-goto err_cleanup;
+return ret;
}
ret = camss_init_subdevices(camss);
if (ret < 0)
-goto err_cleanup;
+goto err_genpd_cleanup;
ret = dma_set_mask_and_coherent(dev, 0xffffffff);
if (ret)
-goto err_cleanup;
+goto err_genpd_cleanup;
camss->media_dev.dev = camss->dev;
strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
@ -1648,12 +1655,20 @@ static int camss_probe(struct platform_device *pdev)
ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
-goto err_cleanup;
+goto err_genpd_cleanup;
}
v4l2_async_nf_init(&camss->notifier);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
ret = num_subdevs;
goto err_v4l2_device_unregister;
}
ret = camss_register_entities(camss);
if (ret < 0)
-goto err_register_entities;
+goto err_v4l2_device_unregister;
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
@ -1688,31 +1703,22 @@ static int camss_probe(struct platform_device *pdev)
err_register_subdevs:
camss_unregister_entities(camss);
-err_register_entities:
+err_v4l2_device_unregister:
 	v4l2_device_unregister(&camss->v4l2_dev);
-err_cleanup:
 	v4l2_async_nf_cleanup(&camss->notifier);
+err_genpd_cleanup:
+	camss_genpd_cleanup(camss);
return ret;
}
void camss_delete(struct camss *camss)
{
-int i;
v4l2_device_unregister(&camss->v4l2_dev);
media_device_unregister(&camss->media_dev);
media_device_cleanup(&camss->media_dev);
pm_runtime_disable(camss->dev);
-if (camss->genpd_num == 1)
-	return;
-for (i = 0; i < camss->genpd_num; i++) {
-	device_link_del(camss->genpd_link[i]);
-	dev_pm_domain_detach(camss->genpd[i], true);
-}
}
/*
@ -1721,7 +1727,7 @@ void camss_delete(struct camss *camss)
*
* Always returns 0.
*/
-static int camss_remove(struct platform_device *pdev)
+static void camss_remove(struct platform_device *pdev)
{
struct camss *camss = platform_get_drvdata(pdev);
@ -1732,7 +1738,7 @@ static int camss_remove(struct platform_device *pdev)
if (atomic_read(&camss->ref_count) == 0)
camss_delete(camss);
-return 0;
+camss_genpd_cleanup(camss);
}
static const struct of_device_id camss_dt_match[] = {
@ -1794,7 +1800,7 @@ static const struct dev_pm_ops camss_pm_ops = {
static struct platform_driver qcom_camss_driver = {
.probe = camss_probe,
-.remove = camss_remove,
+.remove_new = camss_remove,
.driver = {
.name = "qcom-camss",
.of_match_table = camss_dt_match,


@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
struct xgbe_channel *channel;
unsigned int i;
queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
if (!pdata->tx_usecs)
return;
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
if (!channel->tx_ring || channel->tx_timer_active)
break;
channel->tx_timer_active = 1;
mod_timer(&channel->tx_timer,
jiffies + usecs_to_jiffies(pdata->tx_usecs));
}
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)


@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = pdata->phy.address;
-cmd->base.autoneg = pdata->phy.autoneg;
-cmd->base.speed = pdata->phy.speed;
-cmd->base.duplex = pdata->phy.duplex;
+if (netif_carrier_ok(netdev)) {
+	cmd->base.speed = pdata->phy.speed;
+	cmd->base.duplex = pdata->phy.duplex;
+} else {
+	cmd->base.speed = SPEED_UNKNOWN;
+	cmd->base.duplex = DUPLEX_UNKNOWN;
+}
+cmd->base.autoneg = pdata->phy.autoneg;
cmd->base.port = PORT_NONE;
XGBE_LM_COPY(cmd, supported, lks, supported);


@ -1178,7 +1178,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
-xgbe_set_mode(pdata, mode);
/* Force the mode change for SFI in Fixed PHY config.
* Fixed PHY configs needs PLL to be enabled while doing mode set.
* When the SFP module isn't connected during boot, driver assumes
* AN is ON and attempts autonegotiation. However, if the connected
* SFP comes up in Fixed PHY config, the link will not come up as
* PLL isn't enabled while the initial mode set command is issued.
* So, force the mode change for SFI in Fixed PHY configuration to
* fix link issues.
*/
if (mode == XGBE_MODE_SFI)
xgbe_change_mode(pdata, mode);
else
xgbe_set_mode(pdata, mode);
return 0;
}


@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %d aq_err %s\n",
err,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %d aq_err %s\n",
err,
"update VSI ctxt for PE failed, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
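
Note: the i40e hunks in this release repeatedly apply this same substitution, replacing a raw numeric print ("err %d", err) with the printk error-pointer format ("err %pe", ERR_PTR(err)), which prints a symbolic errno name (e.g. -EIO) instead of a bare number. A rough userspace approximation of the idea (errname() here is a hand-rolled stand-in for the kernel's errname()):

#include <stdio.h>

#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))

/* Stand-in for the kernel's errname(); only a few entries. */
static const char *errname(long err)
{
	switch (-err) {
	case 5:  return "-EIO";
	case 12: return "-ENOMEM";
	case 22: return "-EINVAL";
	default: return "-E?";
	}
}

int main(void)
{
	void *p = ERR_PTR(-12);
	printf("err %s\n", errname(PTR_ERR(p)));	/* "err -ENOMEM" */
	return 0;
}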


@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed setting DCB ETS configuration err %d aq_err %s\n",
ret,
"Failed setting DCB ETS configuration err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed setting DCB PFC configuration err %d aq_err %s\n",
ret,
"Failed setting DCB PFC configuration err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %d aq_err %s\n",
ret,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %d aq_err %s\n",
ret,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}


@ -1453,8 +1453,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
"Set phy config failed, err %d aq_err %s\n",
status,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@ -1463,8 +1463,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
netdev_dbg(netdev,
"Updating link info failed with err %d aq_err %s\n",
status,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
@ -1515,8 +1515,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
"Set phy config failed, err %d aq_err %s\n",
status,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@ -1529,8 +1529,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
* (e.g. no physical connection etc.)
*/
netdev_dbg(netdev,
"Updating link info failed with err %d aq_err %s\n",
status,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@ -1636,8 +1636,8 @@ static int i40e_nway_reset(struct net_device *netdev)
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
ret,
netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@ -1753,20 +1753,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
status,
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
status,
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
status,
netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@ -5360,8 +5360,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %d aq_err %s\n",
ret,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@ -5433,8 +5433,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %d, %s\n",
status,
"Starting FW LLDP agent failed: error: %pe, %s\n",
ERR_PTR(status),
i40e_aq_str(&pf->hw,
adq_err));
return -EINVAL;


@ -1822,8 +1822,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
ret,
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %d aq_err %s\n",
ret,
"Cannot set RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %s\n",
ret,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %d, aq_err %s\n",
vsi_name, aq_ret,
"ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
vsi_name, ERR_PTR(aq_ret),
i40e_aq_str(hw, aq_status));
}
}
@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %d, aq_err %s\n",
aq_ret,
"Set default VSI failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %d, aq_err %s\n",
aq_ret,
"set unicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %d, aq_err %s\n",
aq_ret,
"set multicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@ -2815,9 +2815,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set multi promisc failed on %s, err %d aq_err %s\n",
"set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
@ -2835,10 +2835,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"Setting promiscuous %s failed on %s, err %d aq_err %s\n",
"Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@ -2986,8 +2986,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %d aq_err %s\n",
ret,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@ -3021,8 +3021,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %d aq_err %s\n",
ret,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@ -3266,8 +3266,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"add pvid failed, err %d aq_err %s\n",
ret,
"add pvid failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
return -ENOENT;
@ -5533,8 +5533,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi bw config, err %d aq_err %s\n",
ret,
"couldn't get PF vsi bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %d aq_err %s\n",
ret,
"couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
ret,
dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
&bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed querying vsi bw info, err %d aq_err %s\n",
ret,
"Failed querying vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %d aq_err %s\n",
ret,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %d aq_err %s\n",
ret,
"Failed updating vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
max_tx_rate, seid, ret,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
max_tx_rate, seid, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %d aq_err %s\n",
ret,
"Failed to delete cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %s\n",
ret,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
kfree(lut);
return ret;
@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"add new vsi failed, err %d aq_err %s\n",
ret,
"add new vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %d aq_err %s\n",
ret,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"VEB bw config failed, err %d aq_err %s\n",
ret,
"VEB bw config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed getting veb bw config, err %d aq_err %s\n",
ret,
"Failed getting veb bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Resume Port Tx failed, err %d aq_err %s\n",
ret,
"Resume Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Suspend Port Tx failed, err %d aq_err %s\n",
ret,
"Suspend Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
ret = i40e_set_dcb_config(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev,
"Set DCB Config failed, err %d aq_err %s\n",
ret,
"Set DCB Config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
i40e_aqc_opc_modify_switching_comp_ets, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Modify Port ETS failed, err %d aq_err %s\n",
ret,
"Modify Port ETS failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
ret = i40e_aq_dcb_updated(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"DCB Updated failed, err %d aq_err %s\n",
ret,
"DCB Updated failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
i40e_aqc_opc_enable_switching_comp_ets, NULL);
if (err) {
dev_info(&pf->pdev->dev,
"Enable Port ETS failed, err %d aq_err %s\n",
err,
"Enable Port ETS failed, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %d aq_err %s\n",
err,
"Query for DCB configuration failed, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@ -7436,8 +7436,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %d last_status = %s\n",
err,
"failed to get phy cap., ret = %pe last_status = %s\n",
ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@ -7448,8 +7448,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %d last_status = %s\n",
err,
"failed to get phy cap., ret = %pe last_status = %s\n",
ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@ -7493,8 +7493,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"set phy config ret = %d last_status = %s\n",
err,
"set phy config ret = %pe last_status = %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return err;
}
@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
rx_ring->netdev = NULL;
}
dev_info(&pf->pdev->dev,
"Error adding mac filter on macvlan err %d, aq_err %s\n",
ret,
"Error adding mac filter on macvlan err %pe, aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %d aq_err %s\n",
ret,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
ch->fwd = NULL;
} else {
dev_info(&pf->pdev->dev,
"Error deleting mac filter on macvlan err %d, aq_err %s\n",
ret,
"Error deleting mac filter on macvlan err %pe, aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
}
break;
@ -8875,7 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to delete cloud filter, err %d\n", err);
"Failed to delete cloud filter, err %pe\n",
ERR_PTR(err));
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}
@ -9437,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
ret,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@ -10264,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %d aq_err %s\n",
ret,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@ -10276,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %d aq_err %s\n",
ret,
"update vsi switch failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@ -10300,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %d aq_err %s\n",
ret,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@ -10312,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %d aq_err %s\n",
ret,
"update vsi switch failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@ -10457,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %d aq_err %s\n",
err,
"capability discovery failed, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
@ -10595,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
if (ret) {
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %d aq_err %s\n",
ret,
"Failed to rebuild cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@ -10836,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
ret,
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@ -10948,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
ret,
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
@ -11052,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
ret,
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@ -11084,9 +11085,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
"Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
ret,
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@ -12220,8 +12221,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
(struct i40e_aqc_get_set_rss_key_data *)seed);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot get RSS key, err %d aq_err %s\n",
ret,
"Cannot get RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@ -12234,8 +12235,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot get RSS lut, err %d aq_err %s\n",
ret,
"Cannot get RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@ -12575,8 +12576,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot acquire NVM for read access, err %d aq_err %s\n",
ret,
"Cannot acquire NVM for read access, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@ -12592,8 +12593,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
ret,
dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@ -12606,8 +12607,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot acquire NVM for write access, err %d aq_err %s\n",
ret,
"Cannot acquire NVM for write access, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@ -12626,8 +12627,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
"BW settings NOT SAVED, err %d aq_err %s\n",
ret,
"BW settings NOT SAVED, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
@ -12681,8 +12682,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
err_nvm:
dev_warn(&pf->pdev->dev,
"total-port-shutdown feature is off due to read nvm error: %d\n",
read_status);
"total-port-shutdown feature is off due to read nvm error: %pe\n",
ERR_PTR(read_status));
return ret;
}
@ -13009,8 +13010,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
ret,
netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@ -13029,8 +13030,8 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
ret,
netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@ -13919,8 +13920,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %d aq_err %s\n",
ret,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@ -13969,8 +13970,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi failed, err %d aq_err %s\n",
ret,
"update vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@ -13992,9 +13993,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* message and continue
*/
dev_info(&pf->pdev->dev,
"failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
"failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
ret,
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@ -14088,8 +14089,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"add vsi failed, err %d aq_err %s\n",
ret,
"add vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@ -14120,8 +14121,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get vsi bw info, err %d aq_err %s\n",
ret,
"couldn't get vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
@ -14567,8 +14568,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw config failed, err %d aq_err %s\n",
ret,
"query veb bw config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@ -14577,8 +14578,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw ets config failed, err %d aq_err %s\n",
ret,
"query veb bw ets config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@ -14774,8 +14775,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %d aq_err %s\n",
ret,
"couldn't add VEB, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
@ -14785,16 +14786,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB statistics idx, err %d aq_err %s\n",
ret,
"couldn't get VEB statistics idx, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB bw info, err %d aq_err %s\n",
ret,
"couldn't get VEB bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
@ -15050,8 +15051,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't fetch switch config, err %d aq_err %s\n",
ret,
"couldn't fetch switch config, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@ -15077,8 +15078,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %d aq_err %s\n",
ret,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@ -15984,8 +15985,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
err,
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Reconfigure hardware for allowing smaller MSS in the case
@ -16003,8 +16004,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
err,
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@ -16136,8 +16137,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %d last_status = %s\n",
err,
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
@ -16147,8 +16148,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %d last_status = %s\n",
err,
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */


@ -1429,8 +1429,8 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
buff_size, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"%s err %d aq_err %s\n",
__func__, status,
"%s err %pe aq_err %s\n",
__func__, ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;


@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
return aq_ret;
@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
aq_ret,
ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
if (ret)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
vf->vf_id, ret,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
if (ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
vf->vf_id, ret,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err;
}
@ -3774,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
int aq_ret = 0;
int i, ret;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@ -3798,8 +3798,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter)
return -ENOMEM;
if (!cfilter) {
aq_ret = -ENOMEM;
goto err_out;
}
/* parse destination mac address */
for (i = 0; i < ETH_ALEN; i++)
@ -3847,13 +3849,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* Adding cloud filter programmed as TC filter */
if (tcf.dst_port)
ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
else
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %d aq_err %s\n",
vf->vf_id, ret,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
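
The change repeated across these i40e hunks is mechanical: the numeric "err %d" becomes "err %pe", printk's error-pointer format, which decodes a negative errno into its symbolic name. ERR_PTR() only casts the integer to a pointer so the %pe handler can interpret it. A minimal illustration of the pattern (the device and errno below are made up):

	/* Prints "probe failed: -EIO" rather than "probe failed: -5". */
	int err = -EIO;

	dev_info(&pdev->dev, "probe failed: %pe\n", ERR_PTR(err));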


@ -1069,6 +1069,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
struct ethhdr *eth_hdr;
bool new = false;
int err = 0;
u64 vf_num;
u32 ring;
if (!flow_cfg->max_flows) {
@ -1081,7 +1082,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
/* The number of queues on a VF can differ from the PF's, so the
 * queue-count check is skipped when the PF is installing a rule
 * for one of its VFs. Below is the expected vf_num value
* based on the ethtool commands.
*
* e.g.
* 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
* 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
* 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
* vf_num:vf_idx+1
*/
vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
if (fsp->location >= otx2_get_maxflows(flow_cfg))
@ -1163,6 +1178,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow_cfg->nr_flows++;
}
if (flow->is_vf)
netdev_info(pfvf->netdev,
"Make sure that VF's queue number is within its queue limit\n");
return 0;
}
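
The vf_num values listed in the comment above come from how ethtool packs a VF index into the 64-bit ring_cookie: bits 39:32 carry vf_idx + 1 (0 meaning "no VF"), so the all-ones RX_CLS_FLOW_DISC cookie yields 255. A sketch of the decoding, mirroring ethtool_get_flow_spec_ring_vf() from include/uapi/linux/ethtool.h (the local macro names here are illustrative stand-ins for ETHTOOL_RX_FLOW_SPEC_RING_VF and its _OFF shift):

	#define RING_VF_MASK	0x000000FF00000000ULL
	#define RING_VF_OFF	32

	static inline u64 ring_cookie_vf(u64 ring_cookie)
	{
		return (ring_cookie & RING_VF_MASK) >> RING_VF_OFF;
	}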


@ -1919,6 +1919,8 @@ int otx2_stop(struct net_device *netdev)
/* Clear RSS enable flag */
rss = &pf->hw.rss_info;
rss->enable = false;
if (!netif_is_rxfh_configured(netdev))
kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,


@ -821,7 +821,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
csum_start_off = skb_transport_offset(skb);
csum_index_off = csum_start_off + skb->csum_offset;
/* Tx Partial Checksum Offload Enabled */


@ -2204,9 +2204,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
/* set slave flag before open to prevent IPv6 addrconf */
vf_netdev->flags |= IFF_SLAVE;
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@ -2313,16 +2310,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
/* Fallback path to check synthetic vf with
* help of mac addr
/* Fallback path to check synthetic vf with help of mac addr.
* Because this function can be called before vf_netdev is
* initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
* from dev_addr, also try to match to its dev_addr.
* Note: On Hyper-V and Azure, it's not possible to set a MAC address
* on a VF that matches the MAC of an unrelated NETVSC device.
*/
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
netdev_notice(vf_netdev,
"falling back to mac addr based matching\n");
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
return ndev;
}
}
netdev_notice(vf_netdev,
@ -2330,6 +2329,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
static int netvsc_prepare_bonding(struct net_device *vf_netdev)
{
struct net_device *ndev;
ndev = get_netvsc_byslot(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
/* set slave flag before open to prevent IPv6 addrconf */
vf_netdev->flags |= IFF_SLAVE;
return NOTIFY_DONE;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@ -2529,6 +2541,21 @@ static int netvsc_probe(struct hv_device *dev,
goto devinfo_failed;
}
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
*
* The rtnl lock also needs to be held before rndis_filter_device_add()
* which advertises nvsp_2_vsc_capability / sriov bit, and triggers
* VF NIC offering and registering. If VF NIC finished register_netdev()
* earlier it may cause name based config failure.
*/
rtnl_lock();
nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
@ -2538,16 +2565,6 @@ static int netvsc_probe(struct hv_device *dev,
eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
*/
rtnl_lock();
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@ -2581,9 +2598,9 @@ static int netvsc_probe(struct hv_device *dev,
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
rtnl_unlock();
netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
@ -2749,6 +2766,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
switch (event) {
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
case NETDEV_UNREGISTER:
@ -2784,12 +2803,17 @@ static int __init netvsc_drv_init(void)
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
register_netdevice_notifier(&netvsc_netdev_notifier);
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
goto err_vmbus_reg;
register_netdevice_notifier(&netvsc_netdev_notifier);
return 0;
err_vmbus_reg:
unregister_netdevice_notifier(&netvsc_netdev_notifier);
return ret;
}
MODULE_LICENSE("GPL");


@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
msleep(200);
msleep(500);
*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
msleep(100);
msleep(200);
/* Ethernet PHY Auto Detach */
ax88179_auto_detach(dev);


@ -209,7 +209,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
*/
while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
++dev->stats.tx_dropped;
DEV_STATS_INC(dev, tx_dropped);
}
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
@ -227,7 +227,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
++dev->stats.tx_errors;
DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
return ret;
}


@ -416,20 +416,20 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
dev->name, skb, peer->internal_id,
&peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_frame_errors;
DEV_STATS_INC(dev, rx_errors);
DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_type:
net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_frame_errors;
DEV_STATS_INC(dev, rx_errors);
DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_size:
net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_length_errors;
DEV_STATS_INC(dev, rx_errors);
DEV_STATS_INC(dev, rx_length_errors);
goto packet_processed;
packet_processed:
dev_kfree_skb(skb);


@ -333,7 +333,8 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
spin_lock_bh(&peer->staged_packet_queue.lock);
peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
DEV_STATS_ADD(peer->device->dev, tx_dropped,
peer->staged_packet_queue.qlen);
__skb_queue_purge(&peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
}
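
All three wireguard hunks swap open-coded "++dev->stats.field" updates for DEV_STATS_INC()/DEV_STATS_ADD(). The plain counters are unsigned long fields updated without locking, so concurrent increments from different contexts can lose updates; the helpers perform the update atomically. Simplified from their definitions in include/linux/netdevice.h, where each net_device_stats field is aliased to an atomic_long_t:

	#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
	#define DEV_STATS_ADD(DEV, FIELD, VAL) \
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)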


@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
if (status)
@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
}
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req);
if (!ctrl) {
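
The subsysnqn and hostnqn fields arrive from the wire as fixed-size byte arrays with no guarantee of a terminator, yet they are handed to string-based lookups; forcing the last byte to NUL bounds every later strlen()/strcmp(). The same defensive pattern in isolation (wire_buf is a hypothetical stand-in for the untrusted source):

	/* NVMF_NQN_FIELD_LEN is 256 in the NVMe fabrics headers. */
	char nqn[256];

	memcpy(nqn, wire_buf, sizeof(nqn));
	nqn[sizeof(nqn) - 1] = '\0';	/* now safe to use as a C string */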


@ -673,18 +673,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once.
*/
device = cqr->startdev;
if (device->profile.data) {
counter = 1; /* request is not yet queued on the start device */
list_for_each(l, &device->ccw_queue)
if (++counter >= 31)
break;
}
if (!device->profile.data)
return;
spin_lock(get_ccwdev_lock(device->cdev));
counter = 1; /* request is not yet queued on the start device */
list_for_each(l, &device->ccw_queue)
if (++counter >= 31)
break;
spin_unlock(get_ccwdev_lock(device->cdev));
spin_lock(&device->profile.lock);
if (device->profile.data) {
device->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
device->profile.data->dasd_read_nr_req[counter]++;
}
device->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
device->profile.data->dasd_read_nr_req[counter]++;
spin_unlock(&device->profile.lock);
}


@ -1522,6 +1522,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
unsigned long flags;
int counter = 0;
local_bh_disable();
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
@ -1534,6 +1535,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_died(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
local_bh_enable();
return IRQ_HANDLED;
}
@ -1550,6 +1552,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
spin_unlock_irqrestore(&pdev->lock, flags);
local_bh_enable();
return IRQ_HANDLED;
}


@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan;
u32 hcint, hcintmsk;
u32 hcint, hcintraw, hcintmsk;
chan = hsotg->hc_ptr_array[chnum];
hcint = dwc2_readl(hsotg, HCINT(chnum));
hcintraw = dwc2_readl(hsotg, HCINT(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
hcint = hcintraw & hcintmsk;
dwc2_writel(hsotg, hcint, HCINT(chnum));
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chnum);
dev_vdbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
hcint, hcintmsk, hcint & hcintmsk);
hcintraw, hcintmsk, hcint);
}
dwc2_writel(hsotg, hcint, HCINT(chnum));
/*
* If we got an interrupt after someone called
* dwc2_hcd_endpoint_disable() we don't want to crash below
@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
return;
}
chan->hcint = hcint;
hcint &= hcintmsk;
chan->hcint = hcintraw;
/*
* If the channel was halted due to a dequeue, the qtd list might


@ -2036,6 +2036,8 @@ static int dwc3_probe(struct platform_device *pdev)
pm_runtime_put(dev);
dma_set_max_seg_size(dev, UINT_MAX);
return 0;
err5:


@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
mode = DWC3_GCTL_PRTCAP_DEVICE;
}
dwc3_set_mode(dwc, mode);
dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
dwc3_role_switch.set = dwc3_usb_role_switch_set;
@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
}
}
dwc3_set_mode(dwc, mode);
return 0;
}
#else


@ -550,7 +550,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
IRQF_ONESHOT,
"qcom_dwc3 HS", qcom);
if (ret) {
dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@ -565,7 +565,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
IRQF_ONESHOT,
"qcom_dwc3 DP_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@ -580,7 +580,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
IRQF_ONESHOT,
"qcom_dwc3 DM_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@ -595,7 +595,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
IRQF_ONESHOT,
"qcom_dwc3 SS", qcom);
if (ret) {
dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@ -759,6 +759,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
if (!qcom->dwc3) {
ret = -ENODEV;
dev_err(dev, "failed to get dwc3 platform device\n");
of_platform_depopulate(dev);
}
node_put:
@ -767,9 +768,9 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return ret;
}
static struct platform_device *
dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
{
struct platform_device *urs_usb = NULL;
struct fwnode_handle *fwh;
struct acpi_device *adev;
char name[8];
@ -789,9 +790,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
adev = to_acpi_device_node(fwh);
if (!adev)
return NULL;
goto err_put_handle;
return acpi_create_platform_device(adev, NULL);
urs_usb = acpi_create_platform_device(adev, NULL);
if (IS_ERR_OR_NULL(urs_usb))
goto err_put_handle;
return urs_usb;
err_put_handle:
fwnode_handle_put(fwh);
return urs_usb;
}
static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
{
struct fwnode_handle *fwh = urs_usb->dev.fwnode;
platform_device_unregister(urs_usb);
fwnode_handle_put(fwh);
}
static int dwc3_qcom_probe(struct platform_device *pdev)
@ -876,13 +894,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
goto clk_disable;
goto free_urs;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
goto clk_disable;
goto free_urs;
}
/*
@ -901,7 +919,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
goto depopulate;
goto free_urs;
}
ret = dwc3_qcom_interconnect_init(qcom);
@ -933,10 +951,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
if (np)
if (np) {
of_platform_depopulate(&pdev->dev);
else
platform_device_put(pdev);
} else {
device_remove_software_node(&qcom->dwc3->dev);
platform_device_del(qcom->dwc3);
}
platform_device_put(qcom->dwc3);
free_urs:
if (qcom->urs_usb)
dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@ -955,11 +979,16 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
device_remove_software_node(&qcom->dwc3->dev);
if (np)
if (np) {
of_platform_depopulate(&pdev->dev);
else
platform_device_put(pdev);
} else {
device_remove_software_node(&qcom->dwc3->dev);
platform_device_del(qcom->dwc3);
}
platform_device_put(qcom->dwc3);
if (qcom->urs_usb)
dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);


@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
#define DELL_PRODUCT_FM101R 0x8213
#define DELL_PRODUCT_FM101R_ESIM 0x8215
#define DELL_PRODUCT_FM101R_ESIM 0x8213
#define DELL_PRODUCT_FM101R 0x8215
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
/* Device flags */
@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);


@ -5434,6 +5434,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
port->tcpc->set_bist_data(port->tcpc, false);
switch (port->state) {
case ERROR_RECOVERY:
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
return;
default:
break;
}
if (port->ams != NONE_AMS)
port->ams = NONE_AMS;
if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)


@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
};
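
Wiring up .max_mapping_size matters because block drivers size their requests with dma_max_mapping_size(); without the callback the generic code assumes SIZE_MAX, allowing mappings larger than a swiotlb bounce buffer can hold. Roughly how the callback is consulted (a simplified sketch of kernel/dma/mapping.c):

	size_t dma_max_mapping_size(struct device *dev)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops && ops->max_mapping_size)
			return ops->max_mapping_size(dev);
		return SIZE_MAX;	/* no limit advertised */
	}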


@ -132,8 +132,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
ret = dns_query(net->net, "afsdb", name, len, "srv=1",
NULL, NULL, false);
if (ret == -ENODATA)
ret = -EDESTADDRREQ;
if (ret == -ENODATA || ret == -ENOKEY)
ret = -ENOENT;
return ret;
}


@ -552,6 +552,7 @@ struct afs_server_entry {
};
struct afs_server_list {
struct rcu_head rcu;
afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
refcount_t usage;
unsigned char nr_servers;


@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
for (i = 0; i < slist->nr_servers; i++)
afs_unuse_server(net, slist->servers[i].server,
afs_server_trace_put_slist);
kfree(slist);
kfree_rcu(slist, rcu);
}
}
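
This pairs with the previous hunk, which adds a struct rcu_head to afs_server_list: kfree_rcu(slist, rcu) defers the actual free until every in-flight RCU read-side critical section has finished, so lockless readers can no longer trip over a just-freed list. The general shape of the fix, as a generic sketch:

	struct foo {
		struct rcu_head rcu;	/* storage for the deferred free */
		int data;
	};

	static void foo_release(struct foo *f)
	{
		/* freed only after all current rcu_read_lock() sections end */
		kfree_rcu(f, rcu);
	}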


@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
return PTR_ERR(volume);
ctx->volume = volume;
if (volume->type != AFSVL_RWVOL)
ctx->flock_mode = afs_flock_mode_local;
}
return 0;


@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
}
/* Status load is ordered after lookup counter load */
if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
pr_warn("No record of cell %s\n", cell->name);
vc->error = -ENOENT;
return false;
}
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
vc->error = -EDESTADDRREQ;
return false;
@ -285,6 +291,7 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
*/
static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
{
struct afs_cell *cell = vc->cell;
static int count;
int i;
@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
pr_notice("DNS: src=%u st=%u lc=%x\n",
cell->dns_source, cell->dns_status, cell->dns_lookup_count);
pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);


@ -144,14 +144,17 @@
static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;
static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end, int *reserved);
ext4_lblk_t end, int *reserved,
struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len,
struct pending_reservation **prealloc);
int __init ext4_init_es(void)
{
@ -448,22 +451,49 @@ static void ext4_es_list_del(struct inode *inode)
spin_unlock(&sbi->s_es_lock);
}
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
ext4_fsblk_t pblk)
static inline struct pending_reservation *__alloc_pending(bool nofail)
{
if (!nofail)
return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
}
static inline void __free_pending(struct pending_reservation *pr)
{
kmem_cache_free(ext4_pending_cachep, pr);
}
/*
* Returns true if we cannot fail to allocate memory for this extent_status
* entry and cannot reclaim it until its status changes.
*/
static inline bool ext4_es_must_keep(struct extent_status *es)
{
/* fiemap, bigalloc, and seek_data/hole need to use it. */
if (ext4_es_is_delayed(es))
return true;
return false;
}
static inline struct extent_status *__es_alloc_extent(bool nofail)
{
if (!nofail)
return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
}
static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
struct extent_status *es;
es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
if (es == NULL)
return NULL;
es->es_lblk = lblk;
es->es_len = len;
es->es_pblk = pblk;
/*
* We don't count delayed extent because we never try to reclaim them
*/
if (!ext4_es_is_delayed(es)) {
/* We never try to reclaim a must kept extent, so we don't count it. */
if (!ext4_es_must_keep(es)) {
if (!EXT4_I(inode)->i_es_shk_nr++)
ext4_es_list_add(inode);
percpu_counter_inc(&EXT4_SB(inode->i_sb)->
@ -472,8 +502,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
EXT4_I(inode)->i_es_all_nr++;
percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
}
return es;
static inline void __es_free_extent(struct extent_status *es)
{
kmem_cache_free(ext4_es_cachep, es);
}
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
@ -481,8 +514,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
EXT4_I(inode)->i_es_all_nr--;
percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
/* Decrease the shrink counter when this es is not delayed */
if (!ext4_es_is_delayed(es)) {
/* Decrease the shrink counter when we can reclaim the extent. */
if (!ext4_es_must_keep(es)) {
BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
if (!--EXT4_I(inode)->i_es_shk_nr)
ext4_es_list_del(inode);
@ -490,7 +523,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
s_es_stats.es_stats_shk_cnt);
}
kmem_cache_free(ext4_es_cachep, es);
__es_free_extent(es);
}
/*
@ -751,7 +784,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
}
#endif
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node **p = &tree->root.rb_node;
@ -791,10 +825,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
}
}
es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
newes->es_pblk);
if (prealloc)
es = prealloc;
else
es = __es_alloc_extent(false);
if (!es)
return -ENOMEM;
ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
newes->es_pblk);
rb_link_node(&es->rb_node, parent, p);
rb_insert_color(&es->rb_node, &tree->root);
@ -815,8 +854,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err = 0;
int err1 = 0, err2 = 0, err3 = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct extent_status *es1 = NULL;
struct extent_status *es2 = NULL;
struct pending_reservation *pr = NULL;
bool revise_pending = false;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
@ -844,29 +887,57 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_es_insert_extent_check(inode, &newes);
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, end, NULL);
if (err != 0)
goto error;
revise_pending = sbi->s_cluster_ratio > 1 &&
test_opt(inode->i_sb, DELALLOC) &&
(status & (EXTENT_STATUS_WRITTEN |
EXTENT_STATUS_UNWRITTEN));
retry:
err = __es_insert_extent(inode, &newes);
if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
128, EXT4_I(inode)))
goto retry;
if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
err = 0;
if (err1 && !es1)
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
if ((err1 || err2 || err3) && revise_pending && !pr)
pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
(status & EXTENT_STATUS_WRITTEN ||
status & EXTENT_STATUS_UNWRITTEN))
__revise_pending(inode, lblk, len);
err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
if (err1 != 0)
goto error;
/* Free preallocated extent if it didn't get used. */
if (es1) {
if (!es1->es_len)
__es_free_extent(es1);
es1 = NULL;
}
err2 = __es_insert_extent(inode, &newes, es2);
if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
err2 = 0;
if (err2 != 0)
goto error;
/* Free preallocated extent if it didn't get used. */
if (es2) {
if (!es2->es_len)
__es_free_extent(es2);
es2 = NULL;
}
if (revise_pending) {
err3 = __revise_pending(inode, lblk, len, &pr);
if (err3 != 0)
goto error;
if (pr) {
__free_pending(pr);
pr = NULL;
}
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
if (err1 || err2 || err3)
goto retry;
ext4_es_print_tree(inode);
return err;
return 0;
}
/*
@ -899,7 +970,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
if (!es || es->es_lblk > end)
__es_insert_extent(inode, &newes);
__es_insert_extent(inode, &newes, NULL);
write_unlock(&EXT4_I(inode)->i_es_lock);
}
@ -1270,7 +1341,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
rc->ndelonly--;
node = rb_next(&pr->rb_node);
rb_erase(&pr->rb_node, &tree->root);
kmem_cache_free(ext4_pending_cachep, pr);
__free_pending(pr);
if (!node)
break;
pr = rb_entry(node, struct pending_reservation,
@ -1289,6 +1360,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* @lblk - first block in range
* @end - last block in range
* @reserved - number of cluster reservations released
* @prealloc - pre-allocated es to avoid memory allocation failures
*
* If @reserved is not NULL and delayed allocation is enabled, counts
* block/cluster reservations freed by removing range and if bigalloc
@ -1296,7 +1368,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* error code on failure.
*/
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end, int *reserved)
ext4_lblk_t end, int *reserved,
struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node *node;
@ -1304,14 +1377,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status orig_es;
ext4_lblk_t len1, len2;
ext4_fsblk_t block;
int err;
int err = 0;
bool count_reserved = true;
struct rsvd_count rc;
if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
count_reserved = false;
retry:
err = 0;
es = __es_tree_search(&tree->root, lblk);
if (!es)
@ -1345,14 +1416,13 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
orig_es.es_len - len2;
ext4_es_store_pblock_status(&newes, block,
ext4_es_status(&orig_es));
err = __es_insert_extent(inode, &newes);
err = __es_insert_extent(inode, &newes, prealloc);
if (err) {
if (!ext4_es_must_keep(&newes))
return 0;
es->es_lblk = orig_es.es_lblk;
es->es_len = orig_es.es_len;
if ((err == -ENOMEM) &&
__es_shrink(EXT4_SB(inode->i_sb),
128, EXT4_I(inode)))
goto retry;
goto out;
}
} else {
@ -1432,6 +1502,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end;
int err = 0;
int reserved = 0;
struct extent_status *es = NULL;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
@ -1446,17 +1517,29 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
end = lblk + len - 1;
BUG_ON(end < lblk);
retry:
if (err && !es)
es = __es_alloc_extent(true);
/*
* ext4_clear_inode() depends on us taking i_es_lock unconditionally
* so that we are sure __es_shrink() is done with the inode before it
* is reclaimed.
*/
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, end, &reserved);
err = __es_remove_extent(inode, lblk, end, &reserved, es);
/* Free preallocated extent if it didn't get used. */
if (es) {
if (!es->es_len)
__es_free_extent(es);
es = NULL;
}
write_unlock(&EXT4_I(inode)->i_es_lock);
if (err)
goto retry;
ext4_es_print_tree(inode);
ext4_da_release_space(inode, reserved);
return err;
return 0;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@ -1704,11 +1787,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
(*nr_to_scan)--;
node = rb_next(&es->rb_node);
/*
* We can't reclaim delayed extent from status tree because
* fiemap, bigalloc, and seek_data/hole need to use it.
*/
if (ext4_es_is_delayed(es))
if (ext4_es_must_keep(es))
goto next;
if (ext4_es_is_referenced(es)) {
ext4_es_clear_referenced(es);
@ -1772,7 +1852,7 @@ void ext4_clear_inode_es(struct inode *inode)
while (node) {
es = rb_entry(node, struct extent_status, rb_node);
node = rb_next(node);
if (!ext4_es_is_delayed(es)) {
if (!ext4_es_must_keep(es)) {
rb_erase(&es->rb_node, &tree->root);
ext4_es_free_extent(inode, es);
}
@ -1859,11 +1939,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
*
* @inode - file containing the cluster
* @lblk - logical block in the cluster to be added
* @prealloc - preallocated pending entry
*
* Returns 0 on successful insertion and -ENOMEM on failure. If the
* pending reservation is already in the set, returns successfully.
*/
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
@ -1889,10 +1971,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
}
}
pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
if (pr == NULL) {
ret = -ENOMEM;
goto out;
if (likely(*prealloc == NULL)) {
pr = __alloc_pending(false);
if (!pr) {
ret = -ENOMEM;
goto out;
}
} else {
pr = *prealloc;
*prealloc = NULL;
}
pr->lclu = lclu;
@ -1922,7 +2009,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
if (pr != NULL) {
tree = &EXT4_I(inode)->i_pending_tree;
rb_erase(&pr->rb_node, &tree->root);
kmem_cache_free(ext4_pending_cachep, pr);
__free_pending(pr);
}
}
@ -1983,7 +2070,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
bool allocated)
{
struct extent_status newes;
int err = 0;
int err1 = 0, err2 = 0, err3 = 0;
struct extent_status *es1 = NULL;
struct extent_status *es2 = NULL;
struct pending_reservation *pr = NULL;
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
@ -1998,29 +2088,52 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
ext4_es_insert_extent_check(inode, &newes);
retry:
if (err1 && !es1)
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
if ((err1 || err2 || err3) && allocated && !pr)
pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, lblk, NULL);
if (err != 0)
err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
if (err1 != 0)
goto error;
retry:
err = __es_insert_extent(inode, &newes);
if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
128, EXT4_I(inode)))
goto retry;
if (err != 0)
/* Free preallocated extent if it didn't get used. */
if (es1) {
if (!es1->es_len)
__es_free_extent(es1);
es1 = NULL;
}
err2 = __es_insert_extent(inode, &newes, es2);
if (err2 != 0)
goto error;
/* Free preallocated extent if it didn't get used. */
if (es2) {
if (!es2->es_len)
__es_free_extent(es2);
es2 = NULL;
}
if (allocated)
__insert_pending(inode, lblk);
if (allocated) {
err3 = __insert_pending(inode, lblk, &pr);
if (err3 != 0)
goto error;
if (pr) {
__free_pending(pr);
pr = NULL;
}
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
if (err1 || err2 || err3)
goto retry;
ext4_es_print_tree(inode);
ext4_print_pending_tree(inode);
return err;
return 0;
}
/*
@ -2121,21 +2234,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
* @inode - file containing the range
* @lblk - logical block defining the start of range
* @len - length of range in blocks
* @prealloc - preallocated pending entry
*
* Used after a newly allocated extent is added to the extents status tree.
* Requires that the extents in the range have either written or unwritten
* status. Must be called while holding i_es_lock.
*/
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len)
static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len,
struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t end = lblk + len - 1;
ext4_lblk_t first, last;
bool f_del = false, l_del = false;
int ret = 0;
if (len == 0)
return;
return 0;
/*
* Two cases - block range within single cluster and block range
@ -2156,7 +2272,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
f_del = __es_scan_range(inode, &ext4_es_is_delonly,
first, lblk - 1);
if (f_del) {
__insert_pending(inode, first);
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
} else {
last = EXT4_LBLK_CMASK(sbi, end) +
sbi->s_cluster_ratio - 1;
@ -2164,9 +2282,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
l_del = __es_scan_range(inode,
&ext4_es_is_delonly,
end + 1, last);
if (l_del)
__insert_pending(inode, last);
else
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
} else
__remove_pending(inode, last);
}
} else {
@ -2174,18 +2294,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
if (first != lblk)
f_del = __es_scan_range(inode, &ext4_es_is_delonly,
first, lblk - 1);
if (f_del)
__insert_pending(inode, first);
else
if (f_del) {
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
} else
__remove_pending(inode, first);
last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
if (last != end)
l_del = __es_scan_range(inode, &ext4_es_is_delonly,
end + 1, last);
if (l_del)
__insert_pending(inode, last);
else
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
} else
__remove_pending(inode, last);
}
out:
return ret;
}
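
Every ext4 hunk above serves one pattern: allocations that used to happen with GFP_ATOMIC while holding i_es_lock (and could fail at a point with no way to back out) are restructured to drop the lock, preallocate with __GFP_NOFAIL, and retry. A condensed sketch of the loop, following ext4_es_remove_extent() above:

	struct extent_status *es = NULL;
	int err = 0;

retry:
	if (err && !es)
		es = __es_alloc_extent(true);	/* GFP_KERNEL | __GFP_NOFAIL */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved, es);
	if (es && !es->es_len)
		__es_free_extent(es);		/* prealloc was not consumed */
	es = NULL;
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;	/* second pass has a NOFAIL prealloc, so it cannot loop again */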


@ -82,7 +82,8 @@ int nfsd_drc_slab_create(void);
void nfsd_drc_slab_free(void);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
int nfsd_cache_lookup(struct svc_rqst *);
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
unsigned int len);
void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);


@ -311,33 +311,53 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
return prune_cache_entries(nn);
}
/*
* Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
/**
* nfsd_cache_csum - Checksum incoming NFS Call arguments
* @buf: buffer containing a whole RPC Call message
* @start: starting byte of the NFS Call header
* @remaining: size of the NFS Call header, in bytes
*
* Compute a weak checksum of the leading bytes of an NFS procedure
* call header to help verify that a retransmitted Call matches an
* entry in the duplicate reply cache.
*
* To avoid assumptions about how the RPC message is laid out in
* @buf and what else it might contain (eg, a GSS MIC suffix), the
* caller passes us the exact location and length of the NFS Call
* header.
*
* Returns a 32-bit checksum value, as defined in RFC 793.
*/
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
unsigned int remaining)
{
unsigned int base, len;
struct xdr_buf subbuf;
__wsum csum = 0;
void *p;
int idx;
unsigned int base;
__wsum csum;
struct xdr_buf *buf = &rqstp->rq_arg;
const unsigned char *p = buf->head[0].iov_base;
size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
RC_CSUMLEN);
size_t len = min(buf->head[0].iov_len, csum_len);
if (remaining > RC_CSUMLEN)
remaining = RC_CSUMLEN;
if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
return csum;
/* rq_arg.head first */
csum = csum_partial(p, len, 0);
csum_len -= len;
if (subbuf.head[0].iov_len) {
len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
csum = csum_partial(subbuf.head[0].iov_base, len, csum);
remaining -= len;
}
/* Continue into page array */
idx = buf->page_base / PAGE_SIZE;
base = buf->page_base & ~PAGE_MASK;
while (csum_len) {
p = page_address(buf->pages[idx]) + base;
len = min_t(size_t, PAGE_SIZE - base, csum_len);
idx = subbuf.page_base / PAGE_SIZE;
base = subbuf.page_base & ~PAGE_MASK;
while (remaining) {
p = page_address(subbuf.pages[idx]) + base;
len = min_t(unsigned int, PAGE_SIZE - base, remaining);
csum = csum_partial(p, len, csum);
csum_len -= len;
remaining -= len;
base = 0;
++idx;
}
@ -408,6 +428,8 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
/**
* nfsd_cache_lookup - Find an entry in the duplicate reply cache
* @rqstp: Incoming Call to find
* @start: starting byte in @rqstp->rq_arg of the NFS Call header
* @len: size of the NFS Call header, in bytes
*
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
@ -420,7 +442,8 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
* %RC_REPLY: Reply from cache
* %RC_DROPIT: Do not process the request further
*/
int nfsd_cache_lookup(struct svc_rqst *rqstp)
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
unsigned int len)
{
struct nfsd_net *nn;
struct svc_cacherep *rp, *found;
@ -435,7 +458,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
goto out;
}
csum = nfsd_cache_csum(rqstp);
csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
/*
* Since the common case is a cache miss followed by an insert,


@ -1027,6 +1027,8 @@ nfsd(void *vrqstp)
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
const struct svc_procedure *proc = rqstp->rq_procinfo;
unsigned int start, len;
__be32 *nfs_reply;
/*
* Give the xdr decoder a chance to change this if it wants
@ -1035,10 +1037,18 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
rqstp->rq_cachetype = proc->pc_cachetype;
svcxdr_init_decode(rqstp);
/*
* ->pc_decode advances the argument stream past the NFS
* Call header, so grab the header's starting location and
* size now for the call to nfsd_cache_lookup().
*/
start = xdr_stream_pos(&rqstp->rq_arg_stream);
len = xdr_stream_remaining(&rqstp->rq_arg_stream);
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
switch (nfsd_cache_lookup(rqstp)) {
switch (nfsd_cache_lookup(rqstp, start, len)) {
case RC_DOIT:
break;
case RC_REPLY:
@ -1053,6 +1063,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*/
svcxdr_init_encode(rqstp);
nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
*statp = proc->pc_func(rqstp);
if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
goto out_update_drop;
@ -1060,7 +1071,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
nfsd_cache_update(rqstp, rqstp->rq_cachetype, nfs_reply);
out_cached_reply:
return 1;


@ -220,6 +220,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct cifs_server_iface *iface;
size_t iface_weight = 0, iface_min_speed = 0;
struct cifs_server_iface *last_iface = NULL;
int c, i, j;
seq_puts(m,
@ -457,13 +459,29 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
spin_lock(&ses->iface_lock);
if (ses->iface_count)
seq_printf(m, "\n\n\tServer interfaces: %zu",
ses->iface_count);
seq_printf(m, "\n\n\tServer interfaces: %zu"
"\tLast updated: %lu seconds ago",
ses->iface_count,
(jiffies - ses->iface_last_update) / HZ);
last_iface = list_last_entry(&ses->iface_list,
struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
j = 0;
list_for_each_entry(iface, &ses->iface_list,
iface_head) {
seq_printf(m, "\n\t%d)", ++j);
cifs_dump_iface(m, iface);
iface_weight = iface->speed / iface_min_speed;
seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
"\n\t\tAllocated channels: %u\n",
iface->weight_fulfilled,
iface_weight,
iface->num_channels);
if (is_ses_using_iface(ses, iface))
seq_puts(m, "\t\t[CONNECTED]\n");
}

View File

@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
__u64 cifs_posix_caps;
} __packed;
struct smb_mnt_tcon_info {
__u32 tid;
__u64 session_id;
} __packed;
struct smb_snapshot_array {
__u32 number_of_snapshots;
__u32 number_of_snapshots_returned;
@ -108,7 +113,8 @@ struct smb3_notify_info {
#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
#define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
#define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
#define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
/*
* Flags for going down operation

View File

@ -105,8 +105,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, struct dir_context *ctx);


@ -788,6 +788,7 @@ static inline unsigned int
in_flight(struct TCP_Server_Info *server)
{
unsigned int num;
spin_lock(&server->req_lock);
num = server->in_flight;
spin_unlock(&server->req_lock);
@ -798,6 +799,7 @@ static inline bool
has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
{
int num;
spin_lock(&server->req_lock);
num = *credits;
spin_unlock(&server->req_lock);
@ -954,6 +956,8 @@ struct cifs_server_iface {
struct list_head iface_head;
struct kref refcount;
size_t speed;
size_t weight_fulfilled;
unsigned int num_channels;
unsigned int rdma_capable : 1;
unsigned int rss_capable : 1;
unsigned int is_active : 1; /* unset if non existent */
@ -991,7 +995,7 @@ struct cifs_ses {
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */
unsigned overrideSecFlg; /* if non-zero override global sec flags */
unsigned int overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
@ -1347,7 +1351,7 @@ struct cifsFileInfo {
__u32 pid; /* process id who opened file */
struct cifs_fid fid; /* file id from remote */
struct list_head rlist; /* reconnect list */
/* BB add lock scope info here if needed */ ;
/* BB add lock scope info here if needed */
/* lock scope id (0 if none) */
struct dentry *dentry;
struct tcon_link *tlink;
@ -1735,6 +1739,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
int number_of_items)
{
int i;
if ((number_of_items == 0) || (param == NULL))
return;
for (i = 0; i < number_of_items; i++) {


@ -2070,6 +2070,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
}
}
/* we now account for primary channel in iface->refcount */
if (ses->chans[0].iface) {
kref_put(&ses->chans[0].iface->refcount, release_iface);
ses->chans[0].server = NULL;
}
sesInfoFree(ses);
cifs_put_tcp_session(server, 0);
}


@ -117,6 +117,20 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
return rc;
}
static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
{
int rc = 0;
struct smb_mnt_tcon_info tcon_inf;
tcon_inf.tid = tcon->tid;
tcon_inf.session_id = tcon->ses->Suid;
if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
rc = -EFAULT;
return rc;
}
static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
void __user *arg)
{
@ -410,6 +424,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
tcon = tlink_tcon(pSMBFile->tlink);
rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
break;
case CIFS_IOC_GET_TCON_INFO:
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
break;
}
tcon = tlink_tcon(tlink);
rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
cifs_put_tlink(tlink);
break;
case CIFS_ENUMERATE_SNAPSHOTS:
if (pSMBFile == NULL)
break;


@ -164,7 +164,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
int left;
int rc = 0;
int tries = 0;
size_t iface_weight = 0, iface_min_speed = 0;
struct cifs_server_iface *iface = NULL, *niface = NULL;
struct cifs_server_iface *last_iface = NULL;
spin_lock(&ses->chan_lock);
@ -192,21 +194,11 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
}
spin_unlock(&ses->chan_lock);
/*
* Keep connecting to same, fastest, iface for all channels as
* long as its RSS. Try next fastest one if not RSS or channel
* creation fails.
*/
spin_lock(&ses->iface_lock);
iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
spin_unlock(&ses->iface_lock);
while (left > 0) {
tries++;
if (tries > 3*ses->chan_max) {
cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
left);
break;
}
@ -214,17 +206,35 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
cifs_dbg(VFS, "server %s does not advertise interfaces\n",
ses->server->hostname);
break;
}
if (!iface)
iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
iface_head) {
/* do not mix rdma and non-rdma interfaces */
if (iface->rdma_capable != ses->server->rdma)
continue;
/* skip ifaces that are unusable */
if (!iface->is_active ||
(is_ses_using_iface(ses, iface) &&
!iface->rss_capable)) {
!iface->rss_capable))
continue;
/* check if we already allocated enough channels */
iface_weight = iface->speed / iface_min_speed;
if (iface->weight_fulfilled >= iface_weight)
continue;
}
/* take ref before unlock */
kref_get(&iface->refcount);
@ -241,10 +251,21 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
continue;
}
cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
iface->num_channels++;
iface->weight_fulfilled++;
cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
&iface->sockaddr);
break;
}
/* reached end of list. reset weight_fulfilled and start over */
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
list_for_each_entry(iface, &ses->iface_list, iface_head)
iface->weight_fulfilled = 0;
spin_unlock(&ses->iface_lock);
iface = NULL;
continue;
}
spin_unlock(&ses->iface_lock);
left--;
@@ -263,8 +284,11 @@ int
cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
{
unsigned int chan_index;
size_t iface_weight = 0, iface_min_speed = 0;
struct cifs_server_iface *iface = NULL;
struct cifs_server_iface *old_iface = NULL;
struct cifs_server_iface *last_iface = NULL;
struct sockaddr_storage ss;
int rc = 0;
spin_lock(&ses->chan_lock);
@@ -283,14 +307,49 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
}
spin_unlock(&ses->chan_lock);
spin_lock(&server->srv_lock);
ss = server->dstaddr;
spin_unlock(&server->srv_lock);
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
return 0;
}
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
/* then look for a new one */
list_for_each_entry(iface, &ses->iface_list, iface_head) {
if (!chan_index) {
/* if we're trying to get the updated iface for primary channel */
if (!cifs_match_ipaddr((struct sockaddr *) &ss,
(struct sockaddr *) &iface->sockaddr))
continue;
kref_get(&iface->refcount);
break;
}
/* do not mix rdma and non-rdma interfaces */
if (iface->rdma_capable != server->rdma)
continue;
if (!iface->is_active ||
(is_ses_using_iface(ses, iface) &&
!iface->rss_capable)) {
continue;
}
/* check if we already allocated enough channels */
iface_weight = iface->speed / iface_min_speed;
if (iface->weight_fulfilled >= iface_weight)
continue;
kref_get(&iface->refcount);
break;
}
@@ -301,16 +360,41 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
cifs_dbg(FYI, "unable to find a suitable iface\n");
}
if (!chan_index && !iface) {
cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
&ss);
spin_unlock(&ses->iface_lock);
return 0;
}
/* now drop the ref to the current iface */
if (old_iface && iface) {
cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
&old_iface->sockaddr,
&iface->sockaddr);
old_iface->num_channels--;
if (old_iface->weight_fulfilled)
old_iface->weight_fulfilled--;
iface->num_channels++;
iface->weight_fulfilled++;
kref_put(&old_iface->refcount, release_iface);
} else if (old_iface) {
cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
&old_iface->sockaddr);
old_iface->num_channels--;
if (old_iface->weight_fulfilled)
old_iface->weight_fulfilled--;
kref_put(&old_iface->refcount, release_iface);
} else if (!chan_index) {
/* special case: update interface for primary channel */
cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
&iface->sockaddr);
iface->num_channels++;
iface->weight_fulfilled++;
} else {
WARN_ON(!iface);
cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
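
Taken together, these hunks replace "always bind every channel to the fastest interface" with a weighted round-robin: each interface earns speed / min_speed channel slots per round, where min_speed is the slowest advertised interface (the last entry of the speed-sorted list), and weight_fulfilled is reset once every interface has received its share. A minimal userspace sketch of that policy, with a hypothetical struct and a simplified one-slot-per-pass hand-out (the kernel instead restarts its scan after each successfully opened channel):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for struct cifs_server_iface: only the
 * fields the weighting policy needs. */
struct iface {
	const char *name;
	size_t speed;            /* advertised link speed */
	size_t weight_fulfilled; /* channels handed out this round */
};

int main(void)
{
	/* kept sorted by speed: fastest first, slowest last */
	struct iface ifaces[] = {
		{ "A", 10000, 0 }, { "B", 10000, 0 }, { "C", 1000, 0 },
	};
	size_t n = sizeof(ifaces) / sizeof(ifaces[0]);
	size_t min_speed = ifaces[n - 1].speed; /* last entry = slowest */
	int channels = 7;

	while (channels > 0) {
		int placed = 0;

		for (size_t i = 0; i < n && channels > 0; i++) {
			size_t weight = ifaces[i].speed / min_speed;

			if (ifaces[i].weight_fulfilled >= weight)
				continue; /* share for this round exhausted */
			ifaces[i].weight_fulfilled++;
			printf("channel -> %s\n", ifaces[i].name);
			channels--;
			placed = 1;
		}
		if (!placed) /* full pass done: reset and start a new round */
			for (size_t i = 0; i < n; i++)
				ifaces[i].weight_fulfilled = 0;
	}
	return 0;
}

With speeds 10000/10000/1000 the seven channels land roughly 3:3:1, which is the proportional spread the cifs change is after; the old code would have put all seven on interface A.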


@@ -752,6 +752,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
unsigned int ret_data_len = 0;
struct network_interface_info_ioctl_rsp *out_buf = NULL;
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *pserver;
/* do not query too frequently */
if (ses->iface_last_update &&
@@ -776,6 +777,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
if (rc)
goto out;
/* check if iface is still active */
pserver = ses->chans[0].server;
if (pserver && !cifs_chan_is_iface_active(ses, pserver))
cifs_chan_update_iface(ses, pserver);
out:
kfree(out_buf);
return rc;


@@ -656,6 +656,7 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
struct kref ref;
unsigned int id; /* system unique id */
@@ -663,6 +664,8 @@ struct hid_device { /* device report descriptor */
ANDROID_KABI_RESERVE(2);
};
void hiddev_free(struct kref *ref);
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
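
The new ref field brings the standard kref pattern to hid_device: the debugfs events reader holds its own reference, so the structure outlives hid_destroy_device() until the last user drops out and hiddev_free() runs. A hedged sketch of that lifecycle, with illustrative caller names that are not part of the actual hid-core hunks:

#include <linux/hid.h>
#include <linux/kref.h>

/* Illustrative only: a debug reader pins the device before it
 * sleeps on debug_wait... */
static void example_reader_get(struct hid_device *hdev)
{
	kref_get(&hdev->ref);
}

/* ...and drops the pin when done; hiddev_free() is invoked only by
 * whichever put turns out to be the last one. */
static void example_reader_put(struct hid_device *hdev)
{
	kref_put(&hdev->ref, hiddev_free);
}

This is what closes the race in the changelog entry "HID: fix HID device resource race between HID core and debugging support": freeing moves from a fixed point to the final reference drop.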


@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
lnk->flags = READ_ONCE(sqe->hardlink_flags);
lnk->oldpath = getname_uflags(oldf, lnk->flags);
if (IS_ERR(lnk->oldpath))
return PTR_ERR(lnk->oldpath);
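
The one-line switch matters because linkat(2) accepts an empty oldpath only when AT_EMPTY_PATH is set, and plain getname(), unlike getname_uflags(), rejects the empty string outright, so IORING_OP_LINKAT could never link an open fd by itself. The plain-syscall equivalent of what now also works through io_uring (AT_EMPTY_PATH linkat typically requires CAP_DAC_READ_SEARCH):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("source.txt", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "" as oldpath is only legal because AT_EMPTY_PATH is passed */
	if (linkat(fd, "", AT_FDCWD, "hardlink.txt", AT_EMPTY_PATH) < 0) {
		perror("linkat");
		return 1;
	}
	return 0;
}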


@@ -1351,7 +1351,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
*/
const struct bio_vec *bvec = imu->bvec;
if (offset < bvec->bv_len) {
iov_iter_advance(iter, offset);
} else {
unsigned long seg_skip;
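
The strict < matters exactly at a segment boundary: fixed buffers are registered as page-sized bvecs, so an offset equal to bv_len starts in the second bvec and must take the seg_skip path rather than "advancing" within the first. A quick check of the arithmetic, assuming 4096-byte segments:

#include <stdio.h>

int main(void)
{
	unsigned long bv_len = 4096, offset = 4096;

	/* offset == bv_len: skip one whole segment, land 0 bytes into it */
	printf("seg_skip=%lu seg_off=%lu\n", offset / bv_len, offset % bv_len);
	return 0;
}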


@@ -3453,7 +3453,8 @@ static int alloc_chain_hlocks(int req)
size = chain_block_size(curr);
if (likely(size >= req)) {
del_chain_block(0, size, chain_block_next(curr));
if (size > req)
add_chain_block(curr + req, size - req);
return curr;
}
}
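
The new guard keeps a zero-size remainder out of the chain-block buckets when the allocation is an exact fit. A toy free-list sketch of the same split-allocate pattern (hypothetical helpers and a flat array, not the lockdep internals):

#include <assert.h>
#include <stdio.h>

/* Toy free list of (offset, size) blocks. */
struct block { int off, size; };
static struct block freelist[8];
static int nfree;

static void add_block(int off, int size)
{
	assert(size > 0); /* a zero-size block would corrupt the list */
	freelist[nfree].off = off;
	freelist[nfree].size = size;
	nfree++;
}

static int alloc_block(int req)
{
	for (int i = 0; i < nfree; i++) {
		if (freelist[i].size < req)
			continue;
		struct block b = freelist[i];

		freelist[i] = freelist[--nfree]; /* unlink the entry */
		if (b.size > req)                /* the fix: skip empty remainder */
			add_block(b.off + req, b.size - req);
		return b.off;
	}
	return -1;
}

int main(void)
{
	add_block(0, 4);
	/* exact fit: nothing must be re-added to the free list */
	printf("alloc(4) -> %d, blocks left: %d\n", alloc_block(4), nfree);
	return 0;
}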


@@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
goto reject_redirect;
}
n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
if (!n)
n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
if (!IS_ERR(n)) {
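
The cast only affects sparse: __be32 is a __bitwise type, so passing new_gw where a plain u32 key is expected triggers an endianness warning even though the bit pattern is identical, and (__force u32) records that the reinterpretation is deliberate. A minimal illustration with sparse's annotations stubbed out (hypothetical lookup function, not the kernel's):

#include <stdio.h>

/* Outside a sparse build, __force compiles away exactly like this. */
#define __force
typedef unsigned int u32;
typedef unsigned int __be32;

static u32 neigh_key(u32 key) { return key; } /* hypothetical lookup key */

int main(void)
{
	__be32 new_gw = 0x0100007f; /* 127.0.0.1 as seen on little-endian */

	/* same bits, different sparse type; __force silences the warning */
	printf("key=%08x\n", neigh_key((__force u32)new_gw));
	return 0;
}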


@@ -585,8 +585,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
struct smc_llc_qentry *qentry;
int rc;
/* Receive CONFIRM LINK request from server over RoCE fabric.
* Increasing the client's timeout to twice the server's default
* can temporarily avoid decline messages from the two sides
* crossing or colliding.
*/
qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
SMC_LLC_CONFIRM_LINK);
if (!qentry) {
struct smc_clc_msg_decline dclc;