This is the 5.4.58 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl8ynngACgkQONu9yGCS
 aT5cPw/9GiiNZCLyjB3jVyalmN9uDYKQ+eS8H97GG7IBVYyg9whOKIrOCZHcLINH
 DV5s/qnhRWJPzmQ47410ySuPb6QMvkhWTK7i5Xf+K7BOvoz/snfFGcmdu1CA4KxR
 CuuH449Y4l3sH+5fPv7+EToovBqA2cfeiz/i5d5Di/N4yODWBCbHHAsZIt0oGvuK
 sI1aI1K//R7vKZQvQo85tvENbWJWQwDN7eYVQj3aSbvnq8JqVpwTKTBpKfshzgzB
 RmMSOVpQoQYNivW9oleG0NTQeHqj7alG8anFs3Drgu1hs5dzQhqAFlKddhtRS69j
 mtrSJuf0GgJEYA6n+PfacoM4l7kHUZcQH9+bglbCXTA3nH24DLT7h0Lybm+ETPXc
 ZAYo7cHqrS1BWv8VHZggSbqPr6YNKvgeflgS394wAgNEFcaJmaPfS6+elmc0qf2t
 VPSas7QQruRS1Bqwb6CF2tHsl4N+VdvxX1a2JjHHcF5N3z5aqDm1qaVVQHxAg2rE
 gXepFNGkEIBhWRxPz+5quxN1XBcUDiQYrzdoaKHxwY+OcdCf635P5Ob+1WK3w1oa
 qhl/2scs44D67wZSgXHo5N4vMi9HhPOSV77jklgFcg75IzM9YWwYnJADQRHxo7nT
 25XEgUlMfxm0ngw+CZWR0ssaP6rB9V1rRLDqb1W5HbqA7lp2YE8=
 =Fe6H
 -----END PGP SIGNATURE-----

Merge 5.4.58 into android11-5.4

Changes in 5.4.58
	USB: serial: qcserial: add EM7305 QDL product ID
	perf/core: Fix endless multiplex timer
	USB: iowarrior: fix up report size handling for some devices
	usb: xhci: define IDs for various ASMedia host controllers
	usb: xhci: Fix ASMedia ASM1142 DMA addressing
	io_uring: prevent re-read of sqe->opcode
	io_uring: Fix use-after-free in io_sq_wq_submit_work()
	Revert "ALSA: hda: call runtime_allow() for all hda controllers"
	ALSA: hda/realtek: Add alc269/alc662 pin-tables for Loongson-3 laptops
	ALSA: hda/ca0132 - Add new quirk ID for Recon3D.
	ALSA: hda/ca0132 - Fix ZxR Headphone gain control get value.
	ALSA: hda/ca0132 - Fix AE-5 microphone selection commands.
	ALSA: seq: oss: Serialize ioctls
	staging: android: ashmem: Fix lockdep warning for write operation
	staging: rtl8712: handle firmware load failure
	Staging: rtl8188eu: rtw_mlme: Fix uninitialized variable authmode
	Bluetooth: Fix slab-out-of-bounds read in hci_extended_inquiry_result_evt()
	Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_evt()
	Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_with_rssi_evt()
	omapfb: dss: Fix max fclk divider for omap36xx
	binder: Prevent context manager from incrementing ref 0
	Smack: fix use-after-free in smk_write_relabel_self()
	scripts: add dummy report mode to add_namespace.cocci
	vgacon: Fix for missing check in scrollback handling
	mtd: properly check all write ioctls for permissions
	leds: wm831x-status: fix use-after-free on unbind
	leds: lm36274: fix use-after-free on unbind
	leds: da903x: fix use-after-free on unbind
	leds: lm3533: fix use-after-free on unbind
	leds: 88pm860x: fix use-after-free on unbind
	net/9p: validate fds in p9_fd_open
	drm/nouveau/fbcon: fix module unload when fbcon init has failed for some reason
	drm/nouveau/fbcon: zero-initialise the mode_cmd2 structure
	nvme-pci: prevent SK hynix PC400 from using Write Zeroes command
	drm/drm_fb_helper: fix fbdev with sparc64
	i2c: slave: improve sanity check when registering
	i2c: slave: add sanity check when unregistering
	usb: hso: check for return value in hso_serial_common_create()
	net: ethernet: mtk_eth_soc: Always call mtk_gmac0_rgmii_adjust() for mt7623
	ALSA: hda: fix NULL pointer dereference during suspend
	firmware: Fix a reference count leak.
	cfg80211: check vendor command doit pointer before use
	igb: reinit_locked() should be called with rtnl_lock
	atm: fix atm_dev refcnt leaks in atmtcp_remove_persistent
	tools lib traceevent: Fix memory leak in process_dynamic_array_len
	Drivers: hv: vmbus: Ignore CHANNELMSG_TL_CONNECT_RESULT(23)
	xattr: break delegations in {set,remove}xattr
	Revert "powerpc/kasan: Fix shadow pages allocation failure"
	PCI: tegra: Revert tegra124 raw_violation_fixup
	ipv4: Silence suspicious RCU usage warning
	ipv6: fix memory leaks on IPV6_ADDRFORM path
	ipv6: Fix nexthop refcnt leak when creating ipv6 route info
	net: ethernet: mtk_eth_soc: fix MTU warnings
	rxrpc: Fix race between recvmsg and sendmsg on immediate call failure
	vxlan: Ensure FDB dump is performed under RCU
	net: lan78xx: replace bogus endpoint lookup
	appletalk: Fix atalk_proc_init() return path
	dpaa2-eth: Fix passing zero to 'PTR_ERR' warning
	hv_netvsc: do not use VF device if link is down
	net: gre: recompute gre csum for sctp over gre tunnels
	net: thunderx: use spin_lock_bh in nicvf_set_rx_mode_task()
	openvswitch: Prevent kernel-infoleak in ovs_ct_put_key()
	Revert "vxlan: fix tos value before xmit"
	selftests/net: relax cpu affinity requirement in msg_zerocopy test
	tcp: apply a floor of 1 for RTT samples from TCP timestamps
	ima: move APPRAISE_BOOTPARAM dependency on ARCH_POLICY to runtime
	nfsd: Fix NFSv4 READ on RDMA when using readv
	Linux 5.4.58

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0e89e2c0faf90bdf1f6ac37f9a2c2395cacab054
commit 3a9b53bc89
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2020-08-11 18:37:58 +02:00

82 changed files with 817 additions and 341 deletions

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 57
SUBLEVEL = 58
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -27,9 +27,11 @@
#ifdef CONFIG_KASAN
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
#endif
#endif /* __ASSEMBLY */

@ -175,6 +175,8 @@ void __init MMU_init(void)
btext_unmap();
#endif
kasan_mmu_init();
setup_kup();
/* Shortly after that, the entire linear mapping will be available */

@ -129,7 +129,7 @@ static void __init kasan_remap_early_shadow_ro(void)
flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}
static void __init kasan_mmu_init(void)
void __init kasan_mmu_init(void)
{
int ret;
struct memblock_region *reg;
@ -156,8 +156,6 @@ static void __init kasan_mmu_init(void)
void __init kasan_init(void)
{
kasan_mmu_init();
kasan_remap_early_shadow_ro();
clear_page(kasan_early_shadow_page);

@ -3143,6 +3143,12 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
if (WARN_ON(proc == target_proc)) {
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@ -3806,10 +3812,17 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_node *ctx_mgr_node;
mutex_lock(&context->context_mgr_node_lock);
ctx_mgr_node = context->binder_context_mgr_node;
if (ctx_mgr_node)
if (ctx_mgr_node) {
if (ctx_mgr_node->proc == proc) {
binder_user_error("%d:%d context manager tried to acquire desc 0\n",
proc->pid, thread->pid);
mutex_unlock(&context->context_mgr_node_lock);
return -EINVAL;
}
ret = binder_inc_ref_for_node(
proc, ctx_mgr_node,
strong, NULL, &rdata);
}
mutex_unlock(&context->context_mgr_node_lock);
}
if (ret)

@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
if (!dev_data->persist) return 0;
if (!dev_data->persist) {
atm_dev_put(dev);
return 0;
}
dev_data->persist = 0;
if (PRIV(dev)->vcc) return 0;
if (PRIV(dev)->vcc) {
atm_dev_put(dev);
return 0;
}
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);

@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_register;
if (err) {
kobject_put(&entry->kobj);
return err;
}
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
err_register:
kfree(entry);
return err;
}

@ -194,6 +194,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;

@ -390,7 +390,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
memcpy(dst, src, len);
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);
src += fb->pitches[0];
dst += fb->pitches[0];
}

@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@ -592,6 +592,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
drm->fbcon = NULL;
return ret;
}

@ -1354,6 +1354,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_19, 0, NULL },
{ CHANNELMSG_20, 0, NULL },
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
{ CHANNELMSG_22, 0, NULL },
{ CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
};
/*
@ -1365,25 +1367,16 @@ void vmbus_onmessage(void *context)
{
struct hv_message *msg = context;
struct vmbus_channel_message_header *hdr;
int size;
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
trace_vmbus_on_message(hdr);
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
print_hex_dump_bytes("", DUMP_PREFIX_NONE,
(unsigned char *)msg->u.payload, size);
return;
}
if (channel_message_table[hdr->msgtype].message_handler)
channel_message_table[hdr->msgtype].message_handler(hdr);
else
pr_err("Unhandled channel message type %d\n", hdr->msgtype);
/*
* vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
* out of bound and the message_handler pointer can not be NULL.
*/
channel_message_table[hdr->msgtype].message_handler(hdr);
}
/*

@ -1073,6 +1073,10 @@ void vmbus_on_msg_dpc(unsigned long data)
}
entry = &channel_message_table[hdr->msgtype];
if (!entry->message_handler)
goto msg_handled;
if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx == NULL)

@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
if (!client || !slave_cb) {
WARN(1, "insufficient data\n");
if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
}
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
if (IS_ERR_OR_NULL(client))
return -EINVAL;
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;

@ -203,21 +203,33 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->cdev.brightness_set_blocking = pm860x_led_set;
mutex_init(&data->lock);
ret = devm_led_classdev_register(chip->dev, &data->cdev);
ret = led_classdev_register(chip->dev, &data->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
}
pm860x_led_set(&data->cdev, 0);
platform_set_drvdata(pdev, data);
return 0;
}
static int pm860x_led_remove(struct platform_device *pdev)
{
struct pm860x_led *data = platform_get_drvdata(pdev);
led_classdev_unregister(&data->cdev);
return 0;
}
static struct platform_driver pm860x_led_driver = {
.driver = {
.name = "88pm860x-led",
},
.probe = pm860x_led_probe,
.remove = pm860x_led_remove,
};
module_platform_driver(pm860x_led_driver);

@ -110,12 +110,23 @@ static int da903x_led_probe(struct platform_device *pdev)
led->flags = pdata->flags;
led->master = pdev->dev.parent;
ret = devm_led_classdev_register(led->master, &led->cdev);
ret = led_classdev_register(led->master, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", id);
return ret;
}
platform_set_drvdata(pdev, led);
return 0;
}
static int da903x_led_remove(struct platform_device *pdev)
{
struct da903x_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
return 0;
}
@ -124,6 +135,7 @@ static struct platform_driver da903x_led_driver = {
.name = "da903x-led",
},
.probe = da903x_led_probe,
.remove = da903x_led_remove,
};
module_platform_driver(da903x_led_driver);

@ -694,7 +694,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
ret = led_classdev_register(pdev->dev.parent, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
return ret;
@ -704,13 +704,18 @@ static int lm3533_led_probe(struct platform_device *pdev)
ret = lm3533_led_setup(led, pdata);
if (ret)
return ret;
goto err_deregister;
ret = lm3533_ctrlbank_enable(&led->cb);
if (ret)
return ret;
goto err_deregister;
return 0;
err_deregister:
led_classdev_unregister(&led->cdev);
return ret;
}
static int lm3533_led_remove(struct platform_device *pdev)
@ -720,6 +725,7 @@ static int lm3533_led_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
lm3533_ctrlbank_disable(&led->cb);
led_classdev_unregister(&led->cdev);
return 0;
}

@ -133,7 +133,7 @@ static int lm36274_probe(struct platform_device *pdev)
lm36274_data->pdev = pdev;
lm36274_data->dev = lmu->dev;
lm36274_data->regmap = lmu->regmap;
dev_set_drvdata(&pdev->dev, lm36274_data);
platform_set_drvdata(pdev, lm36274_data);
ret = lm36274_parse_dt(lm36274_data);
if (ret) {
@ -147,8 +147,16 @@ static int lm36274_probe(struct platform_device *pdev)
return ret;
}
return devm_led_classdev_register(lm36274_data->dev,
&lm36274_data->led_dev);
return led_classdev_register(lm36274_data->dev, &lm36274_data->led_dev);
}
static int lm36274_remove(struct platform_device *pdev)
{
struct lm36274 *lm36274_data = platform_get_drvdata(pdev);
led_classdev_unregister(&lm36274_data->led_dev);
return 0;
}
static const struct of_device_id of_lm36274_leds_match[] = {
@ -159,6 +167,7 @@ MODULE_DEVICE_TABLE(of, of_lm36274_leds_match);
static struct platform_driver lm36274_driver = {
.probe = lm36274_probe,
.remove = lm36274_remove,
.driver = {
.name = "lm36274-leds",
},

@ -269,12 +269,23 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->cdev.blink_set = wm831x_status_blink_set;
drvdata->cdev.groups = wm831x_status_groups;
ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, drvdata);
return 0;
}
static int wm831x_status_remove(struct platform_device *pdev)
{
struct wm831x_status *drvdata = platform_get_drvdata(pdev);
led_classdev_unregister(&drvdata->cdev);
return 0;
}
@ -283,6 +294,7 @@ static struct platform_driver wm831x_status_driver = {
.name = "wm831x-status",
},
.probe = wm831x_status_probe,
.remove = wm831x_status_remove,
};
module_platform_driver(wm831x_status_driver);

@ -354,9 +354,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t retlen;
int ret = 0;
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
if (length > 4096)
return -EINVAL;
@ -641,6 +638,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
pr_debug("MTD_ioctl\n");
/*
* Check the file mode to require "dangerous" commands to have write
* permissions.
*/
switch (cmd) {
/* "safe" commands */
case MEMGETREGIONCOUNT:
case MEMGETREGIONINFO:
case MEMGETINFO:
case MEMREADOOB:
case MEMREADOOB64:
case MEMLOCK:
case MEMUNLOCK:
case MEMISLOCKED:
case MEMGETOOBSEL:
case MEMGETBADBLOCK:
case MEMSETBADBLOCK:
case OTPSELECT:
case OTPGETREGIONCOUNT:
case OTPGETREGIONINFO:
case OTPLOCK:
case ECCGETLAYOUT:
case ECCGETSTATS:
case MTDFILEMODE:
case BLKPG:
case BLKRRPART:
break;
/* "dangerous" commands */
case MEMERASE:
case MEMERASE64:
case MEMWRITEOOB:
case MEMWRITEOOB64:
case MEMWRITE:
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
break;
default:
return -ENOTTY;
}
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
@ -688,9 +727,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct erase_info *erase;
if(!(file->f_mode & FMODE_WRITE))
return -EPERM;
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
@ -983,9 +1019,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
default:
ret = -ENOTTY;
}
return ret;
@ -1029,6 +1062,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
struct mtd_oob_buf32 buf;
struct mtd_oob_buf32 __user *buf_user = argp;
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EPERM;
break;
}
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else

@ -2047,11 +2047,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
spin_lock(&nic->rx_mode_wq_lock);
spin_lock_bh(&nic->rx_mode_wq_lock);
mode = vf_work->mode;
mc = vf_work->mc;
vf_work->mc = NULL;
spin_unlock(&nic->rx_mode_wq_lock);
spin_unlock_bh(&nic->rx_mode_wq_lock);
__nicvf_set_rx_mode_task(mode, mc, nic);
}

@ -2090,7 +2090,7 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
free:
fsl_mc_object_free(dpcon);
return NULL;
return ERR_PTR(err);
}
static void free_dpcon(struct dpaa2_eth_priv *priv,
@ -2114,8 +2114,8 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return NULL;
channel->dpcon = setup_dpcon(priv);
if (IS_ERR_OR_NULL(channel->dpcon)) {
err = PTR_ERR_OR_ZERO(channel->dpcon);
if (IS_ERR(channel->dpcon)) {
err = PTR_ERR(channel->dpcon);
goto err_setup;
}

@ -6194,9 +6194,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IGB_DOWN, &adapter->state) ||
test_bit(__IGB_RESETTING, &adapter->state)) {
rtnl_unlock();
return;
}
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
rtnl_unlock();
}
/**

@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface, int speed)
{
u32 val;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
val = 500000000;
ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
return;
}
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
if (state->interface !=
PHY_INTERFACE_MODE_TRGMII)
mtk_gmac0_rgmii_adjust(mac->hw,
state->speed);
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@ -2869,6 +2878,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
return 0;
free_netdev:

@ -531,12 +531,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
/* if VF is present and up then redirect packets
* already called with rcu_read_lock_bh
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
* If netpoll is in uses, then VF can not be used either.
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
!netpoll_tx_running(net))
netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will atmost need two pages to describe the rndis

@ -2260,12 +2260,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
minor = get_free_serial_index();
if (minor < 0)
goto exit;
goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
tty_drv, minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
if (IS_ERR(serial->parent->dev))
goto exit2;
/* fill in specific data for later use */
serial->minor = minor;
@ -2310,6 +2312,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
hso_serial_tty_unregister(serial);
exit2:
hso_serial_common_free(serial);
return -1;
}

@ -377,10 +377,6 @@ struct lan78xx_net {
struct tasklet_struct bh;
struct delayed_work wq;
struct usb_host_endpoint *ep_blkin;
struct usb_host_endpoint *ep_blkout;
struct usb_host_endpoint *ep_intr;
int msg_enable;
struct urb *urb_intr;
@ -2868,78 +2864,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt = NULL;
struct usb_host_endpoint *in = NULL, *out = NULL;
struct usb_host_endpoint *status = NULL;
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
unsigned ep;
in = NULL;
out = NULL;
status = NULL;
alt = intf->altsetting + tmp;
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
int intr = 0;
e = alt->endpoint + ep;
switch (e->desc.bmAttributes) {
case USB_ENDPOINT_XFER_INT:
if (!usb_endpoint_dir_in(&e->desc))
continue;
intr = 1;
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_BULK:
break;
default:
continue;
}
if (usb_endpoint_dir_in(&e->desc)) {
if (!intr && !in)
in = e;
else if (intr && !status)
status = e;
} else {
if (!out)
out = e;
}
}
if (in && out)
break;
}
if (!alt || !in || !out)
return -EINVAL;
dev->pipe_in = usb_rcvbulkpipe(dev->udev,
in->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
dev->pipe_out = usb_sndbulkpipe(dev->udev,
out->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
dev->ep_intr = status;
return 0;
}
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = NULL;
int ret;
int i;
ret = lan78xx_get_endpoints(dev, intf);
if (ret) {
netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
ret);
return ret;
}
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = (struct lan78xx_priv *)(dev->data[0]);
@ -3708,6 +3638,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
struct lan78xx_net *dev;
struct net_device *netdev;
struct usb_device *udev;
@ -3756,6 +3687,34 @@ static int lan78xx_probe(struct usb_interface *intf,
mutex_init(&dev->stats.access_lock);
if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
ret = -ENODEV;
goto out2;
}
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
ret = -ENODEV;
goto out2;
}
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
ret = -ENODEV;
goto out2;
}
ep_intr = &intf->cur_altsetting->endpoint[2];
if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
ret = -ENODEV;
goto out2;
}
dev->pipe_intr = usb_rcvintpipe(dev->udev,
usb_endpoint_num(&ep_intr->desc));
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@ -3767,23 +3726,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
ret = -ENODEV;
goto out3;
}
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
dev->pipe_intr = usb_rcvintpipe(dev->udev,
dev->ep_intr->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK);
period = dev->ep_intr->desc.bInterval;
period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {

@ -1225,6 +1225,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
@ -1237,12 +1238,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
if (err < 0)
if (err < 0) {
rcu_read_unlock();
goto out;
}
skip:
*idx += 1;
}
}
rcu_read_unlock();
}
out:
return err;
@ -2546,7 +2550,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@ -2586,7 +2590,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),

@ -3140,6 +3140,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },

@ -181,13 +181,6 @@
#define AFI_PEXBIAS_CTRL_0 0x168
#define RP_PRIV_XP_DL 0x00000494
#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
#define RP_RX_HDR_LIMIT 0x00000e00
#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
#define RP_ECTL_2_R1 0x00000e84
#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
@ -323,7 +316,6 @@ struct tegra_pcie_soc {
bool program_uphy;
bool update_clamp_threshold;
bool program_deskew_time;
bool raw_violation_fixup;
bool update_fc_timer;
bool has_cache_bars;
struct {
@ -669,23 +661,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
writel(value, port->base + RP_VEND_CTL0);
}
/* Fixup for read after write violation. */
if (soc->raw_violation_fixup) {
value = readl(port->base + RP_RX_HDR_LIMIT);
value &= ~RP_RX_HDR_LIMIT_PW_MASK;
value |= RP_RX_HDR_LIMIT_PW;
writel(value, port->base + RP_RX_HDR_LIMIT);
value = readl(port->base + RP_PRIV_XP_DL);
value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
writel(value, port->base + RP_PRIV_XP_DL);
value = readl(port->base + RP_VEND_XP);
value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
value |= soc->update_fc_threshold;
writel(value, port->base + RP_VEND_XP);
}
if (soc->update_fc_timer) {
value = readl(port->base + RP_VEND_XP);
value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
@ -2511,7 +2486,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
.raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = true,
.ectl.enable = false,
@ -2541,7 +2515,6 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.program_uphy = true,
.update_clamp_threshold = false,
.program_deskew_time = false,
.raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@ -2554,8 +2527,6 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x44ac44ac,
/* FC threshold is bit[25:18] */
.update_fc_threshold = 0x03fc0000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@ -2565,7 +2536,6 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = false,
.raw_violation_fixup = true,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,
@ -2589,7 +2559,6 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.program_uphy = true,
.update_clamp_threshold = true,
.program_deskew_time = true,
.raw_violation_fixup = false,
.update_fc_timer = true,
.has_cache_bars = false,
.ectl = {
@ -2631,7 +2600,6 @@ static const struct tegra_pcie_soc tegra186_pcie = {
.program_uphy = false,
.update_clamp_threshold = false,
.program_deskew_time = false,
.raw_violation_fixup = false,
.update_fc_timer = false,
.has_cache_bars = false,
.ectl.enable = false,

@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
/*
* A separate lockdep class for the backing shmem inodes to resolve the lockdep
* warning about the race between kswapd taking fs_reclaim before inode_lock
* and write syscall taking inode_lock and then fs_reclaim.
* Note that such race is impossible because ashmem does not support write
* syscalls operating on the backing shmem.
*/
static struct lock_class_key backing_shmem_inode_class;
static inline unsigned long range_size(struct ashmem_range *range)
{
return range->pgend - range->pgstart + 1;
@ -396,6 +405,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
if (!asma->file) {
char *name = ASHMEM_NAME_DEF;
struct file *vmfile;
struct inode *inode;
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
name = asma->name;
@ -407,6 +417,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
goto out;
}
vmfile->f_mode |= FMODE_LSEEK;
inode = file_inode(vmfile);
lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
asma->file = vmfile;
/*
* override mmap operation of the vmfile so that it can't be

@ -1729,9 +1729,11 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
if ((ndisauthmode == Ndis802_11AuthModeWPA) ||
(ndisauthmode == Ndis802_11AuthModeWPAPSK))
authmode = _WPA_IE_ID_;
if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
else if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
(ndisauthmode == Ndis802_11AuthModeWPA2PSK))
authmode = _WPA2_IE_ID_;
else
authmode = 0x0;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);

@ -33,7 +33,6 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
{
struct _adapter *adapter = context;
complete(&adapter->rtl8712_fw_ready);
if (!firmware) {
struct usb_device *udev = adapter->dvobjpriv.pusbdev;
struct usb_interface *usb_intf = adapter->pusb_intf;
@ -41,11 +40,13 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
dev_err(&udev->dev, "r8712u: Firmware request failed\n");
usb_put_dev(udev);
usb_set_intfdata(usb_intf, NULL);
complete(&adapter->rtl8712_fw_ready);
return;
}
adapter->fw = firmware;
/* firmware available - start netdev */
register_netdev(adapter->pnetdev);
complete(&adapter->rtl8712_fw_ready);
}
static const char firmware_file[] = "rtlwifi/rtl8712u.bin";

@ -595,13 +595,17 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
if (pnetdev) {
struct _adapter *padapter = netdev_priv(pnetdev);
usb_set_intfdata(pusb_intf, NULL);
release_firmware(padapter->fw);
/* never exit with a firmware callback pending */
wait_for_completion(&padapter->rtl8712_fw_ready);
pnetdev = usb_get_intfdata(pusb_intf);
usb_set_intfdata(pusb_intf, NULL);
if (!pnetdev)
goto firmware_load_fail;
release_firmware(padapter->fw);
if (drvpriv.drv_registered)
padapter->surprise_removed = true;
unregister_netdev(pnetdev); /* will call netdev_close() */
if (pnetdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(pnetdev); /* will call netdev_close() */
flush_scheduled_work();
udelay(1);
/* Stop driver mlme relation timer */
@ -614,6 +618,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
*/
usb_put_dev(udev);
}
firmware_load_fail:
/* If we didn't unplug usb dongle and remove/insert module, driver
* fails on sitesurvey for the first time when device is up.
* Reset usb port for sitesurvey fail issue.

@ -56,7 +56,10 @@
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
static const char hcd_name[] = "xhci_hcd";
@ -244,13 +247,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1142)
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x2142)
(pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI))
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&

@ -2,8 +2,9 @@
/*
* Native support for the I/O-Warrior USB devices
*
* Copyright (c) 2003-2005 Code Mercenaries GmbH
* written by Christian Lucht <lucht@codemercs.com>
* Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH
* written by Christian Lucht <lucht@codemercs.com> and
* Christoph Jung <jung@codemercs.com>
*
* based on
@ -802,14 +803,28 @@ static int iowarrior_probe(struct usb_interface *interface,
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
/* IOWarrior56 has wMaxPacketSize different from report size */
dev->report_size = 7;
/*
* Some devices need the report size to be different than the
* endpoint size.
*/
if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
switch (dev->product_id) {
case USB_DEVICE_ID_CODEMERCS_IOW56:
case USB_DEVICE_ID_CODEMERCS_IOW56AM:
dev->report_size = 7;
break;
case USB_DEVICE_ID_CODEMERCS_IOW28:
case USB_DEVICE_ID_CODEMERCS_IOW28L:
dev->report_size = 4;
break;
case USB_DEVICE_ID_CODEMERCS_IOW100:
dev->report_size = 13;
break;
}
}
/* create the urb and buffer for reading */
dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);

@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */
{DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */

@ -251,6 +251,10 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
p = (void *) (c->vc_origin + t * c->vc_size_row);
while (count--) {
if ((vgacon_scrollback_cur->tail + c->vc_size_row) >
vgacon_scrollback_cur->size)
vgacon_scrollback_cur->tail = 0;
scr_memcpyw(vgacon_scrollback_cur->data +
vgacon_scrollback_cur->tail,
p, c->vc_size_row);

@ -833,7 +833,7 @@ static const struct dss_features omap34xx_dss_feats = {
};
static const struct dss_features omap3630_dss_feats = {
.fck_div_max = 32,
.fck_div_max = 31,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,

@ -279,6 +279,7 @@ struct sqe_submit {
bool has_user;
bool needs_lock;
bool needs_fixed_file;
u8 opcode;
};
/*
@ -505,7 +506,7 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx,
int rw = 0;
if (req->submit.sqe) {
switch (req->submit.sqe->opcode) {
switch (req->submit.opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
rw = !(req->rw.ki_flags & IOCB_DIRECT);
@ -1254,23 +1255,15 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
}
static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
const struct sqe_submit *s, struct iovec **iovec,
struct io_kiocb *req, struct iovec **iovec,
struct iov_iter *iter)
{
const struct io_uring_sqe *sqe = s->sqe;
const struct io_uring_sqe *sqe = req->submit.sqe;
void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
size_t sqe_len = READ_ONCE(sqe->len);
u8 opcode;
/*
* We're reading ->opcode for the second time, but the first read
* doesn't care whether it's _FIXED or not, so it doesn't matter
* whether ->opcode changes concurrently. The first read does care
* about whether it is a READ or a WRITE, so we don't trust this read
* for that purpose and instead let the caller pass in the read/write
* flag.
*/
opcode = READ_ONCE(sqe->opcode);
opcode = req->submit.opcode;
if (opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED) {
ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
@ -1278,7 +1271,7 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
return ret;
}
if (!s->has_user)
if (!req->submit.has_user)
return -EFAULT;
#ifdef CONFIG_COMPAT
@ -1425,7 +1418,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter);
if (ret < 0)
return ret;
@ -1490,7 +1483,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter);
if (ret < 0)
return ret;
@ -2109,15 +2102,14 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct sqe_submit *s, bool force_nonblock)
{
int ret, opcode;
int ret;
req->user_data = READ_ONCE(s->sqe->user_data);
if (unlikely(s->index >= ctx->sq_entries))
return -EINVAL;
opcode = READ_ONCE(s->sqe->opcode);
switch (opcode) {
switch (req->submit.opcode) {
case IORING_OP_NOP:
ret = io_nop(req, req->user_data);
break;
@ -2181,10 +2173,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return 0;
}
static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
const struct io_uring_sqe *sqe)
static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
switch (sqe->opcode) {
switch (req->submit.opcode) {
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
return &ctx->pending_async[READ];
@ -2196,12 +2188,10 @@ static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
}
}
static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
static inline bool io_req_needs_user(struct io_kiocb *req)
{
u8 opcode = READ_ONCE(sqe->opcode);
return !(opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED);
return !(req->submit.opcode == IORING_OP_READ_FIXED ||
req->submit.opcode == IORING_OP_WRITE_FIXED);
}
static void io_sq_wq_submit_work(struct work_struct *work)
@ -2217,7 +2207,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
int ret;
old_cred = override_creds(ctx->creds);
async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
async_list = io_async_list_from_req(ctx, req);
allow_kernel_signal(SIGINT);
restart:
@ -2239,9 +2229,10 @@ static void io_sq_wq_submit_work(struct work_struct *work)
}
ret = 0;
if (io_sqe_needs_user(sqe) && !cur_mm) {
if (io_req_needs_user(req) && !cur_mm) {
if (!mmget_not_zero(ctx->sqo_mm)) {
ret = -EFAULT;
goto end_req;
} else {
cur_mm = ctx->sqo_mm;
use_mm(cur_mm);
@ -2387,11 +2378,9 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
return ret;
}
static bool io_op_needs_file(const struct io_uring_sqe *sqe)
static bool io_op_needs_file(struct io_kiocb *req)
{
int op = READ_ONCE(sqe->opcode);
switch (op) {
switch (req->submit.opcode) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT:
@ -2419,7 +2408,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
*/
req->sequence = s->sequence;
if (!io_op_needs_file(s->sqe))
if (!io_op_needs_file(req))
return 0;
if (flags & IOSQE_FIXED_FILE) {
@ -2460,7 +2449,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
s->sqe = sqe_copy;
memcpy(&req->submit, s, sizeof(*s));
list = io_async_list_from_sqe(ctx, s->sqe);
list = io_async_list_from_req(ctx, req);
if (!io_add_to_prev_work(list, req)) {
if (list)
atomic_inc(&list->cnt);
@ -2582,7 +2571,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
req->user_data = s->sqe->user_data;
#if defined(CONFIG_NET)
switch (READ_ONCE(s->sqe->opcode)) {
switch (req->submit.opcode) {
case IORING_OP_SENDMSG:
case IORING_OP_RECVMSG:
spin_lock(&current->fs->lock);
@ -2697,6 +2686,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
if (head < ctx->sq_entries) {
s->index = head;
s->sqe = &ctx->sq_sqes[head];
s->opcode = READ_ONCE(s->sqe->opcode);
s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++;
return true;

@ -3530,17 +3530,17 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
u32 zzz = 0;
int pad;
/*
* svcrdma requires every READ payload to start somewhere
* in xdr->pages.
*/
if (xdr->iov == xdr->buf->head) {
xdr->iov = NULL;
xdr->end = xdr->p;
}
len = maxcount;
v = 0;
thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
p = xdr_reserve_space(xdr, (thislen+3)&~3);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
v++;
len -= thislen;
while (len) {
thislen = min_t(long, len, PAGE_SIZE);
p = xdr_reserve_space(xdr, (thislen+3)&~3);
@ -3559,6 +3559,8 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
read->rd_length = maxcount;
if (nfserr)
return nfserr;
if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount))
return nfserr_io;
xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
tmp = htonl(eof);

@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
return error;
}
/**
* __vfs_setxattr_locked: set an extended attribute while holding the inode
* lock
*
* @dentry - object to perform setxattr on
* @name - xattr name to set
* @value - value to set @name to
* @size - size of @value
* @flags - flags to pass into filesystem operations
* @delegated_inode - on return, will contain an inode pointer that
* a delegation was broken on, NULL if none.
*/
int
vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
__vfs_setxattr_locked(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags,
struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (error)
return error;
inode_lock(inode);
error = security_inode_setxattr(dentry, name, value, size, flags);
if (error)
goto out;
error = try_break_deleg(inode, delegated_inode);
if (error)
goto out;
error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
out:
return error;
}
EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
int
vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
struct inode *delegated_inode = NULL;
int error;
retry_deleg:
inode_lock(inode);
error = __vfs_setxattr_locked(dentry, name, value, size, flags,
&delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
return error;
}
EXPORT_SYMBOL_NS_GPL(vfs_setxattr, ANDROID_GKI_VFS_EXPORT_ONLY);
@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name)
}
EXPORT_SYMBOL(__vfs_removexattr);
/**
* __vfs_removexattr_locked: set an extended attribute while holding the inode
* lock
*
* @dentry - object to perform setxattr on
* @name - name of xattr to remove
* @delegated_inode - on return, will contain an inode pointer that
* a delegation was broken on, NULL if none.
*/
int
vfs_removexattr(struct dentry *dentry, const char *name)
__vfs_removexattr_locked(struct dentry *dentry, const char *name,
struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name)
if (error)
return error;
inode_lock(inode);
error = security_inode_removexattr(dentry, name);
if (error)
goto out;
error = try_break_deleg(inode, delegated_inode);
if (error)
goto out;
error = __vfs_removexattr(dentry, name);
if (!error) {
@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name)
}
out:
return error;
}
EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
int
vfs_removexattr(struct dentry *dentry, const char *name)
{
struct inode *inode = dentry->d_inode;
struct inode *delegated_inode = NULL;
int error;
retry_deleg:
inode_lock(inode);
error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
return error;
}
EXPORT_SYMBOL_GPL(vfs_removexattr);
/*
* Extended attribute SET operations
*/

@ -865,6 +865,18 @@ struct drm_mode_config {
*/
bool prefer_shadow_fbdev;
/**
* @fbdev_use_iomem:
*
* Set to true if framebuffer reside in iomem.
* When set to true memcpy_toio() is used when copying the framebuffer in
* drm_fb_helper.drm_fb_helper_dirty_blit_real().
*
* FIXME: This should be replaced with a per-mapping is_iomem
* flag (like ttm does), and then used everywhere in fbdev code.
*/
bool fbdev_use_iomem;
/**
* @quirk_addfb_prefer_xbgr_30bpp:
*

@ -423,6 +423,8 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
CHANNELMSG_22 = 22,
CHANNELMSG_TL_CONNECT_RESULT = 23,
CHANNELMSG_COUNT
};

@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
int svc_encode_read_payload(struct svc_rqst *rqstp,
unsigned int offset,
unsigned int length);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
struct page **pages,
struct kvec *first, size_t total);

@ -137,6 +137,8 @@ struct svc_rdma_recv_ctxt {
unsigned int rc_page_count;
unsigned int rc_hdr_count;
u32 rc_inv_rkey;
unsigned int rc_read_payload_offset;
unsigned int rc_read_payload_length;
struct page *rc_pages[RPCSVC_MAXPAGES];
};
@ -171,7 +173,9 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
struct svc_rdma_recv_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
__be32 *wr_ch, struct xdr_buf *xdr);
__be32 *wr_ch, struct xdr_buf *xdr,
unsigned int offset,
unsigned long length);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
__be32 *rp_ch, bool writelist,
struct xdr_buf *xdr);
@ -190,6 +194,8 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt,
struct xdr_buf *xdr, __be32 *wr_lst);
extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length);
/* svc_rdma_transport.c */
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);

@ -21,6 +21,8 @@ struct svc_xprt_ops {
int (*xpo_has_wspace)(struct svc_xprt *);
int (*xpo_recvfrom)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
int (*xpo_read_payload)(struct svc_rqst *, unsigned int,
unsigned int);
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);

@ -52,8 +52,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int __vfs_removexattr(struct dentry *, const char *);
int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
int vfs_removexattr(struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);

@ -285,6 +285,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void __ipv6_sock_ac_close(struct sock *sk);
void ipv6_sock_ac_close(struct sock *sk);
int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);

@ -2171,6 +2171,7 @@ __perf_remove_from_context(struct perf_event *event,
if (!ctx->nr_events && ctx->is_active) {
ctx->is_active = 0;
ctx->rotate_necessary = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx->task_ctx = NULL;
@ -3047,12 +3048,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (!ctx->nr_active || !(is_active & EVENT_ALL))
return;
/*
* If we had been multiplexing, no rotations are necessary, now no events
* are active.
*/
ctx->rotate_necessary = 0;
perf_pmu_disable(ctx->pmu);
if (is_active & EVENT_PINNED) {
list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@ -3062,6 +3057,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (is_active & EVENT_FLEXIBLE) {
list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
group_sched_out(event, cpuctx, ctx);
/*
* Since we cleared EVENT_FLEXIBLE, also clear
* rotate_necessary, is will be reset by
* ctx_flexible_sched_in() when needed.
*/
ctx->rotate_necessary = 0;
}
perf_pmu_enable(ctx->pmu);
}
@ -3800,6 +3802,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
typeof(*event), group_node);
}
/*
* Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
* finds there are unschedulable events, it will set it again.
*/
ctx->rotate_necessary = 0;
return event;
}

@ -816,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
return -ENOMEM;
ts->rd = fget(rfd);
if (!ts->rd)
goto out_free_ts;
if (!(ts->rd->f_mode & FMODE_READ))
goto out_put_rd;
ts->wr = fget(wfd);
if (!ts->rd || !ts->wr) {
if (ts->rd)
fput(ts->rd);
if (ts->wr)
fput(ts->wr);
kfree(ts);
return -EIO;
}
if (!ts->wr)
goto out_put_rd;
if (!(ts->wr->f_mode & FMODE_WRITE))
goto out_put_wr;
client->trans = ts;
client->status = Connected;
return 0;
out_put_wr:
fput(ts->wr);
out_put_rd:
fput(ts->rd);
out_free_ts:
kfree(ts);
return -EIO;
}
static int p9_socket_open(struct p9_client *client, struct socket *csocket)

@ -229,6 +229,8 @@ int __init atalk_proc_init(void)
sizeof(struct aarp_iter_state), NULL))
goto out;
return 0;
out:
remove_proc_subtree("atalk", init_net.proc_net);
return -ENOMEM;

@ -2444,7 +2444,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@ -4067,6 +4067,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@ -4088,6 +4091,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@ -4108,6 +4114,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
}
}
unlock:
hci_dev_unlock(hdev);
}
@ -4270,7 +4277,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

@ -1751,7 +1751,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
struct key_vector *local_l = NULL, *local_tp;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
hlist_for_each_entry(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
if (local_tb->tb_id != fa->tb_id)

@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
bool need_csum, need_recompute_csum, gso_partial;
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
__be16 protocol = skb->protocol;
u16 mac_len = skb->mac_len;
int gre_offset, outer_hlen;
bool need_csum, gso_partial;
if (!skb->encapsulation)
goto out;
@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
skb->protocol = skb->inner_protocol;
need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
need_recompute_csum = skb->csum_not_inet;
skb->encap_hdr_csum = need_csum;
features &= skb->dev->hw_enc_features;
@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
}
*(pcsum + 1) = 0;
*pcsum = gso_make_checksum(skb, 0);
if (need_recompute_csum && !skb_is_gso(skb)) {
__wsum csum;
csum = skb_checksum(skb, gre_offset,
skb->len - gre_offset, 0);
*pcsum = csum_fold(csum);
} else {
*pcsum = gso_make_checksum(skb, 0);
}
} while ((skb = skb->next));
out:
return segs;

@ -2944,6 +2944,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
if (!delta)
delta = 1;
seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
ca_rtt_us = seq_rtt_us;
}

@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return 0;
}
void ipv6_sock_ac_close(struct sock *sk)
void __ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net_device *dev = NULL;
@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk)
struct net *net = sock_net(sk);
int prev_index;
if (!np->ipv6_ac_list)
return;
rtnl_lock();
ASSERT_RTNL();
pac = np->ipv6_ac_list;
np->ipv6_ac_list = NULL;
@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
}
void ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
if (!np->ipv6_ac_list)
return;
rtnl_lock();
__ipv6_sock_ac_close(sk);
rtnl_unlock();
}

@ -205,6 +205,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
fl6_free_socklist(sk);
__ipv6_sock_mc_close(sk);
__ipv6_sock_ac_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so


@@ -3686,14 +3686,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->fib6_src.plen = cfg->fc_src_len;
#endif
if (nh) {
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
goto out;
}
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
goto out;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
goto out;
}
rt->nh = nh;
fib6_nh = nexthop_fib6_nh(rt->nh);
} else {


@@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
ovs_ct_update_key(skb, NULL, key, false, false);
}
#define IN6_ADDR_INITIALIZER(ADDR) \
{ (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
(ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }
int ovs_ct_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, struct sk_buff *skb)
{
@@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey,
if (swkey->ct_orig_proto) {
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ct_tuple_ipv4 orig = {
output->ipv4.ct_orig.src,
output->ipv4.ct_orig.dst,
output->ct.orig_tp.src,
output->ct.orig_tp.dst,
output->ct_orig_proto,
};
struct ovs_key_ct_tuple_ipv4 orig;
memset(&orig, 0, sizeof(orig));
orig.ipv4_src = output->ipv4.ct_orig.src;
orig.ipv4_dst = output->ipv4.ct_orig.dst;
orig.src_port = output->ct.orig_tp.src;
orig.dst_port = output->ct.orig_tp.dst;
orig.ipv4_proto = output->ct_orig_proto;
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
sizeof(orig), &orig))
return -EMSGSIZE;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ct_tuple_ipv6 orig = {
IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
output->ct.orig_tp.src,
output->ct.orig_tp.dst,
output->ct_orig_proto,
};
struct ovs_key_ct_tuple_ipv6 orig;
memset(&orig, 0, sizeof(orig));
memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
sizeof(orig.ipv6_src));
memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
sizeof(orig.ipv6_dst));
orig.src_port = output->ct.orig_tp.src;
orig.dst_port = output->ct.orig_tp.dst;
orig.ipv6_proto = output->ct_orig_proto;
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
sizeof(orig), &orig))
return -EMSGSIZE;
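
The designated initializers are replaced by memset() plus explicit field assignments because these tuple structs are copied into the netlink message as raw bytes; zeroing the whole struct first ensures compiler-inserted padding cannot carry stale kernel stack data to userspace. A small userspace sketch of the pattern, with an illustrative layout and a plain buffer standing in for nla_put():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tuple_v4 {
	uint32_t src;
	uint32_t dst;
	uint16_t sport;
	uint16_t dport;
	uint8_t  proto;
	/* the compiler typically adds 3 bytes of padding here */
};

static void put_tuple(uint8_t *out, uint32_t src, uint32_t dst,
		      uint16_t sport, uint16_t dport, uint8_t proto)
{
	struct tuple_v4 orig;

	memset(&orig, 0, sizeof(orig));		/* zero the padding first */
	orig.src = src;
	orig.dst = dst;
	orig.sport = sport;
	orig.dport = dport;
	orig.proto = proto;

	memcpy(out, &orig, sizeof(orig));	/* stand-in for nla_put() */
}

int main(void)
{
	uint8_t buf[sizeof(struct tuple_v4)];

	put_tuple(buf, 0x0a000001, 0x0a000002, 1234, 80, 6);
	printf("copied %zu bytes with padding zeroed\n", sizeof(buf));
	return 0;
}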


@@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
*/
ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
goto error_attached_to_socket;
trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
atomic_read(&call->usage), here, NULL);
@@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
ret = -EEXIST;
error:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
RX_CALL_DEAD, -EEXIST);
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
atomic_read(&call->usage), here, ERR_PTR(ret));
atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
return ERR_PTR(ret);
_leave(" = -EEXIST");
return ERR_PTR(-EEXIST);
/* We got an error, but the call is attached to the socket and is in
* need of release. However, we might now race with recvmsg() when
* completing the call queues it. Return 0 from sys_sendmsg() and
* leave the error to recvmsg() to deal with.
*/
error_attached_to_socket:
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
atomic_read(&call->usage), here, ERR_PTR(ret));
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
_leave(" = c=%08x [err]", call->debug_id);
return call;
}
/*


@@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
spin_lock_bh(&conn->params.peer->lock);
hlist_del_rcu(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
if (!hlist_unhashed(&call->error_link)) {
spin_lock_bh(&call->peer->lock);
hlist_del_rcu(&call->error_link);
spin_unlock_bh(&call->peer->lock);
}
if (rxrpc_is_client_call(call))
return rxrpc_disconnect_client_call(call);


@@ -541,7 +541,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto error_unlock_call;
}
if (msg->msg_name) {
if (msg->msg_name && call->peer) {
struct sockaddr_rxrpc *srx = msg->msg_name;
size_t len = sizeof(call->peer->srx);


@@ -683,6 +683,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
ret = 0;
if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
goto out_put_unlock;
} else {
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_UNINITIALISED:


@@ -1634,6 +1634,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_max_payload);
/**
* svc_encode_read_payload - mark a range of bytes as a READ payload
* @rqstp: svc_rqst to operate on
* @offset: payload's byte offset in rqstp->rq_res
* @length: size of payload, in bytes
*
* Returns zero on success, or a negative errno if a permanent
* error occurred.
*/
int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length)
{
return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
}
EXPORT_SYMBOL_GPL(svc_encode_read_payload);
/**
* svc_fill_write_vector - Construct data argument for VFS write call
* @rqstp: svc_rqst to operate on
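
svc_encode_read_payload() is a thin wrapper that forwards to the transport's xpo_read_payload callback: the UDP and TCP socket transports install a no-op in the next hunks, while the RDMA transport records where the payload sits so it can be sent as a Write chunk later. A compact sketch of that ops-table dispatch, using simplified stand-in types rather than the sunrpc structures:

#include <stdio.h>

struct xprt;

struct xprt_ops {
	int (*read_payload)(struct xprt *xprt, unsigned int offset,
			    unsigned int length);
};

struct xprt {
	const struct xprt_ops *ops;
};

/* Socket-style transport: nothing to record for a READ payload. */
static int sock_read_payload(struct xprt *xprt, unsigned int offset,
			     unsigned int length)
{
	(void)xprt; (void)offset; (void)length;
	return 0;
}

static const struct xprt_ops sock_ops = {
	.read_payload = sock_read_payload,
};

/* Generic wrapper, in the spirit of svc_encode_read_payload(). */
static int encode_read_payload(struct xprt *xprt, unsigned int offset,
			       unsigned int length)
{
	return xprt->ops->read_payload(xprt, offset, length);
}

int main(void)
{
	struct xprt x = { .ops = &sock_ops };

	printf("rc = %d\n", encode_read_payload(&x, 0, 4096));
	return 0;
}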


@@ -279,6 +279,12 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
return len;
}
static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length)
{
return 0;
}
/*
* Report socket names for nfsdfs
*/
@@ -655,6 +661,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
.xpo_create = svc_udp_create,
.xpo_recvfrom = svc_udp_recvfrom,
.xpo_sendto = svc_udp_sendto,
.xpo_read_payload = svc_sock_read_payload,
.xpo_release_rqst = svc_release_udp_skb,
.xpo_detach = svc_sock_detach,
.xpo_free = svc_sock_free,
@@ -1175,6 +1182,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
.xpo_recvfrom = svc_tcp_recvfrom,
.xpo_sendto = svc_tcp_sendto,
.xpo_read_payload = svc_sock_read_payload,
.xpo_release_rqst = svc_release_skb,
.xpo_detach = svc_tcp_sock_detach,
.xpo_free = svc_sock_free,


@@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
out:
ctxt->rc_page_count = 0;
ctxt->rc_read_payload_length = 0;
return ctxt;
out_empty:


@@ -481,18 +481,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
vec->iov_len);
}
/* Send an xdr_buf's page list by itself. A Write chunk is
* just the page list. a Reply chunk is the head, page list,
* and tail. This function is shared between the two types
* of chunk.
/* Send an xdr_buf's page list by itself. A Write chunk is just
* the page list. A Reply chunk is @xdr's head, page list, and
* tail. This function is shared between the two types of chunk.
*/
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
struct xdr_buf *xdr)
struct xdr_buf *xdr,
unsigned int offset,
unsigned long length)
{
info->wi_xdr = xdr;
info->wi_next_off = 0;
info->wi_next_off = offset - xdr->head[0].iov_len;
return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
xdr->page_len);
length);
}
/**
@@ -500,6 +501,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
* @rdma: controlling RDMA transport
* @wr_ch: Write chunk provided by client
* @xdr: xdr_buf containing the data payload
* @offset: payload's byte offset in @xdr
* @length: size of payload, in bytes
*
* Returns a non-negative number of bytes the chunk consumed, or
* %-E2BIG if the payload was larger than the Write chunk,
@@ -509,19 +512,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
*/
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
struct xdr_buf *xdr)
struct xdr_buf *xdr,
unsigned int offset, unsigned long length)
{
struct svc_rdma_write_info *info;
int ret;
if (!xdr->page_len)
if (!length)
return 0;
info = svc_rdma_write_info_alloc(rdma, wr_ch);
if (!info)
return -ENOMEM;
ret = svc_rdma_send_xdr_pagelist(info, xdr);
ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
if (ret < 0)
goto out_err;
@@ -530,7 +534,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
goto out_err;
trace_svcrdma_encode_write(xdr->page_len);
return xdr->page_len;
return length;
out_err:
svc_rdma_write_info_free(info);
@@ -570,7 +574,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
* client did not provide Write chunks.
*/
if (!writelist && xdr->page_len) {
ret = svc_rdma_send_xdr_pagelist(info, xdr);
ret = svc_rdma_send_xdr_pagelist(info, xdr,
xdr->head[0].iov_len,
xdr->page_len);
if (ret < 0)
goto out_err;
consumed += xdr->page_len;


@@ -856,7 +856,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (wr_lst) {
/* XXX: Presume the client sent only one Write chunk */
ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
unsigned long offset;
unsigned int length;
if (rctxt->rc_read_payload_length) {
offset = rctxt->rc_read_payload_offset;
length = rctxt->rc_read_payload_length;
} else {
offset = xdr->head[0].iov_len;
length = xdr->page_len;
}
ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
length);
if (ret < 0)
goto err2;
svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
@@ -891,3 +902,30 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
set_bit(XPT_CLOSE, &xprt->xpt_flags);
return -ENOTCONN;
}
/**
* svc_rdma_read_payload - special processing for a READ payload
* @rqstp: svc_rqst to operate on
* @offset: payload's byte offset in @xdr
* @length: size of payload, in bytes
*
* Returns zero on success.
*
* For the moment, just record the xdr_buf location of the READ
* payload. svc_rdma_sendto will use that location later when
* we actually send the payload.
*/
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length)
{
struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
/* XXX: Just one READ payload slot for now, since our
* transport implementation currently supports only one
* Write chunk.
*/
rctxt->rc_read_payload_offset = offset;
rctxt->rc_read_payload_length = length;
return 0;
}


@@ -81,6 +81,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
.xpo_create = svc_rdma_create,
.xpo_recvfrom = svc_rdma_recvfrom,
.xpo_sendto = svc_rdma_sendto,
.xpo_read_payload = svc_rdma_read_payload,
.xpo_release_rqst = svc_rdma_release_rqst,
.xpo_detach = svc_rdma_detach,
.xpo_free = svc_rdma_free,


@@ -13172,13 +13172,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
if (!wdev_running(wdev))
return -ENETDOWN;
}
if (!vcmd->doit)
return -EOPNOTSUPP;
} else {
wdev = NULL;
}
if (!vcmd->doit)
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);


@@ -6,6 +6,7 @@
/// add a missing namespace tag to a module source file.
///
virtual nsdeps
virtual report
@has_ns_import@
@@ -16,10 +17,15 @@ MODULE_IMPORT_NS(ns);
// Add missing imports, but only adjacent to a MODULE_LICENSE statement.
// That ensures we are adding it only to the main module source file.
@do_import depends on !has_ns_import@
@do_import depends on !has_ns_import && nsdeps@
declarer name MODULE_LICENSE;
expression license;
identifier virtual.ns;
@@
MODULE_LICENSE(license);
+ MODULE_IMPORT_NS(ns);
// Dummy rule for report mode that would otherwise be empty and make spatch
// fail ("No rules apply.")
@script:python depends on report@
@@


@@ -23,7 +23,7 @@ fi
generate_deps_for_ns() {
$SPATCH --very-quiet --in-place --sp-file \
$srctree/scripts/coccinelle/misc/add_namespace.cocci -D ns=$1 $2
$srctree/scripts/coccinelle/misc/add_namespace.cocci -D nsdeps -D ns=$1 $2
}
generate_deps() {


@@ -227,7 +227,7 @@ config IMA_APPRAISE_REQUIRE_POLICY_SIGS
config IMA_APPRAISE_BOOTPARAM
bool "ima_appraise boot parameter"
depends on IMA_APPRAISE && !IMA_ARCH_POLICY
depends on IMA_APPRAISE
default y
help
This option enables the different "ima_appraise=" modes


@@ -18,6 +18,12 @@
static int __init default_appraise_setup(char *str)
{
#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
if (arch_ima_get_secureboot()) {
pr_info("Secure boot enabled: ignoring ima_appraise=%s boot parameter option",
str);
return 1;
}
if (strncmp(str, "off", 3) == 0)
ima_appraise = 0;
else if (strncmp(str, "log", 3) == 0)


@@ -2720,7 +2720,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file)
static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_smack *tsp = smack_cred(current_cred());
char *data;
int rc;
LIST_HEAD(list_tmp);
@@ -2745,11 +2744,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
kfree(data);
if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
struct cred *new;
struct task_smack *tsp;
new = prepare_creds();
if (!new) {
rc = -ENOMEM;
goto out;
}
tsp = smack_cred(new);
smk_destroy_label_list(&tsp->smk_relabel);
list_splice(&list_tmp, &tsp->smk_relabel);
commit_creds(new);
return count;
}
out:
smk_destroy_label_list(&list_tmp);
return rc;
}
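
Rather than editing the credentials shared by the running task in place, the fix follows the prepare_creds()/commit_creds() pattern: build a private copy, splice the new label list into the copy, then publish it, so other users of the old creds no longer see the list being modified out from under them. A simplified userspace sketch of that copy-then-publish idiom; malloc/free stand in for the kernel's refcounted cred API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct creds {
	int relabel_count;	/* stand-in for the smk_relabel list */
};

static struct creds *current_creds;

static struct creds *prepare_copy(void)		/* ~ prepare_creds() */
{
	struct creds *new = malloc(sizeof(*new));

	if (new)
		memcpy(new, current_creds, sizeof(*new));
	return new;
}

static void commit_copy(struct creds *new)	/* ~ commit_creds() */
{
	struct creds *old = current_creds;

	current_creds = new;	/* publish the new copy */
	free(old);		/* old creds dropped once unused */
}

int main(void)
{
	struct creds *new;

	current_creds = calloc(1, sizeof(*current_creds));
	if (!current_creds)
		return 1;

	new = prepare_copy();
	if (!new)
		return 1;
	new->relabel_count = 2;	/* modify only the private copy */
	commit_copy(new);

	printf("relabel_count = %d\n", current_creds->relabel_count);
	free(current_creds);
	return 0;
}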


@@ -168,10 +168,16 @@ static long
odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct seq_oss_devinfo *dp;
long rc;
dp = file->private_data;
if (snd_BUG_ON(!dp))
return -ENXIO;
return snd_seq_oss_ioctl(dp, cmd, arg);
mutex_lock(&register_mutex);
rc = snd_seq_oss_ioctl(dp, cmd, arg);
mutex_unlock(&register_mutex);
return rc;
}
#ifdef CONFIG_COMPAT
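
The OSS sequencer ioctls are now serialized on the existing register_mutex, closing races between concurrently issued ioctls that touch shared sequencer state. A minimal pthread sketch of funnelling an entry point through one mutex; names are illustrative and a userspace mutex stands in for the kernel one:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;
static long call_count;			/* shared state the lock protects */

static long serialized_ioctl(unsigned int cmd, unsigned long arg)
{
	long rc;

	(void)cmd; (void)arg;
	pthread_mutex_lock(&register_mutex);
	rc = ++call_count;		/* all shared-state work happens here */
	pthread_mutex_unlock(&register_mutex);
	return rc;
}

int main(void)
{
	printf("calls so far: %ld\n", serialized_ioctl(0, 0));
	return 0;
}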


@@ -2924,6 +2924,10 @@ static int hda_codec_runtime_suspend(struct device *dev)
struct hda_codec *codec = dev_to_hda_codec(dev);
unsigned int state;
/* Nothing to do if card registration fails and the component driver never probes */
if (!codec->card)
return 0;
cancel_delayed_work_sync(&codec->jackpoll_work);
state = hda_call_codec_suspend(codec);
if (codec->link_down_at_suspend ||
@@ -2938,6 +2942,10 @@ static int hda_codec_runtime_resume(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
/* Nothing to do if card registration fails and the component driver never probes */
if (!codec->card)
return 0;
codec_display_power(codec, true);
snd_hdac_codec_link_up(&codec->core);
hda_call_codec_resume(codec);


@@ -2306,7 +2306,6 @@ static int azx_probe_continue(struct azx *chip)
if (azx_has_pm_runtime(chip)) {
pm_runtime_use_autosuspend(&pci->dev);
pm_runtime_allow(&pci->dev);
pm_runtime_put_autosuspend(&pci->dev);
}


@@ -1182,6 +1182,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
{}
};
@@ -4670,7 +4671,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
tmp = FLOAT_ONE;
break;
case QUIRK_AE5:
ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
tmp = FLOAT_THREE;
break;
default:
@@ -4716,7 +4717,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
break;
case QUIRK_AE5:
ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
break;
default:
break;
@@ -4755,7 +4756,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
tmp = FLOAT_ONE;
break;
case QUIRK_AE5:
ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f);
ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
tmp = FLOAT_THREE;
break;
default:
@@ -5747,6 +5748,11 @@ static int ca0132_switch_get(struct snd_kcontrol *kcontrol,
return 0;
}
if (nid == ZXR_HEADPHONE_GAIN) {
*valp = spec->zxr_gain_set;
return 0;
}
return 0;
}


@@ -6131,6 +6131,11 @@ enum {
ALC289_FIXUP_ASUS_GA502,
ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
ALC285_FIXUP_HP_GPIO_AMP_INIT,
ALC269_FIXUP_CZC_B20,
ALC269_FIXUP_CZC_TMI,
ALC269_FIXUP_CZC_L101,
ALC269_FIXUP_LEMOTE_A1802,
ALC269_FIXUP_LEMOTE_A190X,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7369,6 +7374,89 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_HP_GPIO_LED
},
[ALC269_FIXUP_CZC_B20] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x12, 0x411111f0 },
{ 0x14, 0x90170110 }, /* speaker */
{ 0x15, 0x032f1020 }, /* HP out */
{ 0x17, 0x411111f0 },
{ 0x18, 0x03ab1040 }, /* mic */
{ 0x19, 0xb7a7013f },
{ 0x1a, 0x0181305f },
{ 0x1b, 0x411111f0 },
{ 0x1d, 0x411111f0 },
{ 0x1e, 0x411111f0 },
{ }
},
.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_CZC_TMI] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x12, 0x4000c000 },
{ 0x14, 0x90170110 }, /* speaker */
{ 0x15, 0x0421401f }, /* HP out */
{ 0x17, 0x411111f0 },
{ 0x18, 0x04a19020 }, /* mic */
{ 0x19, 0x411111f0 },
{ 0x1a, 0x411111f0 },
{ 0x1b, 0x411111f0 },
{ 0x1d, 0x40448505 },
{ 0x1e, 0x411111f0 },
{ 0x20, 0x8000ffff },
{ }
},
.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_CZC_L101] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x12, 0x40000000 },
{ 0x14, 0x01014010 }, /* speaker */
{ 0x15, 0x411111f0 }, /* HP out */
{ 0x16, 0x411111f0 },
{ 0x18, 0x01a19020 }, /* mic */
{ 0x19, 0x02a19021 },
{ 0x1a, 0x0181302f },
{ 0x1b, 0x0221401f },
{ 0x1c, 0x411111f0 },
{ 0x1d, 0x4044c601 },
{ 0x1e, 0x411111f0 },
{ }
},
.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_LEMOTE_A1802] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x12, 0x40000000 },
{ 0x14, 0x90170110 }, /* speaker */
{ 0x17, 0x411111f0 },
{ 0x18, 0x03a19040 }, /* mic1 */
{ 0x19, 0x90a70130 }, /* mic2 */
{ 0x1a, 0x411111f0 },
{ 0x1b, 0x411111f0 },
{ 0x1d, 0x40489d2d },
{ 0x1e, 0x411111f0 },
{ 0x20, 0x0003ffff },
{ 0x21, 0x03214020 },
{ }
},
.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_LEMOTE_A190X] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x14, 0x99130110 }, /* speaker */
{ 0x15, 0x0121401f }, /* HP out */
{ 0x18, 0x01a19c20 }, /* rear mic */
{ 0x19, 0x99a3092f }, /* front mic */
{ 0x1b, 0x0201401f }, /* front lineout */
{ }
},
.chain_id = ALC269_FIXUP_DMIC,
},
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7658,9 +7746,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
#if 0
/* Below is a quirk table taken from the old code.
@@ -8945,6 +9038,7 @@ enum {
ALC662_FIXUP_LED_GPIO1,
ALC662_FIXUP_IDEAPAD,
ALC272_FIXUP_MARIO,
ALC662_FIXUP_CZC_ET26,
ALC662_FIXUP_CZC_P10T,
ALC662_FIXUP_SKU_IGNORE,
ALC662_FIXUP_HP_RP5800,
@@ -9014,6 +9108,25 @@ static const struct hda_fixup alc662_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc272_fixup_mario,
},
[ALC662_FIXUP_CZC_ET26] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{0x12, 0x403cc000},
{0x14, 0x90170110}, /* speaker */
{0x15, 0x411111f0},
{0x16, 0x411111f0},
{0x18, 0x01a19030}, /* mic */
{0x19, 0x90a7013f}, /* int-mic */
{0x1a, 0x01014020},
{0x1b, 0x0121401f},
{0x1c, 0x411111f0},
{0x1d, 0x411111f0},
{0x1e, 0x40478e35},
{}
},
.chained = true,
.chain_id = ALC662_FIXUP_SKU_IGNORE
},
[ALC662_FIXUP_CZC_P10T] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -9397,6 +9510,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),


@@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
if (read_expected(TEP_EVENT_DELIM, ")") < 0)
goto out_err;
free_token(token);
type = read_token(&token);
*tok = token;


@@ -125,9 +125,8 @@ static int do_setcpu(int cpu)
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
if (sched_setaffinity(0, sizeof(mask), &mask))
error(1, 0, "setaffinity %d", cpu);
if (cfg_verbose)
fprintf(stderr, "cpu: unable to pin, may increase variance.\n");
else if (cfg_verbose)
fprintf(stderr, "cpu: %u\n", cpu);
return 0;