Merge 6.1.62 into android14-6.1-lts

Changes in 6.1.62
	ASoC: simple-card: fixup asoc_simple_probe() error handling
	coresight: tmc-etr: Disable warnings for allocation failures
	ASoC: tlv320adc3xxx: BUG: Correct micbias setting
	net: sched: cls_u32: Fix allocation size in u32_init()
	irqchip/riscv-intc: Mark all INTC nodes as initialized
	irqchip/stm32-exti: add missing DT IRQ flag translation
	dmaengine: ste_dma40: Fix PM disable depth imbalance in d40_probe
	powerpc/85xx: Fix math emulation exception
	Input: synaptics-rmi4 - handle reset delay when using SMBus trsnsport
	fbdev: atyfb: only use ioremap_uc() on i386 and ia64
	fs/ntfs3: Add ckeck in ni_update_parent()
	fs/ntfs3: Write immediately updated ntfs state
	fs/ntfs3: Use kvmalloc instead of kmalloc(... __GFP_NOWARN)
	fs/ntfs3: Fix possible NULL-ptr-deref in ni_readpage_cmpr()
	fs/ntfs3: Fix NULL pointer dereference on error in attr_allocate_frame()
	fs/ntfs3: Fix directory element type detection
	fs/ntfs3: Avoid possible memory leak
	spi: npcm-fiu: Fix UMA reads when dummy.nbytes == 0
	netfilter: nfnetlink_log: silence bogus compiler warning
	efi: fix memory leak in krealloc failure handling
	ASoC: rt5650: fix the wrong result of key button
	ASoC: codecs: tas2780: Fix log of failed reset via I2C.
	drm/ttm: Reorder sys manager cleanup step
	fbdev: omapfb: fix some error codes
	fbdev: uvesafb: Call cn_del_callback() at the end of uvesafb_exit()
	scsi: mpt3sas: Fix in error path
	drm/amdgpu: Unset context priority is now invalid
	gpu/drm: Eliminate DRM_SCHED_PRIORITY_UNSET
	LoongArch: Export symbol invalid_pud_table for modules building
	LoongArch: Replace kmap_atomic() with kmap_local_page() in copy_user_highpage()
	netfilter: nf_tables: audit log object reset once per table
	platform/mellanox: mlxbf-tmfifo: Fix a warning message
	drm/amdgpu: Reserve fences for VM update
	net: chelsio: cxgb4: add an error code check in t4_load_phy_fw
	r8152: Check for unplug in rtl_phy_patch_request()
	r8152: Check for unplug in r8153b_ups_en() / r8153c_ups_en()
	powerpc/mm: Fix boot crash with FLATMEM
	io_uring: kiocb_done() should *not* trust ->ki_pos if ->{read,write}_iter() failed
	ceph_wait_on_conflict_unlink(): grab reference before dropping ->d_lock
	power: supply: core: Use blocking_notifier_call_chain to avoid RCU complaint
	perf evlist: Avoid frequency mode for the dummy event
	x86: KVM: SVM: always update the x2avic msr interception
	mm/mempolicy: fix set_mempolicy_home_node() previous VMA pointer
	mmap: fix error paths with dup_anon_vma()
	ALSA: usb-audio: add quirk flag to enable native DSD for McIntosh devices
	PCI: Prevent xHCI driver from claiming AMD VanGogh USB3 DRD device
	usb: storage: set 1.50 as the lower bcdDevice for older "Super Top" compatibility
	usb: typec: tcpm: Fix NULL pointer dereference in tcpm_pd_svdm()
	usb: raw-gadget: properly handle interrupted requests
	tty: n_gsm: fix race condition in status line change on dead connections
	tty: 8250: Remove UC-257 and UC-431
	tty: 8250: Add support for additional Brainboxes UC cards
	tty: 8250: Add support for Brainboxes UP cards
	tty: 8250: Add support for Intashield IS-100
	tty: 8250: Fix port count of PX-257
	tty: 8250: Fix up PX-803/PX-857
	tty: 8250: Add support for additional Brainboxes PX cards
	tty: 8250: Add support for Intashield IX cards
	tty: 8250: Add Brainboxes Oxford Semiconductor-based quirks
	misc: pci_endpoint_test: Add deviceID for J721S2 PCIe EP device support
	ALSA: hda: intel-dsp-config: Fix JSL Chromebook quirk detection
	ASoC: SOF: sof-pci-dev: Fix community key quirk detection
	Linux 6.1.62

Change-Id: I2f696c88b48e82eb0d925a26ce6716693595d421
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-11-25 18:59:27 +00:00
commit 0d9fb52165
59 changed files with 619 additions and 143 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 61
SUBLEVEL = 62
EXTRAVERSION =
NAME = Curry Ramen

View File

@ -68,11 +68,11 @@ void copy_user_highpage(struct page *to, struct page *from,
{
void *vfrom, *vto;
vto = kmap_atomic(to);
vfrom = kmap_atomic(from);
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
kunmap_atomic(vto);
kunmap_local(vfrom);
kunmap_local(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
@ -228,6 +228,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;

View File

@ -394,7 +394,7 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
#endif
/* System Call Interrupt */

View File

@ -946,6 +946,8 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
/* Set max_mapnr before paging_init() */
set_max_mapnr(max_pfn);
/*
* Release secondary cpus out of their spinloops at 0x60 now that

View File

@ -288,7 +288,6 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
kasan_late_init();

View File

@ -822,8 +822,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
if (intercept == svm->x2avic_msrs_intercepted)
return;
if (avic_mode != AVIC_MODE_X2 ||
!apic_x2apic_mode(svm->vcpu.arch.apic))
if (avic_mode != AVIC_MODE_X2)
return;
for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {

View File

@ -3697,6 +3697,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
pm_runtime_disable(base->dev);
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);

View File

@ -245,9 +245,13 @@ static __init int efivar_ssdt_load(void)
if (status == EFI_NOT_FOUND) {
break;
} else if (status == EFI_BUFFER_TOO_SMALL) {
name = krealloc(name, name_size, GFP_KERNEL);
if (!name)
efi_char16_t *name_tmp =
krealloc(name, name_size, GFP_KERNEL);
if (!name_tmp) {
kfree(name);
return -ENOMEM;
}
name = name_tmp;
continue;
}
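
The efivar hunk above fixes a common realloc-style leak: writing the result of krealloc() straight back into the only pointer to the buffer drops that pointer when the allocation fails. A minimal user-space sketch of the same pattern using standard realloc() (an illustration only, not kernel code; the helper name grow_buffer is made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow *bufp to new_size; on failure free the old buffer exactly once. */
static int grow_buffer(char **bufp, size_t new_size)
{
	char *tmp = realloc(*bufp, new_size);	/* keep the old pointer until success */

	if (!tmp) {
		free(*bufp);			/* old buffer released, no leak */
		*bufp = NULL;
		return -1;			/* -ENOMEM analogue */
	}
	*bufp = tmp;
	return 0;
}

int main(void)
{
	char *name = strdup("efivar");

	if (!name)
		return 1;
	if (grow_buffer(&name, 4096) == 0)
		printf("resized buffer holds: %s\n", name);
	free(name);
	return 0;
}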

View File

@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
case AMDGPU_CTX_PRIORITY_UNSET:
return false;
}
}
@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
return DRM_SCHED_PRIORITY_UNSET;
pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;

View File

@ -400,6 +400,9 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
continue;
}
/* Reserve fences for two SDMA page table updates */
r = dma_resv_reserve_fences(resv, 2);
if (!r)
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
r = amdgpu_vm_handle_moved(adev, vm);

View File

@ -239,10 +239,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
ttm_resource_manager_set_used(man, false);
ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@ -252,6 +248,10 @@ void ttm_device_fini(struct ttm_device *bdev)
if (ttm_bo_delayed_delete(bdev, true))
pr_debug("Delayed destroy list was clean\n");
man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
ttm_resource_manager_set_used(man, false);
ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))

View File

@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
&flat_buf->daddr,
DMA_FROM_DEVICE, GFP_KERNEL);
DMA_FROM_DEVICE,
GFP_KERNEL | __GFP_NOWARN);
if (!flat_buf->vaddr) {
kfree(flat_buf);
return -ENOMEM;

View File

@ -1753,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
const struct rmi_device_platform_data pdata = {
.reset_delay_ms = 30,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,

View File

@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
{
int retval;
struct i2c_client *client = rmi_smb->client;
int smbus_version;
/*
* psmouse driver resets the controller, we only need to wait
* to give the firmware chance to fully reinitialize.
*/
if (rmi_smb->xport.pdata.reset_delay_ms)
msleep(rmi_smb->xport.pdata.reset_delay_ms);
/* we need to get the smbus version to activate the touchpad */
retval = rmi_smb_get_version(rmi_smb);
if (retval < 0)
return retval;
smbus_version = rmi_smb_get_version(rmi_smb);
if (smbus_version < 0)
return smbus_version;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
}
return 0;
}
@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
rmi_smb_clear_state(rmi_smb);
/*
* we do not call the actual reset command, it has to be handled in
* PS/2 or there will be races between PS/2 and SMBus.
* PS/2 should ensure that a psmouse_reset is called before
* intializing the device and after it has been removed to be in a known
* state.
* We do not call the actual reset command, it has to be handled in
* PS/2 or there will be races between PS/2 and SMBus. PS/2 should
* ensure that a psmouse_reset is called before initializing the
* device and after it has been removed to be in a known state.
*/
return rmi_smb_enable_smbus_mode(rmi_smb);
}
@ -273,7 +289,6 @@ static int rmi_smb_probe(struct i2c_client *client,
{
struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
struct rmi_smb_xport *rmi_smb;
int smbus_version;
int error;
if (!pdata) {
@ -312,18 +327,9 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb);
if (smbus_version < 0)
return smbus_version;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
}
error = rmi_smb_enable_smbus_mode(rmi_smb);
if (error)
return error;
i2c_set_clientdata(client, rmi_smb);

View File

@ -110,8 +110,16 @@ static int __init riscv_intc_init(struct device_node *node,
* for each INTC DT node. We only need to do INTC initialization
* for the INTC DT node belonging to boot CPU (or boot HART).
*/
if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
/*
* The INTC nodes of each CPU are suppliers for downstream
* interrupt controllers (such as PLIC, IMSIC and APLIC
* direct-mode) so we should mark an INTC node as initialized
* if we are not creating IRQ domain for it.
*/
fwnode_dev_initialized(of_fwnode_handle(node), true);
return 0;
}
intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
&riscv_intc_domain_ops, NULL);

View File

@ -458,6 +458,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
.map = irq_map_generic_chip,
.alloc = stm32_exti_alloc,
.free = stm32_exti_free,
.xlate = irq_domain_xlate_twocell,
};
static void stm32_irq_ack(struct irq_data *d)

View File

@ -71,6 +71,7 @@
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
@ -1004,6 +1005,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
.driver_data = (kernel_ulong_t)&j721e_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
.driver_data = (kernel_ulong_t)&j721e_data,
},
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

View File

@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
if (ret)
return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.

View File

@ -3655,6 +3655,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@ -3695,6 +3697,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@ -4058,6 +4062,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
for (i = 0; wait && i < 5000; i++) {
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
break;
usleep_range(1000, 2000);
ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
if ((ocp_data & PATCH_READY) ^ check)

View File

@ -595,7 +595,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
/*
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
* claim it.
* claim it. The same applies on the VanGogh platform device ([1022:163a]).
*
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
@ -603,7 +603,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* defines as "USB device (not host controller)". The dwc3 driver can then
* claim it based on its Vendor and Device ID.
*/
static void quirk_amd_nl_class(struct pci_dev *pdev)
static void quirk_amd_dwc_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
@ -613,7 +613,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
quirk_amd_dwc_class);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
quirk_amd_dwc_class);
/*
* Synopsys USB 3.x host HAPS platform has a class code of

View File

@ -588,22 +588,23 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
if (is_rx) {
if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
else
} else {
memcpy(&data, addr + vring->cur_len,
sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
if (is_rx) {
if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
else
} else {
data = 0;
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
}

View File

@ -29,7 +29,7 @@
struct class *power_supply_class;
EXPORT_SYMBOL_GPL(power_supply_class);
ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
EXPORT_SYMBOL_GPL(power_supply_notifier);
static struct device_type power_supply_dev_type;
@ -104,7 +104,7 @@ static void power_supply_changed_work(struct work_struct *work)
class_for_each_device(power_supply_class, NULL, psy,
__power_supply_changed_work);
power_supply_update_leds(psy);
atomic_notifier_call_chain(&power_supply_notifier,
blocking_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
spin_lock_irqsave(&psy->changed_lock, flags);
@ -1190,13 +1190,13 @@ static void power_supply_dev_release(struct device *dev)
int power_supply_reg_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&power_supply_notifier, nb);
return blocking_notifier_chain_register(&power_supply_notifier, nb);
}
EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
void power_supply_unreg_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&power_supply_notifier, nb);
blocking_notifier_chain_unregister(&power_supply_notifier, nb);
}
EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);

View File

@ -12914,8 +12914,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
if (error)
if (error) {
mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
}
return error;
}

View File

@ -353,6 +353,7 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
if (op->dummy.nbytes)
uma_cfg |= ilog2(op->dummy.buswidth)
<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)

View File

@ -3404,6 +3404,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
{
if (dlci->gsm->dead)
return -EL2HLT;
if (dlci->adaption == 2) {
/* Send convergence layer type 2 empty data frame. */
gsm_modem_upd_via_data(dlci, brk);

View File

@ -2456,6 +2456,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
/*
* Brainboxes devices - all Oxsemi based
*/
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4027,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4028,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4029,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4019,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4016,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4015,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x400A,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x400E,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x400C,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x400B,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x400F,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4010,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4011,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x401D,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x401E,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4013,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4017,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTASHIELD,
.device = 0x4018,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_oxsemi_tornado_init,
.setup = pci_oxsemi_tornado_setup,
},
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8811,
@ -4950,6 +5097,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b1_bt_1_115200 },
/*
* IntaShield IS-100
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D60,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b2_1_115200 },
/*
* IntaShield IS-200
*/
@ -4962,6 +5115,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
/*
* IntaShield IX-100
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4027,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_1_15625000 },
/*
* IntaShield IX-200
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4028,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_2_15625000 },
/*
* IntaShield IX-400
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4029,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_4_15625000 },
/* Brainboxes Devices */
/*
* Brainboxes UC-101
@ -4977,10 +5151,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
/*
* Brainboxes UC-257
* Brainboxes UC-253/UC-734
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0861,
{ PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
@ -5016,6 +5194,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08E2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08E3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-310
*/
@ -5026,6 +5212,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-313
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x08A1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5040,6 +5234,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-346
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0B01,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5051,6 +5249,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A82,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A83,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5063,12 +5265,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-420/431
* Brainboxes UC-420
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0921,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-607
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x09A1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x09A2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x09A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-836
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D41,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UP-189
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-200
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0B21,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B22,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B23,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-869
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0C01,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C03,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-880
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0C21,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C22,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C23,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes PX-101
*/
@ -5101,7 +5385,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, 0x4015,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_4_15625000 },
pbn_oxsemi_2_15625000 },
/*
* Brainboxes PX-260/PX-701
*/
@ -5109,6 +5393,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_4_15625000 },
/*
* Brainboxes PX-275/279
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0E41,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_8_115200 },
/*
* Brainboxes PX-310
*/
@ -5156,16 +5447,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_oxsemi_4_15625000 },
/*
* Brainboxes PX-803
* Brainboxes PX-475
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x401D,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_1_15625000 },
/*
* Brainboxes PX-803/PX-857
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4009,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b0_1_115200 },
pbn_b0_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x4018,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_2_15625000 },
{ PCI_VENDOR_ID_INTASHIELD, 0x401E,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_1_15625000 },
pbn_oxsemi_2_15625000 },
/*
* Brainboxes PX-820
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x4002,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b0_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x4013,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_4_15625000 },
/*
* Brainboxes PX-846
*/

View File

@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
if (WARN_ON(in && dev->ep0_out_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_unlock;
}
if (WARN_ON(!in && dev->ep0_in_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_unlock;
}
dev->req->buf = data;
@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&dev->ep0_done);
@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(dev->gadget->ep0, dev->req);
wait_for_completion(&dev->ep0_done);
spin_lock_irqsave(&dev->lock, flags);
goto out_done;
if (dev->ep0_status == -ECONNRESET)
dev->ep0_status = -EINTR;
goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
ret = dev->ep0_status;
out_done:
out_interrupted:
ret = dev->ep0_status;
out_queue_failed:
dev->ep0_urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&done);
@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(ep->ep, ep->req);
wait_for_completion(&done);
spin_lock_irqsave(&dev->lock, flags);
goto out_done;
if (ep->status == -ECONNRESET)
ep->status = -EINTR;
goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
ret = ep->status;
out_done:
out_interrupted:
ret = ep->status;
out_queue_failed:
ep->urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);

View File

@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),

View File

@ -1630,6 +1630,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
if (IS_ERR_OR_NULL(port->partner))
break;
if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));

View File

@ -3447,11 +3447,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
#if defined(__i386__) || defined(__ia64__)
/*
* By using strong UC we force the MTRR to never have an
* effect on the MMIO region on both non-PAT and PAT systems.
*/
par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
#else
par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
#endif
if (par->ati_regbase == NULL)
return -ENOMEM;

View File

@ -1643,13 +1643,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
}
fbdev->int_irq = platform_get_irq(pdev, 0);
if (fbdev->int_irq < 0) {
r = ENXIO;
r = -ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
if (fbdev->ext_irq < 0) {
r = ENXIO;
r = -ENXIO;
goto cleanup;
}

View File

@ -1935,10 +1935,10 @@ static void uvesafb_exit(void)
}
}
cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);

View File

@ -709,8 +709,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
if (!d_same_name(udentry, pdentry, &dname))
goto next;
found = dget_dlock(udentry);
spin_unlock(&udentry->d_lock);
found = dget(udentry);
break;
next:
spin_unlock(&udentry->d_lock);

View File

@ -1658,10 +1658,8 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
if (!attr_b) {
err = -ENOENT;
goto out;
}
if (!attr_b)
return -ENOENT;
attr = attr_b;
le = le_b;

View File

@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (!attr->non_res) {
lsize = le32_to_cpu(attr->res.data_size);
le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
/* attr is resident: lsize < record_size (1K or 4K) */
le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;
@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (err < 0)
goto out;
le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
/* attr is nonresident.
* The worst case:
* 1T (2^40) extremely fragmented file.
* cluster = 4K (2^12) => 2^28 fragments
* 2^9 fragments per one record => 2^19 records
* 2^5 bytes of ATTR_LIST_ENTRY per one record => 2^24 bytes.
*
* the result is 16M bytes per attribute list.
* Use kvmalloc to allocate in range [several Kbytes - dozen Mbytes]
*/
le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;

View File

@ -662,7 +662,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
wnd->bits_last = wbits;
wnd->free_bits =
kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
if (!wnd->free_bits)
return -ENOMEM;

View File

@ -309,6 +309,10 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
return 0;
}
/* NTFS: symlinks are "dir + reparse" or "file + reparse" */
if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
dt_type = DT_LNK;
else
dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);

View File

@ -2149,7 +2149,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
if (i == idx)
if (i == idx || !pg)
continue;
unlock_page(pg);
put_page(pg);
@ -3198,6 +3198,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
continue;
/* Check simple case when parent inode equals current inode. */
if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
continue;
}
/* ntfs_iget5 may sleep. */
dir = ntfs_iget5(sb, &fname->home, NULL);
if (IS_ERR(dir)) {

View File

@ -2169,8 +2169,10 @@ static int last_log_lsn(struct ntfs_log *log)
if (!page) {
page = kmalloc(log->page_size, GFP_NOFS);
if (!page)
return -ENOMEM;
if (!page) {
err = -ENOMEM;
goto out;
}
}
/*

View File

@ -953,18 +953,11 @@ int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
if (err)
return err;
mark_inode_dirty(&ni->vfs_inode);
mark_inode_dirty_sync(&ni->vfs_inode);
/* verify(!ntfs_update_mftmirr()); */
/*
* If we used wait=1, sync_inode_metadata waits for the io for the
* inode to finish. It hangs when media is removed.
* So wait=0 is sent down to sync_inode_metadata
* and filemap_fdatawrite is used for the data blocks.
*/
err = sync_inode_metadata(&ni->vfs_inode, 0);
if (!err)
err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
/* write mft record on disk. */
err = _ni_write_inode(&ni->vfs_inode, 1);
return err;
}

View File

@ -1141,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto put_inode_out;
}
bytes = inode->i_size;
sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
if (!t) {
err = -ENOMEM;
goto put_inode_out;

View File

@ -55,8 +55,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
DRM_SCHED_PRIORITY_COUNT,
DRM_SCHED_PRIORITY_UNSET = -2
DRM_SCHED_PRIORITY_COUNT
};
/**

View File

@ -568,6 +568,7 @@
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001

View File

@ -781,7 +781,7 @@ struct power_supply_battery_info {
ANDROID_KABI_RESERVE(1);
};
extern struct atomic_notifier_head power_supply_notifier;
extern struct blocking_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)

View File

@ -326,7 +326,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned final_ret = io_fixup_rw_res(req, ret);
if (req->flags & REQ_F_CUR_POS)
if (ret >= 0 && req->flags & REQ_F_CUR_POS)
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
if (!__io_complete_rw_common(req, ret)) {

View File

@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
/*
* Only update home node if there is an existing vma policy
*/
if (!new)
if (!new) {
prev = vma;
continue;
}
/*
* If any vma in the range got policy other than MPOL_BIND

View File

@ -530,6 +530,7 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
struct anon_vma *anon_vma = vma->anon_vma;
struct file *file = vma->vm_file;
bool remove_next = false;
struct vm_area_struct *anon_dup = NULL;
vma_start_write(vma);
if (next && (vma != next) && (end == next->vm_end)) {
@ -544,6 +545,8 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
error = anon_vma_clone(vma, next);
if (error)
return error;
anon_dup = vma;
}
}
@ -619,6 +622,9 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
return 0;
nomem:
if (anon_dup)
unlink_anon_vmas(anon_dup);
return -ENOMEM;
}
@ -652,6 +658,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
int remove_next = 0;
MA_STATE(mas, &mm->mm_mt, start, end - 1);
struct vm_area_struct *exporter = NULL, *importer = NULL;
struct vm_area_struct *anon_dup = NULL;
vma_start_write(vma);
if (next)
@ -739,6 +746,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
error = anon_vma_clone(importer, exporter);
if (error)
return error;
anon_dup = importer;
}
}
@ -751,8 +760,12 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
}
if (mas_preallocate(&mas, vma, GFP_KERNEL))
if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
if (anon_dup)
unlink_anon_vmas(anon_dup);
return -ENOMEM;
}
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {

View File

@ -7324,6 +7324,16 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
return -1;
}
static void audit_log_obj_reset(const struct nft_table *table,
unsigned int base_seq, unsigned int nentries)
{
char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
audit_log_nfcfg(buf, table->family, nentries,
AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
kfree(buf);
}
struct nft_obj_filter {
char *table;
u32 type;
@ -7338,8 +7348,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
unsigned int entries = 0;
struct nft_object *obj;
bool reset = false;
int rc = 0;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
@ -7352,6 +7364,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
entries = 0;
list_for_each_entry_rcu(obj, &table->objects, list) {
if (!nft_is_active(net, obj))
goto cont;
@ -7367,34 +7380,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
filter->type != NFT_OBJECT_UNSPEC &&
obj->ops->type->type != filter->type)
goto cont;
if (reset) {
char *buf = kasprintf(GFP_ATOMIC,
"%s:%u",
table->name,
nft_net->base_seq);
audit_log_nfcfg(buf,
family,
obj->handle,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
}
if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
rc = nf_tables_fill_obj_info(skb, net,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWOBJ,
NLM_F_MULTI | NLM_F_APPEND,
table->family, table,
obj, reset) < 0)
goto done;
obj, reset);
if (rc < 0)
break;
entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
if (reset && entries)
audit_log_obj_reset(table, nft_net->base_seq, entries);
if (rc < 0)
break;
}
done:
rcu_read_unlock();
cb->args[0] = idx;
@ -7499,7 +7505,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
audit_log_nfcfg(buf,
family,
obj->handle,
1,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);

View File

@ -688,8 +688,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;

View File

@ -364,7 +364,7 @@ static int u32_init(struct tcf_proto *tp)
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;

View File

@ -343,6 +343,12 @@ static const struct config_entry config_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
}
},
{
.ident = "Google firmware",
.matches = {
DMI_MATCH(DMI_BIOS_VERSION, "Google"),
}
},
{}
}
},

View File

@ -3251,6 +3251,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);

View File

@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
usleep_range(2000, 2050);
}
snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
TAS2780_RST);
if (ret)
dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",

View File

@ -293,7 +293,7 @@
#define ADC3XXX_BYPASS_RPGA 0x80
/* MICBIAS control bits */
#define ADC3XXX_MICBIAS_MASK 0x2
#define ADC3XXX_MICBIAS_MASK 0x3
#define ADC3XXX_MICBIAS1_SHIFT 5
#define ADC3XXX_MICBIAS2_SHIFT 3
@ -1099,7 +1099,7 @@ static int adc3xxx_parse_dt_micbias(struct adc3xxx *adc3xxx,
unsigned int val;
if (!of_property_read_u32(np, propname, &val)) {
if (val >= ADC3XXX_MICBIAS_AVDD) {
if (val > ADC3XXX_MICBIAS_AVDD) {
dev_err(dev, "Invalid property value for '%s'\n", propname);
return -EINVAL;
}

View File

@ -678,10 +678,12 @@ static int asoc_simple_probe(struct platform_device *pdev)
struct snd_soc_dai_link *dai_link = priv->dai_link;
struct simple_dai_props *dai_props = priv->dai_props;
ret = -EINVAL;
cinfo = dev->platform_data;
if (!cinfo) {
dev_err(dev, "no info for asoc-simple-card\n");
return -EINVAL;
goto err;
}
if (!cinfo->name ||
@ -690,7 +692,7 @@ static int asoc_simple_probe(struct platform_device *pdev)
!cinfo->platform ||
!cinfo->cpu_dai.name) {
dev_err(dev, "insufficient asoc_simple_card_info settings\n");
return -EINVAL;
goto err;
}
cpus = dai_link->cpus;

View File

@ -141,6 +141,13 @@ static const struct dmi_system_id community_key_platforms[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
}
},
{
.ident = "Google firmware",
.callback = chromebook_use_community_key,
.matches = {
DMI_MATCH(DMI_BIOS_VERSION, "Google"),
}
},
{},
};

View File

@ -2218,6 +2218,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x2ab6, /* T+A devices */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x2d87, /* Cayin device */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x3336, /* HEM devices */

View File

@ -252,6 +252,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
/* Avoid frequency mode for dummy events to avoid associated timers. */
.freq = 0,
.sample_period = 1,
};
return evsel__new_idx(&attr, evlist->core.nr_entries);
@ -278,8 +281,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_guest = 1;
evsel->core.attr.exclude_hv = 1;
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evsel->name = strdup("dummy:u");
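
For context, a small user-space sketch (an assumed illustration, not part of this patch) that opens the same kind of software dummy event via perf_event_open() with frequency mode off and a fixed sample period, mirroring the attr fields set in the hunk above; it assumes a Linux host where perf_event_open is permitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.size = sizeof(attr);
	attr.freq = 0;			/* period mode: no sampling-frequency timer */
	attr.sample_period = 1;

	/* measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}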

View File

@ -99,6 +99,12 @@ do_test 'nft add counter t1 c1' \
do_test 'nft add counter t2 c1; add counter t2 c2' \
'table=t2 family=2 entries=2 op=nft_register_obj'
for ((i = 3; i <= 500; i++)); do
echo "add counter t2 c$i"
done >$rulefile
do_test "nft -f $rulefile" \
'table=t2 family=2 entries=498 op=nft_register_obj'
# adding/updating quotas
do_test 'nft add quota t1 q1 { 10 bytes }' \
@ -107,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
'table=t2 family=2 entries=2 op=nft_register_obj'
for ((i = 3; i <= 500; i++)); do
echo "add quota t2 q$i { 10 bytes }"
done >$rulefile
do_test "nft -f $rulefile" \
'table=t2 family=2 entries=498 op=nft_register_obj'
# changing the quota value triggers obj update path
do_test 'nft add quota t1 q1 { 20 bytes }' \
'table=t1 family=2 entries=1 op=nft_register_obj'
@ -156,6 +168,40 @@ done
do_test 'nft reset set t1 s' \
'table=t1 family=2 entries=3 op=nft_reset_setelem'
# resetting counters
do_test 'nft reset counter t1 c1' \
'table=t1 family=2 entries=1 op=nft_reset_obj'
do_test 'nft reset counters t1' \
'table=t1 family=2 entries=1 op=nft_reset_obj'
do_test 'nft reset counters t2' \
'table=t2 family=2 entries=342 op=nft_reset_obj
table=t2 family=2 entries=158 op=nft_reset_obj'
do_test 'nft reset counters' \
'table=t1 family=2 entries=1 op=nft_reset_obj
table=t2 family=2 entries=341 op=nft_reset_obj
table=t2 family=2 entries=159 op=nft_reset_obj'
# resetting quotas
do_test 'nft reset quota t1 q1' \
'table=t1 family=2 entries=1 op=nft_reset_obj'
do_test 'nft reset quotas t1' \
'table=t1 family=2 entries=1 op=nft_reset_obj'
do_test 'nft reset quotas t2' \
'table=t2 family=2 entries=315 op=nft_reset_obj
table=t2 family=2 entries=185 op=nft_reset_obj'
do_test 'nft reset quotas' \
'table=t1 family=2 entries=1 op=nft_reset_obj
table=t2 family=2 entries=314 op=nft_reset_obj
table=t2 family=2 entries=186 op=nft_reset_obj'
# deleting rules
readarray -t handles < <(nft -a list chain t1 c1 | \