This is the 5.4.83 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl/TZOAACgkQONu9yGCS
aT4Wtw/8DvcG+dIrMU4wRfD9eqtvfldNyHnyTQyEtlKkGEuAbukqQ3FG/sSxW7/P
VWN3NviRDlc5dF+0KhGhknIHjE3Di8UsJ/8KNhmqNZOFeukNAkCWmwBgPgQ3EPki
+9b5LmVZJkQIdzIvP4PyJfDirZrOZR0YInCKT/1bEu/uhzAvFSS2ix2IckXrW/4Z
YeQjN8Kk322xe2yDpVAzvEiBYwVIrdr7ty+FRiebQ8np88EPoCzy/r0wvPT//3wQ
1lh7zbihvQ1/kJqpnJWpbOCwi5fCD8YPChBGGqZD+hBqwlrqMb/Wt5Nk73g8XzjE
pIOXyxmcxTmen3zcuon8XqLgqx6TEqObfXiFiJcPGy524F4GzNA9GvekaxXefxzg
B2A0DdGhlcSRClpPUtwF+Zpp1qQIaiCpmoMLjznsN6Isvew08ENGUPBHMHV4XrB7
XbByaBIDdGoukXHYv4sKdKpggqi2doWu7uY+M4nyNEtKI82Ct1YffBYXf8zhO6MT
XGX3K2Gzpa6SExLDST6tqbATLhf6l34QokLp/fnqtkB+MhBWqfydhKVGVwqCqvqi
W2GmgmciNT0uC95SYEqVG+Bl3ej5QK3Hgu60PUo6Up2FcaL5wWEJSkXn64fMLvOX
/COnAwFOJfbGmbgwN6uEUYDXdjHRp2HcjnFTImRF83BmdNkhPXQ=
=nQYB
-----END PGP SIGNATURE-----

Merge 5.4.83 into android11-5.4-lts

Changes in 5.4.83
    pinctrl: baytrail: Replace WARN with dev_info_once when setting direct-irq pin to output
    pinctrl: baytrail: Fix pin being driven low for a while on gpiod_get(..., GPIOD_OUT_HIGH)
    Partially revert "bpf: Zero-fill re-used per-cpu map element"
    usb: gadget: f_fs: Use local copy of descriptors for userspace copy
    USB: serial: kl5kusb105: fix memleak on open
    USB: serial: ch341: add new Product ID for CH341A
    USB: serial: ch341: sort device-id entries
    USB: serial: option: add Fibocom NL668 variants
    USB: serial: option: add support for Thales Cinterion EXS82
    USB: serial: option: fix Quectel BG96 matching
    tty: Fix ->pgrp locking in tiocspgrp()
    tty: Fix ->session locking
    ALSA: hda/realtek: Fix bass speaker DAC assignment on Asus Zephyrus G14
    ALSA: hda/realtek: Add mute LED quirk to yet another HP x360 model
    ALSA: hda/realtek: Enable headset of ASUS UX482EG & B9400CEA with ALC294
    ALSA: hda/realtek - Add new codec supported for ALC897
    ALSA: hda/generic: Add option to enforce preferred_dacs pairs
    ftrace: Fix updating FTRACE_FL_TRAMP
    cifs: allow syscalls to be restarted in __smb_send_rqst()
    cifs: fix potential use-after-free in cifs_echo_request()
    i2c: imx: Don't generate STOP condition if arbitration has been lost
    thunderbolt: Fix use-after-free in remove_unplugged_switch()
    drm/i915/gt: Program mocs:63 for cache eviction on gen9
    scsi: mpt3sas: Fix ioctl timeout
    dm writecache: fix the maximum number of arguments
    powerpc/64s/powernv: Fix memory corruption when saving SLB entries on MCE
    genirq/irqdomain: Add an irq_create_mapping_affinity() function
    powerpc/pseries: Pass MSI affinity to irq_create_mapping()
    dm: fix bug with RCU locking in dm_blk_report_zones
    dm: remove invalid sparse __acquires and __releases annotations
    x86/uprobes: Do not use prefixes.nbytes when looping over prefixes.bytes
    coredump: fix core_pattern parse error
    mm: list_lru: set shrinker map bit when child nr_items is not zero
    mm/swapfile: do not sleep with a spin lock held
    speakup: Reject setting the speakup line discipline outside of speakup
    i2c: imx: Fix reset of I2SR_IAL flag
    i2c: imx: Check for I2SR_IAL after every byte
    spi: bcm2835: Release the DMA channel if probe fails after dma_init
    iommu/amd: Set DTE[IntTabLen] to represent 512 IRTEs
    tracing: Fix userstacktrace option for instances
    lib/syscall: fix syscall registers retrieval on 32-bit platforms
    can: af_can: can_rx_unregister(): remove WARN() statement from list operation sanity check
    gfs2: check for empty rgrp tree in gfs2_ri_update
    netfilter: ipset: prevent uninit-value in hash_ip6_add
    tipc: fix a deadlock when flushing scheduled work
    ASoC: wm_adsp: fix error return code in wm_adsp_load()
    rtw88: debug: Fix uninitialized memory in debugfs code
    i2c: qup: Fix error return code in qup_i2c_bam_schedule_desc()
    dm writecache: remove BUG() and fail gracefully instead
    Input: i8042 - fix error return code in i8042_setup_aux()
    netfilter: nf_tables: avoid false-postive lockdep splat
    netfilter: nftables_offload: set address type in control dissector
    x86/insn-eval: Use new for_each_insn_prefix() macro to loop over prefixes bytes
    Revert "geneve: pull IP header before ECN decapsulation"
    Linux 5.4.83

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5753999d7c8f19f5b8b6f5f9e850c64744a257fc
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 82
+SUBLEVEL = 83
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -186,11 +186,16 @@ static void __init pnv_init(void)
     add_preferred_console("hvc", 0, NULL);

     if (!radix_enabled()) {
+        size_t size = sizeof(struct slb_entry) * mmu_slb_size;
         int i;

         /* Allocate per cpu area to save old slb contents during MCE */
-        for_each_possible_cpu(i)
-            paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+        for_each_possible_cpu(i) {
+            paca_ptrs[i]->mce_faulty_slbs =
+                    memblock_alloc_node(size,
+                        __alignof__(struct slb_entry),
+                        cpu_to_node(i));
+        }
     }
 }
@@ -458,7 +458,8 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
             return hwirq;
         }

-        virq = irq_create_mapping(NULL, hwirq);
+        virq = irq_create_mapping_affinity(NULL, hwirq,
+                           entry->affinity);

         if (!virq) {
             pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
@@ -195,6 +195,21 @@ static inline int insn_offset_immediate(struct insn *insn)
     return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }

+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that this @idx is just for a cursor,
+ * do not change it.)
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+    for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
 #define POP_SS_OPCODE 0x1f
 #define MOV_SREG_OPCODE 0x8e

@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {

 static bool is_prefix_bad(struct insn *insn)
 {
+    insn_byte_t p;
     int i;

-    for (i = 0; i < insn->prefixes.nbytes; i++) {
+    for_each_insn_prefix(insn, i, p) {
         insn_attr_t attr;

-        attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+        attr = inat_get_opcode_attribute(p);
         switch (attr) {
         case INAT_MAKE_PREFIX(INAT_PFX_ES):
         case INAT_MAKE_PREFIX(INAT_PFX_CS):
@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
 {
     u8 opc1 = OPCODE1(insn);
+    insn_byte_t p;
     int i;

     switch (opc1) {
@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
      * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
      * No one uses these insns, reject any branch insns with such prefix.
      */
-    for (i = 0; i < insn->prefixes.nbytes; i++) {
-        if (insn->prefixes.bytes[i] == 0x66)
+    for_each_insn_prefix(insn, i, p) {
+        if (p == 0x66)
             return -ENOTSUPP;
     }

@@ -70,14 +70,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
 {
     int idx = INAT_SEG_REG_DEFAULT;
     int num_overrides = 0, i;
+    insn_byte_t p;

     insn_get_prefixes(insn);

     /* Look for any segment override prefixes. */
-    for (i = 0; i < insn->prefixes.nbytes; i++) {
+    for_each_insn_prefix(insn, i, p) {
         insn_attr_t attr;

-        attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+        attr = inat_get_opcode_attribute(p);
         switch (attr) {
         case INAT_MAKE_PREFIX(INAT_PFX_CS):
             idx = INAT_SEG_REG_CS;
@@ -130,7 +130,19 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
     GEN9_MOCS_ENTRIES,
     MOCS_ENTRY(I915_MOCS_CACHED,
            LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
-           L3_3_WB)
+           L3_3_WB),
+
+    /*
+     * mocs:63
+     * - used by the L3 for all of its evictions.
+     *   Thus it is expected to allow LLC cacheability to enable coherent
+     *   flows to be maintained.
+     * - used to force L3 uncachable cycles.
+     *   Thus it is expected to make the surface L3 uncacheable.
+     */
+    MOCS_ENTRY(63,
+           LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+           L3_1_UC)
 };

 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -414,6 +414,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
     dma->chan_using = NULL;
 }

+static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
+{
+    unsigned int temp;
+
+    /*
+     * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
+     * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
+     * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
+     */
+    temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
+    imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+}
+
 static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
 {
     unsigned long orig_jiffies = jiffies;
@@ -426,8 +439,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)

         /* check for arbitration lost */
         if (temp & I2SR_IAL) {
-            temp &= ~I2SR_IAL;
-            imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+            i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
             return -EAGAIN;
         }

@@ -458,6 +470,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
         dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
         return -ETIMEDOUT;
     }

+    /* check for arbitration lost */
+    if (i2c_imx->i2csr & I2SR_IAL) {
+        dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
+        i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+
+        i2c_imx->i2csr = 0;
+        return -EAGAIN;
+    }
+
     dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
     i2c_imx->i2csr = 0;
     return 0;
@@ -567,6 +589,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
     /* Stop I2C transaction */
     dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
     temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+    if (!(temp & I2CR_MSTA))
+        i2c_imx->stopped = 1;
     temp &= ~(I2CR_MSTA | I2CR_MTX);
     if (i2c_imx->dma)
         temp &= ~I2CR_DMAEN;
@@ -597,9 +621,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
     if (temp & I2SR_IIF) {
         /* save status register */
         i2c_imx->i2csr = temp;
-        temp &= ~I2SR_IIF;
-        temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
-        imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+        i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
         wake_up(&i2c_imx->queue);
         return IRQ_HANDLED;
     }
@@ -732,9 +754,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
          */
         dev_dbg(dev, "<%s> clear MSTA\n", __func__);
         temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+        if (!(temp & I2CR_MSTA))
+            i2c_imx->stopped = 1;
         temp &= ~(I2CR_MSTA | I2CR_MTX);
         imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
-        i2c_imx_bus_busy(i2c_imx, 0);
+        if (!i2c_imx->stopped)
+            i2c_imx_bus_busy(i2c_imx, 0);
     } else {
         /*
          * For i2c master receiver repeat restart operation like:
@@ -857,9 +882,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
         dev_dbg(&i2c_imx->adapter.dev,
             "<%s> clear MSTA\n", __func__);
         temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+        if (!(temp & I2CR_MSTA))
+            i2c_imx->stopped = 1;
         temp &= ~(I2CR_MSTA | I2CR_MTX);
         imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
-        i2c_imx_bus_busy(i2c_imx, 0);
+        if (!i2c_imx->stopped)
+            i2c_imx_bus_busy(i2c_imx, 0);
     } else {
         /*
          * For i2c master receiver repeat restart operation like:
@@ -806,7 +806,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
     if (ret || qup->bus_err || qup->qup_err) {
         reinit_completion(&qup->xfer);

-        if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+        ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+        if (ret) {
             dev_err(qup->dev, "change to run state timed out");
             goto desc_err;
         }
@@ -1468,7 +1468,8 @@ static int __init i8042_setup_aux(void)
     if (error)
         goto err_free_ports;

-    if (aux_enable())
+    error = aux_enable();
+    if (error)
         goto err_free_irq;

     i8042_aux_irq_registered = true;
@@ -254,7 +254,7 @@
 #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
 #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
 #define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
 #define DTE_IRQ_REMAP_ENABLE 1ULL

 #define PAGE_MODE_NONE 0x00
@@ -316,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 #else
 static int persistent_memory_claim(struct dm_writecache *wc)
 {
-    BUG();
+    return -EOPNOTSUPP;
 }
 #endif

@@ -1889,7 +1889,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
     struct wc_memory_superblock s;

     static struct dm_arg _args[] = {
-        {0, 10, "Invalid number of feature args"},
+        {0, 16, "Invalid number of feature args"},
     };

     as.argc = argc;
@@ -491,8 +491,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
         return -EAGAIN;

     map = dm_get_live_table(md, &srcu_idx);
-    if (!map)
-        return -EIO;
+    if (!map) {
+        ret = -EIO;
+        goto out;
+    }

     do {
         struct dm_target *tgt;
@@ -521,7 +523,6 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,

 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
                 struct block_device **bdev)
-    __acquires(md->io_barrier)
 {
     struct dm_target *tgt;
     struct dm_table *map;
@@ -555,7 +556,6 @@ static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 }

 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
-    __releases(md->io_barrier)
 {
     dm_put_live_table(md, srcu_idx);
 }
@@ -254,21 +254,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
     skb_dst_set(skb, &tun_dst->dst);

     /* Ignore packet loops (and multicast echo) */
-    if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
-        goto rx_error;
-
-    switch (skb_protocol(skb, true)) {
-    case htons(ETH_P_IP):
-        if (pskb_may_pull(skb, sizeof(struct iphdr)))
-            goto rx_error;
-        break;
-    case htons(ETH_P_IPV6):
-        if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-            goto rx_error;
-        break;
-    default:
-        goto rx_error;
+    if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+        geneve->dev->stats.rx_errors++;
+        goto drop;
     }
+
     oiph = skb_network_header(skb);
     skb_reset_network_header(skb);

@@ -309,8 +299,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
         u64_stats_update_end(&stats->syncp);
     }
     return;
-rx_error:
-    geneve->dev->stats.rx_errors++;
 drop:
     /* Consume bad packet */
     kfree_skb(skb);
@@ -146,6 +146,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size,
 {
     int tmp_len;

+    memset(tmp, 0, size);
+
     if (count < num)
         return -EFAULT;

@ -811,6 +811,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
|
||||
pm_runtime_put(&vg->pdev->dev);
|
||||
}
|
||||
|
||||
static void byt_gpio_direct_irq_check(struct byt_gpio *vg,
|
||||
unsigned int offset)
|
||||
{
|
||||
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
|
||||
|
||||
/*
|
||||
* Before making any direction modifications, do a check if gpio is set
|
||||
* for direct IRQ. On Bay Trail, setting GPIO to output does not make
|
||||
* sense, so let's at least inform the caller before they shoot
|
||||
* themselves in the foot.
|
||||
*/
|
||||
if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
|
||||
dev_info_once(&vg->pdev->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
|
||||
}
|
||||
|
||||
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
|
||||
struct pinctrl_gpio_range *range,
|
||||
unsigned int offset,
|
||||
@ -818,7 +833,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
|
||||
{
|
||||
struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
|
||||
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
|
||||
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
|
||||
unsigned long flags;
|
||||
u32 value;
|
||||
|
||||
@ -829,14 +843,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
|
||||
if (input)
|
||||
value |= BYT_OUTPUT_EN;
|
||||
else
|
||||
/*
|
||||
* Before making any direction modifications, do a check if gpio
|
||||
* is set for direct IRQ. On baytrail, setting GPIO to output
|
||||
* does not make sense, so let's at least warn the caller before
|
||||
* they shoot themselves in the foot.
|
||||
*/
|
||||
WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
|
||||
"Potential Error: Setting GPIO with direct_irq_en to output");
|
||||
byt_gpio_direct_irq_check(vg, offset);
|
||||
|
||||
writel(value, val_reg);
|
||||
|
||||
raw_spin_unlock_irqrestore(&byt_lock, flags);
|
||||
@ -1176,19 +1184,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
|
||||
|
||||
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
|
||||
{
|
||||
return pinctrl_gpio_direction_input(chip->base + offset);
|
||||
struct byt_gpio *vg = gpiochip_get_data(chip);
|
||||
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
raw_spin_lock_irqsave(&byt_lock, flags);
|
||||
|
||||
reg = readl(val_reg);
|
||||
reg &= ~BYT_DIR_MASK;
|
||||
reg |= BYT_OUTPUT_EN;
|
||||
writel(reg, val_reg);
|
||||
|
||||
raw_spin_unlock_irqrestore(&byt_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note despite the temptation this MUST NOT be converted into a call to
|
||||
* pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this
|
||||
* MUST be done as a single BYT_VAL_REG register write.
|
||||
* See the commit message of the commit adding this comment for details.
|
||||
*/
|
||||
static int byt_gpio_direction_output(struct gpio_chip *chip,
|
||||
unsigned int offset, int value)
|
||||
{
|
||||
int ret = pinctrl_gpio_direction_output(chip->base + offset);
|
||||
struct byt_gpio *vg = gpiochip_get_data(chip);
|
||||
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
raw_spin_lock_irqsave(&byt_lock, flags);
|
||||
|
||||
byt_gpio_set(chip, offset, value);
|
||||
byt_gpio_direct_irq_check(vg, offset);
|
||||
|
||||
reg = readl(val_reg);
|
||||
reg &= ~BYT_DIR_MASK;
|
||||
if (value)
|
||||
reg |= BYT_LEVEL;
|
||||
else
|
||||
reg &= ~BYT_LEVEL;
|
||||
|
||||
writel(reg, val_reg);
|
||||
|
||||
raw_spin_unlock_irqrestore(&byt_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -650,7 +650,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
     Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
     struct _pcie_device *pcie_device = NULL;
     u16 smid;
-    u8 timeout;
+    unsigned long timeout;
     u8 issue_reset;
     u32 sz, sz_arg;
     void *psge;
@@ -1310,21 +1310,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
                    dev_name(&pdev->dev), ctlr);
     if (err) {
         dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
-        goto out_clk_disable;
+        goto out_dma_release;
     }

     err = spi_register_controller(ctlr);
     if (err) {
         dev_err(&pdev->dev, "could not register SPI controller: %d\n",
             err);
-        goto out_clk_disable;
+        goto out_dma_release;
     }

     bcm2835_debugfs_create(bs, dev_name(&pdev->dev));

     return 0;

-out_clk_disable:
+out_dma_release:
+    bcm2835_dma_release(ctlr, bs);
     clk_disable_unprepare(bs->clk);
     return err;
 }
@ -47,27 +47,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
|
||||
{
|
||||
struct spk_ldisc_data *ldisc_data;
|
||||
|
||||
if (tty != speakup_tty)
|
||||
/* Somebody tried to use this line discipline outside speakup */
|
||||
return -ENODEV;
|
||||
|
||||
if (!tty->ops->write)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&speakup_tty_mutex);
|
||||
if (speakup_tty) {
|
||||
mutex_unlock(&speakup_tty_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
speakup_tty = tty;
|
||||
|
||||
ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
|
||||
if (!ldisc_data) {
|
||||
speakup_tty = NULL;
|
||||
mutex_unlock(&speakup_tty_mutex);
|
||||
if (!ldisc_data)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
init_completion(&ldisc_data->completion);
|
||||
ldisc_data->buf_free = true;
|
||||
speakup_tty->disc_data = ldisc_data;
|
||||
mutex_unlock(&speakup_tty_mutex);
|
||||
tty->disc_data = ldisc_data;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -189,9 +182,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
|
||||
|
||||
tty_unlock(tty);
|
||||
|
||||
mutex_lock(&speakup_tty_mutex);
|
||||
speakup_tty = tty;
|
||||
ret = tty_set_ldisc(tty, N_SPEAKUP);
|
||||
if (ret)
|
||||
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
|
||||
speakup_tty = NULL;
|
||||
mutex_unlock(&speakup_tty_mutex);
|
||||
|
||||
if (!ret)
|
||||
/* Success */
|
||||
return 0;
|
||||
|
||||
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
|
||||
|
||||
tty_lock(tty);
|
||||
if (tty->ops->close)
|
||||
tty->ops->close(tty, NULL);
|
||||
tty_unlock(tty);
|
||||
|
||||
tty_kclose(tty);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -1919,7 +1919,9 @@ static int complete_rpm(struct device *dev, void *data)

 static void remove_unplugged_switch(struct tb_switch *sw)
 {
-    pm_runtime_get_sync(sw->dev.parent);
+    struct device *parent = get_device(sw->dev.parent);
+
+    pm_runtime_get_sync(parent);

     /*
      * Signal this and switches below for rpm_complete because
@@ -1930,8 +1932,10 @@ static void remove_unplugged_switch(struct tb_switch *sw)
     bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
     tb_switch_remove(sw);

-    pm_runtime_mark_last_busy(sw->dev.parent);
-    pm_runtime_put_autosuspend(sw->dev.parent);
+    pm_runtime_mark_last_busy(parent);
+    pm_runtime_put_autosuspend(parent);
+
+    put_device(parent);
 }

 static void icm_free_unplugged_children(struct tb_switch *sw)
@@ -2894,10 +2894,14 @@ void __do_SAK(struct tty_struct *tty)
     struct task_struct *g, *p;
     struct pid *session;
     int i;
+    unsigned long flags;

     if (!tty)
         return;
-    session = tty->session;
+
+    spin_lock_irqsave(&tty->ctrl_lock, flags);
+    session = get_pid(tty->session);
+    spin_unlock_irqrestore(&tty->ctrl_lock, flags);

     tty_ldisc_flush(tty);

@@ -2929,6 +2933,7 @@ void __do_SAK(struct tty_struct *tty)
         task_unlock(p);
     } while_each_thread(g, p);
     read_unlock(&tasklist_lock);
+    put_pid(session);
 #endif
 }

@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty)
|
||||
put_pid(tty->session);
|
||||
put_pid(tty->pgrp);
|
||||
tty->pgrp = get_pid(task_pgrp(current));
|
||||
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
|
||||
tty->session = get_pid(task_session(current));
|
||||
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
|
||||
if (current->signal->tty) {
|
||||
tty_debug(tty, "current tty %s not NULL!!\n",
|
||||
current->signal->tty->name);
|
||||
@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit)
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
put_pid(current->signal->tty_old_pgrp);
|
||||
current->signal->tty_old_pgrp = NULL;
|
||||
|
||||
tty = tty_kref_get(current->signal->tty);
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
if (tty) {
|
||||
unsigned long flags;
|
||||
|
||||
tty_lock(tty);
|
||||
spin_lock_irqsave(&tty->ctrl_lock, flags);
|
||||
put_pid(tty->session);
|
||||
put_pid(tty->pgrp);
|
||||
tty->session = NULL;
|
||||
tty->pgrp = NULL;
|
||||
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
|
||||
tty_unlock(tty);
|
||||
tty_kref_put(tty);
|
||||
}
|
||||
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
/* Now clear signal->tty under the lock */
|
||||
read_lock(&tasklist_lock);
|
||||
session_clear_tty(task_session(current));
|
||||
@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
|
||||
return -ENOTTY;
|
||||
if (retval)
|
||||
return retval;
|
||||
if (!current->signal->tty ||
|
||||
(current->signal->tty != real_tty) ||
|
||||
(real_tty->session != task_session(current)))
|
||||
return -ENOTTY;
|
||||
|
||||
if (get_user(pgrp_nr, p))
|
||||
return -EFAULT;
|
||||
if (pgrp_nr < 0)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irq(&real_tty->ctrl_lock);
|
||||
if (!current->signal->tty ||
|
||||
(current->signal->tty != real_tty) ||
|
||||
(real_tty->session != task_session(current))) {
|
||||
retval = -ENOTTY;
|
||||
goto out_unlock_ctrl;
|
||||
}
|
||||
rcu_read_lock();
|
||||
pgrp = find_vpid(pgrp_nr);
|
||||
retval = -ESRCH;
|
||||
@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
|
||||
if (session_of_pgrp(pgrp) != task_session(current))
|
||||
goto out_unlock;
|
||||
retval = 0;
|
||||
spin_lock_irq(&tty->ctrl_lock);
|
||||
put_pid(real_tty->pgrp);
|
||||
real_tty->pgrp = get_pid(pgrp);
|
||||
spin_unlock_irq(&tty->ctrl_lock);
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
out_unlock_ctrl:
|
||||
spin_unlock_irq(&real_tty->ctrl_lock);
|
||||
return retval;
|
||||
}
|
||||
|
||||
@ -511,20 +519,30 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
|
||||
*
|
||||
* Obtain the session id of the tty. If there is no session
|
||||
* return an error.
|
||||
*
|
||||
* Locking: none. Reference to current->signal->tty is safe.
|
||||
*/
|
||||
static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
|
||||
{
|
||||
unsigned long flags;
|
||||
pid_t sid;
|
||||
|
||||
/*
|
||||
* (tty == real_tty) is a cheap way of
|
||||
* testing if the tty is NOT a master pty.
|
||||
*/
|
||||
if (tty == real_tty && current->signal->tty != real_tty)
|
||||
return -ENOTTY;
|
||||
|
||||
spin_lock_irqsave(&real_tty->ctrl_lock, flags);
|
||||
if (!real_tty->session)
|
||||
return -ENOTTY;
|
||||
return put_user(pid_vnr(real_tty->session), p);
|
||||
goto err;
|
||||
sid = pid_vnr(real_tty->session);
|
||||
spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
|
||||
|
||||
return put_user(sid, p);
|
||||
|
||||
err:
|
||||
spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -1328,7 +1328,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
     case FUNCTIONFS_ENDPOINT_DESC:
     {
         int desc_idx;
-        struct usb_endpoint_descriptor *desc;
+        struct usb_endpoint_descriptor desc1, *desc;

         switch (epfile->ffs->gadget->speed) {
         case USB_SPEED_SUPER:
@@ -1340,10 +1340,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
         default:
             desc_idx = 0;
         }

         desc = epfile->ep->descs[desc_idx];
+        memcpy(&desc1, desc, desc->bLength);
+
         spin_unlock_irq(&epfile->ffs->eps_lock);
-        ret = copy_to_user((void __user *)value, desc, desc->bLength);
+        ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
         if (ret)
             ret = -EFAULT;
         return ret;
@@ -80,10 +80,11 @@
 #define CH341_LCR_CS5 0x00

 static const struct usb_device_id id_table[] = {
-    { USB_DEVICE(0x4348, 0x5523) },
+    { USB_DEVICE(0x1a86, 0x5512) },
+    { USB_DEVICE(0x1a86, 0x5523) },
     { USB_DEVICE(0x1a86, 0x7522) },
     { USB_DEVICE(0x1a86, 0x7523) },
-    { USB_DEVICE(0x1a86, 0x5523) },
+    { USB_DEVICE(0x4348, 0x5523) },
     { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -276,12 +276,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
     priv->cfg.unknown2 = cfg->unknown2;
     spin_unlock_irqrestore(&priv->lock, flags);

+    kfree(cfg);
+
     /* READ_ON and urb submission */
     rc = usb_serial_generic_open(tty, port);
-    if (rc) {
-        retval = rc;
-        goto err_free_cfg;
-    }
+    if (rc)
+        return rc;

     rc = usb_control_msg(port->serial->dev,
                  usb_sndctrlpipe(port->serial->dev, 0),
@@ -324,8 +324,6 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
                  KLSI_TIMEOUT);
 err_generic_close:
     usb_serial_generic_close(port);
-err_free_cfg:
-    kfree(cfg);

     return retval;
 }
@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define CINTERION_PRODUCT_PH8 0x0053
|
||||
#define CINTERION_PRODUCT_AHXX 0x0055
|
||||
#define CINTERION_PRODUCT_PLXX 0x0060
|
||||
#define CINTERION_PRODUCT_EXS82 0x006c
|
||||
#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
|
||||
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
|
||||
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
|
||||
@ -1105,9 +1106,8 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
|
||||
.driver_info = NUMEP2 },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
|
||||
.driver_info = NUMEP2 },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
|
||||
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
|
||||
.driver_info = RSVD(4) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
|
||||
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
|
||||
@ -1902,6 +1902,7 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
|
||||
.driver_info = RSVD(0) | RSVD(4) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) },
|
||||
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
|
||||
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
|
||||
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
|
||||
@ -2046,12 +2047,13 @@ static const struct usb_device_id option_ids[] = {
|
||||
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
|
||||
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
|
||||
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
|
||||
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
|
||||
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
|
||||
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
|
||||
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
|
||||
.driver_info = RSVD(4) | RSVD(5) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
|
||||
.driver_info = RSVD(6) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
|
||||
|
@@ -975,6 +975,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
     list_del_init(&server->tcp_ses_list);
     spin_unlock(&cifs_tcp_ses_lock);

+    cancel_delayed_work_sync(&server->echo);
+
     spin_lock(&GlobalMid_Lock);
     server->tcpStatus = CifsExiting;
     spin_unlock(&GlobalMid_Lock);
@@ -340,8 +340,8 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
         return -EAGAIN;

     if (signal_pending(current)) {
-        cifs_dbg(FYI, "signal is pending before sending any data\n");
-        return -EINTR;
+        cifs_dbg(FYI, "signal pending before send request\n");
+        return -ERESTARTSYS;
     }

     /* cork the socket */
@@ -224,7 +224,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
          */
         if (ispipe) {
             if (isspace(*pat_ptr)) {
-                was_space = true;
+                if (cn->used != 0)
+                    was_space = true;
                 pat_ptr++;
                 continue;
             } else if (was_space) {
@@ -1008,6 +1008,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
     if (error < 0)
         return error;

+    if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
+        fs_err(sdp, "no resource groups found in the file system.\n");
+        return -ENOENT;
+    }
     set_rgrp_preferences(sdp);

     sdp->sd_rindex_uptodate = 1;
@@ -389,11 +389,19 @@ extern void irq_domain_associate_many(struct irq_domain *domain,
 extern void irq_domain_disassociate(struct irq_domain *domain,
                     unsigned int irq);

-extern unsigned int irq_create_mapping(struct irq_domain *host,
-                       irq_hw_number_t hwirq);
+extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
+                       irq_hw_number_t hwirq,
+                       const struct irq_affinity_desc *affinity);
 extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
 extern void irq_dispose_mapping(unsigned int virq);

+static inline unsigned int irq_create_mapping(struct irq_domain *host,
+                          irq_hw_number_t hwirq)
+{
+    return irq_create_mapping_affinity(host, hwirq, NULL);
+}
+

 /**
  * irq_linear_revmap() - Find a linux irq from a hw irq number.
  * @domain: domain owning this hardware interrupt
@@ -306,6 +306,10 @@ struct tty_struct {
     struct termiox *termiox;    /* May be NULL for unsupported */
     char name[64];
     struct pid *pgrp;       /* Protected by ctrl lock */
+    /*
+     * Writes protected by both ctrl lock and legacy mutex, readers must use
+     * at least one of them.
+     */
     struct pid *session;
     unsigned long flags;
     int count;
@@ -37,6 +37,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,

 struct nft_flow_key {
     struct flow_dissector_key_basic basic;
+    struct flow_dissector_key_control control;
     union {
         struct flow_dissector_key_ipv4_addrs ipv4;
         struct flow_dissector_key_ipv6_addrs ipv6;
@@ -61,6 +62,9 @@ struct nft_flow_rule {

 #define NFT_OFFLOAD_F_ACTION (1 << 0)

+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+                 enum flow_dissector_key_id addr_type);
+
 struct nft_rule;
 struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
 void nft_flow_rule_destroy(struct nft_flow_rule *flow);
@@ -639,17 +639,19 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 EXPORT_SYMBOL_GPL(irq_create_direct_mapping);

 /**
- * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
  * @domain: domain owning this hardware interrupt or NULL for default domain
  * @hwirq: hardware irq number in that domain space
+ * @affinity: irq affinity
  *
  * Only one mapping per hardware interrupt is permitted. Returns a linux
  * irq number.
  * If the sense/trigger is to be specified, set_irq_type() should be called
  * on the number returned from that call.
  */
-unsigned int irq_create_mapping(struct irq_domain *domain,
-                irq_hw_number_t hwirq)
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+                     irq_hw_number_t hwirq,
+                     const struct irq_affinity_desc *affinity)
 {
     struct device_node *of_node;
     int virq;
@@ -675,7 +677,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
     }

     /* Allocate a virtual interrupt number */
-    virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
+    virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+                      affinity);
     if (virq <= 0) {
         pr_debug("-> virq allocation failed\n");
         return 0;
@@ -691,7 +694,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,

     return virq;
 }
-EXPORT_SYMBOL_GPL(irq_create_mapping);
+EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);

 /**
  * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
@ -1626,6 +1626,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
|
||||
static struct ftrace_ops *
|
||||
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
|
||||
static struct ftrace_ops *
|
||||
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
|
||||
static struct ftrace_ops *
|
||||
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
|
||||
|
||||
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
|
||||
@ -1763,7 +1765,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
|
||||
* to it.
|
||||
*/
|
||||
if (ftrace_rec_count(rec) == 1 &&
|
||||
ftrace_find_tramp_ops_any(rec))
|
||||
ftrace_find_tramp_ops_any_other(rec, ops))
|
||||
rec->flags |= FTRACE_FL_TRAMP;
|
||||
else
|
||||
rec->flags &= ~FTRACE_FL_TRAMP;
|
||||
@ -2191,6 +2193,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct ftrace_ops *
|
||||
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
|
||||
{
|
||||
struct ftrace_ops *op;
|
||||
unsigned long ip = rec->ip;
|
||||
|
||||
do_for_each_ftrace_op(op, ftrace_ops_list) {
|
||||
|
||||
if (op == op_exclude || !op->trampoline)
|
||||
continue;
|
||||
|
||||
if (hash_contains_ip(ip, op->func_hash))
|
||||
return op;
|
||||
} while_for_each_ftrace_op(op);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct ftrace_ops *
|
||||
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
|
||||
struct ftrace_ops *op)
|
||||
|
@ -160,7 +160,8 @@ static union trace_eval_map_item *trace_eval_maps;
|
||||
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
|
||||
|
||||
static int tracing_set_tracer(struct trace_array *tr, const char *buf);
|
||||
static void ftrace_trace_userstack(struct ring_buffer *buffer,
|
||||
static void ftrace_trace_userstack(struct trace_array *tr,
|
||||
struct ring_buffer *buffer,
|
||||
unsigned long flags, int pc);
|
||||
|
||||
#define MAX_TRACER_SIZE 100
|
||||
@ -2621,7 +2622,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
|
||||
* two. They are not that meaningful.
|
||||
*/
|
||||
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
|
||||
ftrace_trace_userstack(buffer, flags, pc);
|
||||
ftrace_trace_userstack(tr, buffer, flags, pc);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2936,13 +2937,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
|
||||
static DEFINE_PER_CPU(int, user_stack_count);
|
||||
|
||||
static void
|
||||
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
|
||||
ftrace_trace_userstack(struct trace_array *tr,
|
||||
struct ring_buffer *buffer, unsigned long flags, int pc)
|
||||
{
|
||||
struct trace_event_call *call = &event_user_stack;
|
||||
struct ring_buffer_event *event;
|
||||
struct userstack_entry *entry;
|
||||
|
||||
if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
|
||||
if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -2981,7 +2983,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
|
||||
preempt_enable();
|
||||
}
|
||||
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
|
||||
static void ftrace_trace_userstack(struct ring_buffer *buffer,
|
||||
static void ftrace_trace_userstack(struct trace_array *tr,
|
||||
struct ring_buffer *buffer,
|
||||
unsigned long flags, int pc)
|
||||
{
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@

 static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
+    unsigned long args[6] = { };
     struct pt_regs *regs;

     if (!try_get_task_stack(target)) {
@@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info

     info->data.nr = syscall_get_nr(target, regs);
     if (info->data.nr != -1L)
-        syscall_get_arguments(target, regs,
-                      (unsigned long *)&info->data.args[0]);
+        syscall_get_arguments(target, regs, args);
+
+    info->data.args[0] = args[0];
+    info->data.args[1] = args[1];
+    info->data.args[2] = args[2];
+    info->data.args[3] = args[3];
+    info->data.args[4] = args[4];
+    info->data.args[5] = args[5];

     put_task_stack(target);
     return 0;
@@ -544,7 +544,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
     struct list_lru_node *nlru = &lru->node[nid];
     int dst_idx = dst_memcg->kmemcg_id;
     struct list_lru_one *src, *dst;
-    bool set;

     /*
      * Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -556,11 +555,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
     dst = list_lru_from_memcg_idx(nlru, dst_idx);

     list_splice_init(&src->list, &dst->list);
-    set = (!dst->nr_items && src->nr_items);
-    dst->nr_items += src->nr_items;
-    if (set)
+
+    if (src->nr_items) {
+        dst->nr_items += src->nr_items;
         memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
-    src->nr_items = 0;
+        src->nr_items = 0;
+    }

     spin_unlock_irq(&nlru->lock);
 }
@@ -2824,6 +2824,7 @@ late_initcall(max_swapfiles_check);
 static struct swap_info_struct *alloc_swap_info(void)
 {
     struct swap_info_struct *p;
+    struct swap_info_struct *defer = NULL;
     unsigned int type;
     int i;

@@ -2852,7 +2853,7 @@ static struct swap_info_struct *alloc_swap_info(void)
         smp_wmb();
         WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
     } else {
-        kvfree(p);
+        defer = p;
         p = swap_info[type];
         /*
          * Do not memset this entry: a racing procfs swap_next()
@@ -2865,6 +2866,7 @@ static struct swap_info_struct *alloc_swap_info(void)
         plist_node_init(&p->avail_lists[i], 0);
     p->flags = SWP_USED;
     spin_unlock(&swap_lock);
+    kvfree(defer);
     spin_lock_init(&p->lock);
     spin_lock_init(&p->cont_lock);

@@ -539,10 +539,13 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,

     /* Check for bugs in CAN protocol implementations using af_can.c:
      * 'rcv' will be NULL if no matching list item was found for removal.
+     * As this case may potentially happen when closing a socket while
+     * the notifier for removing the CAN netdev is running we just print
+     * a warning here.
      */
     if (!rcv) {
-        WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
-             DNAME(dev), can_id, mask);
+        pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
+            DNAME(dev), can_id, mask);
         goto out;
     }

@@ -285,8 +285,7 @@ flag_nested(const struct nlattr *nla)

 static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
     [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
-    [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
-                     .len = sizeof(struct in6_addr) },
+    [IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
 };

 int
@@ -560,7 +560,8 @@ static int nft_request_module(struct net *net, const char *fmt, ...)
 static void lockdep_nfnl_nft_mutex_not_held(void)
 {
 #ifdef CONFIG_PROVE_LOCKING
-    WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+    if (debug_locks)
+        WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
 #endif
 }

@@ -28,6 +28,23 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
     return flow;
 }

+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+                 enum flow_dissector_key_id addr_type)
+{
+    struct nft_flow_match *match = &flow->match;
+    struct nft_flow_key *mask = &match->mask;
+    struct nft_flow_key *key = &match->key;
+
+    if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
+        return;
+
+    key->control.addr_type = addr_type;
+    mask->control.addr_type = 0xffff;
+    match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+    match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
+        offsetof(struct nft_flow_key, control);
+}
+
 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
                        const struct nft_rule *rule)
 {
@@ -197,6 +197,7 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,

         NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                   sizeof(struct in_addr), reg);
+        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
         break;
     case offsetof(struct iphdr, daddr):
         if (priv->len != sizeof(struct in_addr))
@@ -204,6 +205,7 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,

         NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                   sizeof(struct in_addr), reg);
+        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
         break;
     case offsetof(struct iphdr, protocol):
         if (priv->len != sizeof(__u8))
@@ -233,6 +235,7 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,

         NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                   sizeof(struct in6_addr), reg);
+        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
         break;
     case offsetof(struct ipv6hdr, daddr):
         if (priv->len != sizeof(struct in6_addr))
@@ -240,6 +243,7 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,

         NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                   sizeof(struct in6_addr), reg);
+        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
         break;
     case offsetof(struct ipv6hdr, nexthdr):
         if (priv->len != sizeof(__u8))
@ -59,6 +59,7 @@ static int __net_init tipc_init_net(struct net *net)
|
||||
tn->trial_addr = 0;
|
||||
tn->addr_trial_end = 0;
|
||||
tn->capabilities = TIPC_NODE_CAPABILITIES;
|
||||
INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
|
||||
memset(tn->node_id, 0, sizeof(tn->node_id));
|
||||
memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
|
||||
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
|
||||
@ -96,13 +97,13 @@ static int __net_init tipc_init_net(struct net *net)
|
||||
|
||||
static void __net_exit tipc_exit_net(struct net *net)
|
||||
{
|
||||
struct tipc_net *tn = tipc_net(net);
|
||||
|
||||
tipc_detach_loopback(net);
|
||||
/* Make sure the tipc_net_finalize_work() finished */
|
||||
cancel_work_sync(&tn->final_work.work);
|
||||
tipc_net_stop(net);
|
||||
|
||||
/* Make sure the tipc_net_finalize_work stopped
|
||||
* before releasing the resources.
|
||||
*/
|
||||
flush_scheduled_work();
|
||||
tipc_bcast_stop(net);
|
||||
tipc_nametbl_stop(net);
|
||||
tipc_sk_rht_destroy(net);
|
||||
|
@ -86,6 +86,12 @@ extern unsigned int tipc_net_id __read_mostly;
|
||||
extern int sysctl_tipc_rmem[3] __read_mostly;
|
||||
extern int sysctl_tipc_named_timeout __read_mostly;
|
||||
|
||||
struct tipc_net_work {
|
||||
struct work_struct work;
|
||||
struct net *net;
|
||||
u32 addr;
|
||||
};
|
||||
|
||||
struct tipc_net {
|
||||
u8 node_id[NODE_ID_LEN];
|
||||
u32 node_addr;
|
||||
@ -134,6 +140,9 @@ struct tipc_net {
|
||||
|
||||
/* Tracing of node internal messages */
|
||||
struct packet_type loopback_pt;
|
||||
|
||||
/* Work item for net finalize */
|
||||
struct tipc_net_work final_work;
|
||||
};
|
||||
|
||||
static inline struct tipc_net *tipc_net(struct net *net)
|
||||
|
@ -105,12 +105,6 @@
|
||||
* - A local spin_lock protecting the queue of subscriber events.
|
||||
*/
|
||||
|
||||
struct tipc_net_work {
|
||||
struct work_struct work;
|
||||
struct net *net;
|
||||
u32 addr;
|
||||
};
|
||||
|
||||
static void tipc_net_finalize(struct net *net, u32 addr);
|
||||
|
||||
int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
|
||||
@ -142,25 +136,21 @@ static void tipc_net_finalize(struct net *net, u32 addr)
|
||||
TIPC_CLUSTER_SCOPE, 0, addr);
|
||||
}
|
||||
|
||||
static void tipc_net_finalize_work(struct work_struct *work)
|
||||
void tipc_net_finalize_work(struct work_struct *work)
|
||||
{
|
||||
struct tipc_net_work *fwork;
|
||||
|
||||
fwork = container_of(work, struct tipc_net_work, work);
|
||||
tipc_net_finalize(fwork->net, fwork->addr);
|
||||
kfree(fwork);
|
||||
}
|
||||
|
||||
void tipc_sched_net_finalize(struct net *net, u32 addr)
|
||||
{
|
||||
struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
|
||||
struct tipc_net *tn = tipc_net(net);
|
||||
|
||||
if (!fwork)
|
||||
return;
|
||||
INIT_WORK(&fwork->work, tipc_net_finalize_work);
|
||||
fwork->net = net;
|
||||
fwork->addr = addr;
|
||||
schedule_work(&fwork->work);
|
||||
tn->final_work.net = net;
|
||||
tn->final_work.addr = addr;
|
||||
schedule_work(&tn->final_work.work);
|
||||
}
|
||||
|
||||
void tipc_net_stop(struct net *net)
|
||||
|
@@ -42,6 +42,7 @@
 extern const struct nla_policy tipc_nl_net_policy[];

 int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
+void tipc_net_finalize_work(struct work_struct *work);
 void tipc_sched_net_finalize(struct net *net, u32 addr);
 void tipc_net_stop(struct net *net);
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
@@ -1364,16 +1364,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
         struct nid_path *path;
         hda_nid_t pin = pins[i];

-        path = snd_hda_get_path_from_idx(codec, path_idx[i]);
-        if (path) {
-            badness += assign_out_path_ctls(codec, path);
-            continue;
+        if (!spec->obey_preferred_dacs) {
+            path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+            if (path) {
+                badness += assign_out_path_ctls(codec, path);
+                continue;
+            }
         }

         dacs[i] = get_preferred_dac(codec, pin);
         if (dacs[i]) {
             if (is_dac_already_used(codec, dacs[i]))
                 badness += bad->shared_primary;
+        } else if (spec->obey_preferred_dacs) {
+            badness += BAD_NO_PRIMARY_DAC;
         }

         if (!dacs[i])
@@ -236,6 +236,7 @@ struct hda_gen_spec {
     unsigned int power_down_unused:1; /* power down unused widgets */
     unsigned int dac_min_mute:1; /* minimal = mute for DACs */
     unsigned int suppress_vmaster:1; /* don't create vmaster kctls */
+    unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */

     /* other internal flags */
     unsigned int no_analog:1; /* digital I/O only */
@ -436,6 +436,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
|
||||
alc_update_coef_idx(codec, 0x7, 1<<5, 0);
|
||||
break;
|
||||
case 0x10ec0892:
|
||||
case 0x10ec0897:
|
||||
alc_update_coef_idx(codec, 0x7, 1<<5, 0);
|
||||
break;
|
||||
case 0x10ec0899:
|
||||
@ -5990,6 +5991,21 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
|
||||
codec->power_save_node = 0;
|
||||
}
|
||||
|
||||
/* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */
|
||||
static void alc289_fixup_asus_ga401(struct hda_codec *codec,
|
||||
const struct hda_fixup *fix, int action)
|
||||
{
|
||||
static const hda_nid_t preferred_pairs[] = {
|
||||
0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0
|
||||
};
|
||||
struct alc_spec *spec = codec->spec;
|
||||
|
||||
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
|
||||
spec->gen.preferred_dacs = preferred_pairs;
|
||||
spec->gen.obey_preferred_dacs = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
|
||||
static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
|
||||
const struct hda_fixup *fix, int action)
|
||||
@ -7543,11 +7559,10 @@ static const struct hda_fixup alc269_fixups[] = {
|
||||
.chain_id = ALC269_FIXUP_HEADSET_MIC
|
||||
},
|
||||
[ALC289_FIXUP_ASUS_GA401] = {
|
||||
.type = HDA_FIXUP_PINS,
|
||||
.v.pins = (const struct hda_pintbl[]) {
|
||||
{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
|
||||
{ }
|
||||
},
|
||||
.type = HDA_FIXUP_FUNC,
|
||||
.v.func = alc289_fixup_asus_ga401,
|
||||
.chained = true,
|
||||
.chain_id = ALC289_FIXUP_ASUS_GA502,
|
||||
},
|
||||
[ALC289_FIXUP_ASUS_GA502] = {
|
||||
.type = HDA_FIXUP_PINS,
|
||||
@ -7671,7 +7686,7 @@ static const struct hda_fixup alc269_fixups[] = {
|
||||
{ }
|
||||
},
|
||||
.chained = true,
|
||||
.chain_id = ALC289_FIXUP_ASUS_GA401
|
||||
.chain_id = ALC289_FIXUP_ASUS_GA502
|
||||
},
|
||||
[ALC274_FIXUP_HP_MIC] = {
|
||||
.type = HDA_FIXUP_VERBS,
|
||||
@ -7847,6 +7862,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
|
||||
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
|
||||
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
|
||||
SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
|
||||
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
|
||||
@ -8573,6 +8589,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
|
||||
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
|
||||
ALC292_STANDARD_PINS,
|
||||
{0x13, 0x90a60140}),
|
||||
SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE,
|
||||
{0x17, 0x90170110},
|
||||
{0x21, 0x04211020}),
|
||||
SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
|
||||
{0x14, 0x90170110},
|
||||
{0x1b, 0x90a70130},
|
||||
@@ -10156,6 +10175,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
	HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882),
	HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882),
	HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
	HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662),
	HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
	HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
	HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),

@@ -1912,6 +1912,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
		mem = wm_adsp_find_region(dsp, type);
		if (!mem) {
			adsp_err(dsp, "No region of type: %x\n", type);
			ret = -EINVAL;
			goto out_fw;
		}

@@ -195,6 +195,21 @@ static inline int insn_offset_immediate(struct insn *insn)
	return insn_offset_displacement(insn) + insn->displacement.nbytes;
}

/**
 * for_each_insn_prefix() -- Iterate prefixes in the instruction
 * @insn: Pointer to struct insn.
 * @idx: Index storage.
 * @prefix: Prefix byte.
 *
 * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
 * and the index is stored in @idx (note that this @idx is just for a cursor,
 * do not change it.)
 * Since prefixes.nbytes can be bigger than 4 if some prefixes
 * are repeated, it cannot be used for looping over the prefixes.
 */
#define for_each_insn_prefix(insn, idx, prefix) \
	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)

#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e

@@ -1,214 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */

#include <test_progs.h>
#include "test_map_init.skel.h"

#define TEST_VALUE 0x1234
#define FILL_VALUE 0xdeadbeef

static int nr_cpus;
static int duration;

typedef unsigned long long map_key_t;
typedef unsigned long long map_value_t;
typedef struct {
	map_value_t v; /* padding */
} __bpf_percpu_val_align pcpu_map_value_t;


static int map_populate(int map_fd, int num)
{
	pcpu_map_value_t value[nr_cpus];
	int i, err;
	map_key_t key;

	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(value, i) = FILL_VALUE;

	for (key = 1; key <= num; key++) {
		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
				   int *map_fd, int populate)
{
	struct test_map_init *skel;
	int err;

	skel = test_map_init__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_type(skel->maps.hashmap1, map_type);
	if (!ASSERT_OK(err, "bpf_map__set_type"))
		goto error;

	err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
		goto error;

	err = test_map_init__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto error;

	*map_fd = bpf_map__fd(skel->maps.hashmap1);
	if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
		goto error;

	err = map_populate(*map_fd, populate);
	if (!ASSERT_OK(err, "map_populate"))
		goto error_map;

	return skel;

error_map:
	close(*map_fd);
error:
	test_map_init__destroy(skel);
	return NULL;
}

/* executes bpf program that updates map with key, value */
static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
				map_value_t value)
{
	struct test_map_init__bss *bss;

	bss = skel->bss;

	bss->inKey = key;
	bss->inValue = value;
	bss->inPid = getpid();

	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
		return -1;

	/* Let tracepoint trigger */
	syscall(__NR_getpgid);

	test_map_init__detach(skel);

	return 0;
}

static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
{
	int i, nzCnt = 0;
	map_value_t val;

	for (i = 0; i < nr_cpus; i++) {
		val = bpf_percpu(value, i);
		if (val) {
			if (CHECK(val != expected, "map value",
				  "unexpected for cpu %d: 0x%llx\n", i, val))
				return -1;
			nzCnt++;
		}
	}

	if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
		  nzCnt))
		return -1;

	return 0;
}

/* Add key=1 elem with values set for all CPUs
 * Delete elem key=1
 * Run bpf prog that inserts new key=1 elem with value=0x1234
 * (bpf prog can only set value for current CPU)
 * Lookup Key=1 and check value is as expected for all CPUs:
 * value set by bpf prog for one CPU, 0 for all others
 */
static void test_pcpu_map_init(void)
{
	pcpu_map_value_t value[nr_cpus];
	struct test_map_init *skel;
	int map_fd, err;
	map_key_t key;

	/* max 1 elem in map so insertion is forced to reuse freed entry */
	skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
	if (!ASSERT_OK_PTR(skel, "prog_setup"))
		return;

	/* delete element so the entry can be re-used*/
	key = 1;
	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
		goto cleanup;

	/* run bpf prog that inserts new elem, re-using the slot just freed */
	err = prog_run_insert_elem(skel, key, TEST_VALUE);
	if (!ASSERT_OK(err, "prog_run_insert_elem"))
		goto cleanup;

	/* check that key=1 was re-created by bpf prog */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
		goto cleanup;

	/* and has expected values */
	check_values_one_cpu(value, TEST_VALUE);

cleanup:
	test_map_init__destroy(skel);
}

/* Add key=1 and key=2 elems with values set for all CPUs
 * Run bpf prog that inserts new key=3 elem
 * (only for current cpu; other cpus should have initial value = 0)
 * Lookup Key=1 and check value is as expected for all CPUs
 */
static void test_pcpu_lru_map_init(void)
{
	pcpu_map_value_t value[nr_cpus];
	struct test_map_init *skel;
	int map_fd, err;
	map_key_t key;

	/* Set up LRU map with 2 elements, values filled for all CPUs.
	 * With these 2 elements, the LRU map is full
	 */
	skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
	if (!ASSERT_OK_PTR(skel, "prog_setup"))
		return;

	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
	key = 3;
	err = prog_run_insert_elem(skel, key, TEST_VALUE);
	if (!ASSERT_OK(err, "prog_run_insert_elem"))
		goto cleanup;

	/* check that key=3 replaced one of earlier elements */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
		goto cleanup;

	/* and has expected values */
	check_values_one_cpu(value, TEST_VALUE);

cleanup:
	test_map_init__destroy(skel);
}

void test_map_init(void)
{
	nr_cpus = bpf_num_possible_cpus();
	if (nr_cpus <= 1) {
		printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
		test__skip();
		return;
	}

	if (test__start_subtest("pcpu_map_init"))
		test_pcpu_map_init();
	if (test__start_subtest("pcpu_lru_map_init"))
		test_pcpu_lru_map_init();
}
@@ -1,33 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u64 inKey = 0;
__u64 inValue = 0;
__u32 inPid = 0;

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 2);
	__type(key, __u64);
	__type(value, __u64);
} hashmap1 SEC(".maps");


SEC("tp/syscalls/sys_enter_getpgid")
int sysenter_getpgid(const void *ctx)
{
	/* Just do it for once, when called from our own test prog. This
	 * ensures the map value is only updated for a single CPU.
	 */
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	if (cur_pid == inPid)
		bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);

	return 0;
}

char _license[] SEC("license") = "GPL";