Merge 1ebfa66638 ("drm/amd/display: Add null check for top_pipe_to_program in commit_planes_for_stream") into android12-5.10-lts

Steps on the way to 5.10.227

Change-Id: I5e69192f2ecbbda523c18c2749bb1f181837e99c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -4161,6 +4161,16 @@
 	printk.time=	Show timing data prefixed to each printk message line
 			Format: <bool> (1/Y/y=enable, 0/N/n=disable)
 
+	proc_mem.force_override= [KNL]
+			Format: {always | ptrace | never}
+			Traditionally /proc/pid/mem allows memory permissions to be
+			overridden without restrictions. This option may be set to
+			restrict that. Can be one of:
+			- 'always': traditional behavior always allows mem overrides.
+			- 'ptrace': only allow mem overrides for active ptracers.
+			- 'never': never allow mem overrides.
+			If not specified, default is the CONFIG_PROC_MEM_* choice.
+
 	processor.max_cstate=	[HW,ACPI]
 			Limit processor to maximum C-state
 			max_cstate=9 overrides any DMI blacklist limit.
@@ -85,7 +85,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 unsigned long *args)
 {
-	memcpy(args, &regs->bx, 6 * sizeof(args[0]));
+	args[0] = regs->bx;
+	args[1] = regs->cx;
+	args[2] = regs->dx;
+	args[3] = regs->si;
+	args[4] = regs->di;
+	args[5] = regs->bp;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -2022,7 +2022,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
 			      struct ioc_now *now)
 {
 	struct ioc_gq *iocg;
-	u64 dur, usage_pct, nr_cycles;
+	u64 dur, usage_pct, nr_cycles, nr_cycles_shift;
 
 	/* if no debtor, reset the cycle */
 	if (!nr_debtors) {
@@ -2084,10 +2084,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
 		old_debt = iocg->abs_vdebt;
 		old_delay = iocg->delay;
 
+		nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
 		if (iocg->abs_vdebt)
-			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
+			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;
 
 		if (iocg->delay)
-			iocg->delay = iocg->delay >> nr_cycles ?: 1;
+			iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;
 
 		iocg_kick_waitq(iocg, true, now);
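The clamped shift above exists because shifting a 64-bit value by 64 or more bits is undefined behaviour in C, and nr_cycles can grow past that bound. A minimal standalone sketch of the idea, using the kernel's min_t() and BITS_PER_LONG helpers (the variable names here are illustrative, not taken from blk-iocost):

	/* Illustrative sketch only: clamp the shift count before using it, and
	 * floor the result at 1 with the GNU ?: extension so the remaining
	 * debt is never wiped out completely in a single forgiveness pass. */
	u64 debt = 1024;
	u64 nr_cycles = 70;	/* may exceed 63 after enough idle periods */
	u64 shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);

	debt = debt >> shift ?: 1;	/* "debt >> 70" would be undefined */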
@@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
 	elements =
 	    ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
 				 sizeof(union acpi_object));
+	if (!elements)
+		return (AE_NO_MEMORY);
 
 	this = string;
 	for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {
@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
 
 		if (info->connection_node) {
 			second_desc = info->connection_node->object;
+			if (second_desc == NULL) {
+				break;
+			}
 			if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
 				status =
 				    acpi_ds_get_buffer_arguments(second_desc);
@@ -786,6 +786,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 	unsigned long tmp;
 	int ret = 0;
 
+	if (t->rdata)
+		memset(t->rdata, 0, t->rlen);
+
 	/* start transaction */
 	spin_lock_irqsave(&ec->lock, tmp);
 	/* Enable GPE for command processing (IBF=0/OBF=1) */
@@ -822,8 +825,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 
 	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
 		return -EINVAL;
-	if (t->rdata)
-		memset(t->rdata, 0, t->rlen);
 
 	mutex_lock(&ec->mutex);
 	if (ec->global_lock) {
@@ -850,7 +851,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
 				.wdata = NULL, .rdata = &d,
 				.wlen = 0, .rlen = 1};
 
-	return acpi_ec_transaction(ec, &t);
+	return acpi_ec_transaction_unlocked(ec, &t);
 }
 
 static int acpi_ec_burst_disable(struct acpi_ec *ec)
@@ -860,7 +861,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
 				.wlen = 0, .rlen = 0};
 
 	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
-				acpi_ec_transaction(ec, &t) : 0;
+				acpi_ec_transaction_unlocked(ec, &t) : 0;
 }
 
 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
@@ -876,6 +877,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
 	return result;
 }
 
+static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
+{
+	int result;
+	u8 d;
+	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
+				.wdata = &address, .rdata = &d,
+				.wlen = 1, .rlen = 1};
+
+	result = acpi_ec_transaction_unlocked(ec, &t);
+	*data = d;
+	return result;
+}
+
 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
 {
 	u8 wdata[2] = { address, data };
@@ -886,6 +900,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
 	return acpi_ec_transaction(ec, &t);
 }
 
+static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
+{
+	u8 wdata[2] = { address, data };
+	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
+				.wdata = wdata, .rdata = NULL,
+				.wlen = 2, .rlen = 0};
+
+	return acpi_ec_transaction_unlocked(ec, &t);
+}
+
 int ec_read(u8 addr, u8 *val)
 {
 	int err;
@@ -1306,6 +1330,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
 	struct acpi_ec *ec = handler_context;
 	int result = 0, i, bytes = bits / 8;
 	u8 *value = (u8 *)value64;
+	u32 glk;
 
 	if ((address > 0xFF) || !value || !handler_context)
 		return AE_BAD_PARAMETER;
@@ -1313,13 +1338,25 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
 	if (function != ACPI_READ && function != ACPI_WRITE)
 		return AE_BAD_PARAMETER;
 
+	mutex_lock(&ec->mutex);
+
+	if (ec->global_lock) {
+		acpi_status status;
+
+		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+		if (ACPI_FAILURE(status)) {
+			result = -ENODEV;
+			goto unlock;
+		}
+	}
+
 	if (ec->busy_polling || bits > 8)
 		acpi_ec_burst_enable(ec);
 
 	for (i = 0; i < bytes; ++i, ++address, ++value) {
 		result = (function == ACPI_READ) ?
-			acpi_ec_read(ec, address, value) :
-			acpi_ec_write(ec, address, *value);
+			acpi_ec_read_unlocked(ec, address, value) :
+			acpi_ec_write_unlocked(ec, address, *value);
 		if (result < 0)
 			break;
 	}
@@ -1327,6 +1364,12 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
 	if (ec->busy_polling || bits > 8)
 		acpi_ec_burst_disable(ec);
 
+	if (ec->global_lock)
+		acpi_release_global_lock(glk);
+
+unlock:
+	mutex_unlock(&ec->mutex);
+
 	switch (result) {
 	case -EINVAL:
 		return AE_BAD_PARAMETER;
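The EC hunks above split each helper into a locking wrapper and an _unlocked worker so that acpi_ec_space_handler() can hold ec->mutex (and the ACPI global lock) across a whole multi-byte operation. A minimal sketch of that pattern, with an assumed wrapper name rather than the driver's exact code:

	/* Sketch, assuming the names used in the hunks above: the wrapper owns
	 * the locking, while callers that already hold ec->mutex use the
	 * _unlocked variant so a multi-byte region access stays one critical
	 * section instead of being interleaved byte by byte. */
	static int ec_single_transaction(struct acpi_ec *ec, struct transaction *t)
	{
		int ret;

		mutex_lock(&ec->mutex);
		ret = acpi_ec_transaction_unlocked(ec, t);
		mutex_unlock(&ec->mutex);
		return ret;
	}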
@@ -2614,7 +2614,8 @@ static void commit_planes_for_stream(struct dc *dc,
 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
 
 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
-		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+		if (top_pipe_to_program &&
+			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
 				top_pipe_to_program->stream_res.tg,
 				CRTC_STATE_VACTIVE);
@@ -206,6 +206,13 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
 	u32 smr;
 	int i;
 
+	/*
+	 * MSM8998 LPASS SMMU reports 13 context banks, but accessing
+	 * the last context bank crashes the system.
+	 */
+	if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") && smmu->num_context_banks == 13)
+		smmu->num_context_banks = 12;
+
 	/*
 	 * Some platforms support more than the Arm SMMU architected maximum of
 	 * 128 stream matching groups. For unknown reasons, the additional
@@ -1193,9 +1193,7 @@ static void free_iommu(struct intel_iommu *iommu)
  */
 static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	while (qi->desc_status[qi->free_tail] == QI_DONE ||
-	       qi->desc_status[qi->free_tail] == QI_ABORT) {
-		qi->desc_status[qi->free_tail] = QI_FREE;
+	while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
 		qi->free_cnt++;
 	}
@@ -1350,8 +1348,16 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
 		raw_spin_lock(&qi->q_lock);
 	}
 
-	for (i = 0; i < count; i++)
-		qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
+	/*
+	 * The reclaim code can free descriptors from multiple submissions
+	 * starting from the tail of the queue. When count == 0, the
+	 * status of the standalone wait descriptor at the tail of the queue
+	 * must be set to QI_FREE to allow the reclaim code to proceed.
+	 * It is also possible that descriptors from one of the previous
+	 * submissions has to be reclaimed by a subsequent submission.
+	 */
+	for (i = 0; i <= count; i++)
+		qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
 
 	reclaim_free_desc(qi);
 	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -1847,9 +1847,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	 * entry for first-level or pass-through translation modes should
 	 * be programmed with a domain id different from those used for
 	 * second-level or nested translation. We reserve a domain id for
-	 * this purpose.
+	 * this purpose. This domain id is also used for identity domain
+	 * in legacy mode.
 	 */
-	if (sm_supported(iommu))
 	set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
 
 	return 0;
@@ -256,7 +256,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
 	const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
 	const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
 	char tc_string[8];
-	int tc;
+	unsigned int tc;
 
 	memset(tc_string, 0, sizeof(tc_string));
 	memcpy(p, aq_ethtool_stat_names,
@@ -265,7 +265,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
 
 	for (tc = 0; tc < cfg->tcs; tc++) {
 		if (cfg->is_qos)
-			snprintf(tc_string, 8, "TC%d ", tc);
+			snprintf(tc_string, 8, "TC%u ", tc);
 
 		for (i = 0; i < cfg->vecs; i++) {
 			for (si = 0; si < rx_stat_cnt; si++) {
@@ -999,7 +999,7 @@ struct mvpp2 {
 	unsigned int max_port_rxqs;
 
 	/* Workqueue to gather hardware statistics */
-	char queue_name[30];
+	char queue_name[31];
 	struct workqueue_struct *stats_queue;
 
 	/* Debugfs root entry */
@@ -2631,8 +2631,8 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
 		 "%s-rxtx-%d", nfp_net_name(nn), idx);
-	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
-			  r_vec);
+	err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
+			  r_vec->name, r_vec);
 	if (err) {
 		if (nn->dp.netdev)
 			netif_napi_del(&r_vec->napi);
@@ -2642,7 +2642,6 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
-	disable_irq(r_vec->irq_vector);
 
 	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
 
@@ -2621,7 +2621,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
 		if (push_reason !=
 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
 			dev_kfree_skb_any(msdu);
-			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+			ab->soc_stats.hal_reo_error[ring_id]++;
 			continue;
 		}
 
@@ -1593,7 +1593,7 @@ struct host_cmd_ds_802_11_scan_rsp {
 
 struct host_cmd_ds_802_11_scan_ext {
 	u32 reserved;
-	u8 tlv_buffer[1];
+	u8 tlv_buffer[];
 } __packed;
 
 struct mwifiex_ie_types_bss_mode {
@@ -2561,8 +2561,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
 	ext_scan_resp = &resp->params.ext_scan;
 
 	tlv = (void *)ext_scan_resp->tlv_buffer;
-	buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
-					      - 1);
+	buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
 
 	while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
 		type = le16_to_cpu(tlv->type);
@@ -12,6 +12,7 @@ if RTW88
 
 config RTW88_CORE
 	tristate
+	select WANT_DEV_COREDUMP
 
 config RTW88_PCI
 	tristate
@@ -72,9 +72,6 @@ static int brcmstb_restart_handler(struct notifier_block *this,
 		return NOTIFY_DONE;
 	}
 
-	while (1)
-		;
-
 	return NOTIFY_DONE;
 }
 
@@ -2408,6 +2408,7 @@ static int pxafb_remove(struct platform_device *dev)
 	info = &fbi->fb;
 
 	pxafb_overlay_exit(fbi);
+	cancel_work_sync(&fbi->task);
 	unregister_framebuffer(info);
 
 	pxafb_disable_controller(fbi);
@@ -86,6 +86,7 @@
 #include <linux/elf.h>
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
+#include <linux/fs_parser.h>
 #include <linux/fs_struct.h>
 #include <linux/slab.h>
 #include <linux/sched/autogroup.h>
@@ -116,6 +117,40 @@
 static u8 nlink_tid __ro_after_init;
 static u8 nlink_tgid __ro_after_init;
 
+enum proc_mem_force {
+	PROC_MEM_FORCE_ALWAYS,
+	PROC_MEM_FORCE_PTRACE,
+	PROC_MEM_FORCE_NEVER
+};
+
+static enum proc_mem_force proc_mem_force_override __ro_after_init =
+	IS_ENABLED(CONFIG_PROC_MEM_NO_FORCE) ? PROC_MEM_FORCE_NEVER :
+	IS_ENABLED(CONFIG_PROC_MEM_FORCE_PTRACE) ? PROC_MEM_FORCE_PTRACE :
+	PROC_MEM_FORCE_ALWAYS;
+
+static const struct constant_table proc_mem_force_table[] __initconst = {
+	{ "always", PROC_MEM_FORCE_ALWAYS },
+	{ "ptrace", PROC_MEM_FORCE_PTRACE },
+	{ "never", PROC_MEM_FORCE_NEVER },
+	{ }
+};
+
+static int __init early_proc_mem_force_override(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	/*
+	 * lookup_constant() defaults to proc_mem_force_override to preserve
+	 * the initial Kconfig choice in case an invalid param gets passed.
+	 */
+	proc_mem_force_override = lookup_constant(proc_mem_force_table,
+						  buf, proc_mem_force_override);
+
+	return 0;
+}
+early_param("proc_mem.force_override", early_proc_mem_force_override);
+
 struct pid_entry {
 	const char *name;
 	unsigned int len;
@@ -833,6 +868,28 @@ static int mem_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+static bool proc_mem_foll_force(struct file *file, struct mm_struct *mm)
+{
+	struct task_struct *task;
+	bool ptrace_active = false;
+
+	switch (proc_mem_force_override) {
+	case PROC_MEM_FORCE_NEVER:
+		return false;
+	case PROC_MEM_FORCE_PTRACE:
+		task = get_proc_task(file_inode(file));
+		if (task) {
+			ptrace_active = READ_ONCE(task->ptrace) &&
+					READ_ONCE(task->mm) == mm &&
+					READ_ONCE(task->parent) == current;
+			put_task_struct(task);
+		}
+		return ptrace_active;
+	default:
+		return true;
+	}
+}
+
 static ssize_t mem_rw(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos, int write)
 {
@@ -853,7 +910,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	if (!mmget_not_zero(mm))
 		goto free;
 
-	flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
+	flags = write ? FOLL_WRITE : 0;
+	if (proc_mem_foll_force(file, mm))
+		flags |= FOLL_FORCE;
 
 	while (count > 0) {
 		size_t this_len = min_t(size_t, count, PAGE_SIZE);
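For context on what the new proc_mem_foll_force() gate changes in practice: writing a target's read-only mapping through /proc/<pid>/mem needs the FOLL_FORCE override, and with proc_mem.force_override=ptrace that override is only granted to an active ptracer. A hypothetical userspace sketch follows; everything in it, including the helper name, is illustrative and not part of the patch:

	/* Hypothetical illustration: under proc_mem.force_override=ptrace the
	 * pwrite() below may only override page permissions while we are
	 * attached to the target as a ptracer. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static int poke_readonly(pid_t pid, off_t addr, unsigned char byte)
	{
		char path[64];
		int fd, ret = -1;

		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
			return -1;
		waitpid(pid, NULL, 0);		/* wait for the target to stop */

		snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
		fd = open(path, O_RDWR);
		if (fd >= 0) {
			if (pwrite(fd, &byte, 1, addr) == 1)
				ret = 0;	/* permission override granted */
			close(fd);
		}

		ptrace(PTRACE_DETACH, pid, NULL, NULL);
		return ret;
	}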
@@ -398,7 +398,7 @@ rcu_scale_writer(void *arg)
 			udelay(writer_holdoff);
 		wdp = &wdpp[i];
 		*wdp = ktime_get_mono_fast_ns();
-		if (gp_async) {
+		if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
 retry:
 			if (!rhp)
 				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
@@ -454,7 +454,7 @@ rcu_scale_writer(void *arg)
 		i++;
 		rcu_scale_wait_shutdown();
 	} while (!torture_must_stop());
-	if (gp_async) {
+	if (gp_async && cur_ops->async) {
 		cur_ops->gp_barrier();
 	}
 	writer_n_durations[me] = i_max + 1;
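Both rcu_scale_writer() changes above make the asynchronous path conditional on the selected RCU flavour actually providing async callbacks. A short sketch of that guarded-optional-callback idea (the callback name below is assumed for illustration):

	/* Sketch only: fall back to a synchronous grace period when the chosen
	 * ops table does not implement the call_rcu()-style async interface. */
	if (gp_async && cur_ops->async)
		cur_ops->async(rhp, scale_async_cb);	/* asynchronous grace period */
	else
		cur_ops->sync();			/* synchronous fallback */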
@@ -1829,10 +1829,11 @@ struct sigqueue *sigqueue_alloc(void)
 
 void sigqueue_free(struct sigqueue *q)
 {
-	unsigned long flags;
 	spinlock_t *lock = &current->sighand->siglock;
+	unsigned long flags;
 
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+	if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+		return;
 	/*
 	 * We must hold ->siglock while testing q->list
 	 * to serialize with collect_signal() or with
@@ -1860,7 +1861,10 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
 	unsigned long flags;
 	int ret, result;
 
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+	if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+		return 0;
+	if (WARN_ON_ONCE(q->info.si_code != SI_TIMER))
+		return 0;
 
 	ret = -1;
 	rcu_read_lock();
@@ -1879,7 +1883,6 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
 	 * If an SI_TIMER entry is already queue just increment
 	 * the overrun count.
 	 */
-	BUG_ON(q->info.si_code != SI_TIMER);
 	q->info.si_overrun++;
 	result = TRACE_SIGNAL_ALREADY_PENDING;
 	goto out;
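The BUG_ON() replacements above rely on WARN_ON_ONCE() evaluating to the truth value of its condition while printing a backtrace only the first time it fires, so a corrupted sigqueue now degrades to a logged early return instead of a panic. A generic sketch of the pattern (not the kernel's exact code):

	/* Generic sketch: warn once about an "impossible" state, then bail out
	 * instead of crashing the machine as BUG_ON() would. */
	if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
		return 0;	/* reject the bogus queue entry and keep running */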
@@ -566,10 +566,6 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 
 	ASSERT_RTNL();
 
-	if (!in_dev) {
-		inet_free_ifa(ifa);
-		return -ENOBUFS;
-	}
 	ipv4_devconf_setall(in_dev);
 	neigh_parms_data_state_setall(in_dev->arp_parms);
 	if (ifa->ifa_dev != in_dev) {
@@ -1150,6 +1146,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
 
 		if (!ifa) {
 			ret = -ENOBUFS;
+			if (!in_dev)
+				break;
 			ifa = inet_alloc_ifa();
 			if (!ifa)
 				break;
@@ -1334,7 +1334,7 @@ static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
 	struct flowi4 fl4 = {
 		.flowi4_mark = frn->fl_mark,
 		.daddr = frn->fl_addr,
-		.flowi4_tos = frn->fl_tos,
+		.flowi4_tos = frn->fl_tos & IPTOS_RT_MASK,
 		.flowi4_scope = frn->fl_scope,
 	};
 	struct fib_table *tb;
@@ -111,6 +111,9 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if (tw->tw_substate == TCP_FIN_WAIT2)
+		reuse = 0;
+
 	if (reuse == 2) {
 		/* Still does not detect *everything* that goes through
 		 * lo, since we require a loopback src or dst address
@@ -1577,7 +1577,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			goto unlock;
 		}
 
-		rcu_assign_pointer(q->admin_sched, new_admin);
+		/* Not going to race against advance_sched(), but still */
+		admin = rcu_replace_pointer(q->admin_sched, new_admin,
+					    lockdep_rtnl_is_held());
 		if (admin)
 			call_rcu(&admin->rcu, taprio_free_sched_cb);
 	} else {
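The taprio hunk above goes back to capturing the displaced admin schedule with rcu_replace_pointer() so it can be handed to call_rcu() once in-flight readers are done. A minimal sketch of the publish-and-defer-free pattern (context assumed from the hunk):

	/* Sketch: swap in the new schedule and let readers drain before the
	 * one it replaced is freed. */
	old = rcu_replace_pointer(q->admin_sched, new_admin, lockdep_rtnl_is_held());
	if (old)
		call_rcu(&old->rcu, taprio_free_sched_cb);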
@@ -161,8 +161,12 @@ static int bearer_name_validate(const char *name,
 
 	/* return bearer name components, if necessary */
 	if (name_parts) {
-		strcpy(name_parts->media_name, media_name);
-		strcpy(name_parts->if_name, if_name);
+		if (strscpy(name_parts->media_name, media_name,
+			    TIPC_MAX_MEDIA_NAME) < 0)
+			return 0;
+		if (strscpy(name_parts->if_name, if_name,
+			    TIPC_MAX_IF_NAME) < 0)
+			return 0;
 	}
 	return 1;
 }
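The bearer-name hunk above swaps unbounded strcpy() for strscpy(), which returns the number of bytes copied or -E2BIG when the source (including its terminating NUL) does not fit, so an oversized component is rejected instead of overflowing the fixed-size fields. A generic sketch of that check (destination struct and sizes assumed):

	/* Generic sketch: treat truncation as a validation failure. */
	if (strscpy(parts->media_name, media_name, sizeof(parts->media_name)) < 0)
		return 0;	/* media name too long for the buffer */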
@@ -19,6 +19,38 @@ config SECURITY_DMESG_RESTRICT
 
 	  If you are unsure how to answer this question, answer N.
 
+choice
+	prompt "Allow /proc/pid/mem access override"
+	default PROC_MEM_ALWAYS_FORCE
+	help
+	  Traditionally /proc/pid/mem allows users to override memory
+	  permissions for users like ptrace, assuming they have ptrace
+	  capability.
+
+	  This allows people to limit that - either never override, or
+	  require actual active ptrace attachment.
+
+	  Defaults to the traditional behavior (for now)
+
+config PROC_MEM_ALWAYS_FORCE
+	bool "Traditional /proc/pid/mem behavior"
+	help
+	  This allows /proc/pid/mem accesses to override memory mapping
+	  permissions if you have ptrace access rights.
+
+config PROC_MEM_FORCE_PTRACE
+	bool "Require active ptrace() use for access override"
+	help
+	  This allows /proc/pid/mem accesses to override memory mapping
+	  permissions for active ptracers like gdb.
+
+config PROC_MEM_NO_FORCE
+	bool "Never"
+	help
+	  Never override memory mapping permissions
+
+endchoice
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
@@ -713,7 +713,7 @@ static u16 HPIMSGX__init(struct hpi_message *phm,
 		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
 		return phr->error;
 	}
-	if (hr.error == 0) {
+	if (hr.error == 0 && hr.u.s.adapter_index < HPI_MAX_ADAPTERS) {
 		/* the adapter was created successfully
 		   save the mapping for future use */
 		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
@@ -1303,8 +1303,10 @@ static int snd_hdsp_midi_output_possible (struct hdsp *hdsp, int id)
 
 static void snd_hdsp_flush_midi_input (struct hdsp *hdsp, int id)
 {
-	while (snd_hdsp_midi_input_available (hdsp, id))
-		snd_hdsp_midi_read_byte (hdsp, id);
+	int count = 256;
+
+	while (snd_hdsp_midi_input_available(hdsp, id) && --count)
+		snd_hdsp_midi_read_byte(hdsp, id);
 }
 
 static int snd_hdsp_midi_output_write (struct hdsp_midi *hmidi)
@@ -1839,8 +1839,10 @@ static inline int snd_hdspm_midi_output_possible (struct hdspm *hdspm, int id)
 
 static void snd_hdspm_flush_midi_input(struct hdspm *hdspm, int id)
 {
-	while (snd_hdspm_midi_input_available (hdspm, id))
-		snd_hdspm_midi_read_byte (hdspm, id);
+	int count = 256;
+
+	while (snd_hdspm_midi_input_available(hdspm, id) && --count)
+		snd_hdspm_midi_read_byte(hdspm, id);
 }
 
 static int snd_hdspm_midi_output_write (struct hdspm_midi *hmidi)
@@ -556,6 +556,12 @@ static const struct usb_audio_device_name usb_audio_names[] = {
 	/* Creative/Toshiba Multimedia Center SB-0500 */
 	DEVICE_NAME(0x041e, 0x3048, "Toshiba", "SB-0500"),
 
+	/* Logitech Audio Devices */
+	DEVICE_NAME(0x046d, 0x0867, "Logitech, Inc.", "Logi-MeetUp"),
+	DEVICE_NAME(0x046d, 0x0874, "Logitech, Inc.", "Logi-Tap-Audio"),
+	DEVICE_NAME(0x046d, 0x087c, "Logitech, Inc.", "Logi-Huddle"),
+	DEVICE_NAME(0x046d, 0x0898, "Logitech, Inc.", "Logi-RB-Audio"),
+	DEVICE_NAME(0x046d, 0x08d2, "Logitech, Inc.", "Logi-RBM-Audio"),
 	DEVICE_NAME(0x046d, 0x0990, "Logitech, Inc.", "QuickCam Pro 9000"),
 
 	/* ASUS ROG Zenith II: this machine has also two devices, one for
@@ -35,6 +35,83 @@
 	.bInterfaceClass = USB_CLASS_AUDIO, \
 	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
 
+/* Quirk .driver_info, followed by the definition of the quirk entry;
+ * put like QUIRK_DRIVER_INFO { ... } in each entry of the quirk table
+ */
+#define QUIRK_DRIVER_INFO \
+	.driver_info = (unsigned long)&(const struct snd_usb_audio_quirk)
+
+/*
+ * Macros for quirk data entries
+ */
+
+/* Quirk data entry for ignoring the interface */
+#define QUIRK_DATA_IGNORE(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_IGNORE_INTERFACE
+/* Quirk data entry for a standard audio interface */
+#define QUIRK_DATA_STANDARD_AUDIO(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_INTERFACE
+/* Quirk data entry for a standard MIDI interface */
+#define QUIRK_DATA_STANDARD_MIDI(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_MIDI_STANDARD_INTERFACE
+/* Quirk data entry for a standard mixer interface */
+#define QUIRK_DATA_STANDARD_MIXER(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_MIXER
+
+/* Quirk data entry for Yamaha MIDI */
+#define QUIRK_DATA_MIDI_YAMAHA(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_MIDI_YAMAHA
+/* Quirk data entry for Edirol UAxx */
+#define QUIRK_DATA_EDIROL_UAXX(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_AUDIO_EDIROL_UAXX
+/* Quirk data entry for raw bytes interface */
+#define QUIRK_DATA_RAW_BYTES(_ifno) \
+	.ifnum = (_ifno), .type = QUIRK_MIDI_RAW_BYTES
+
+/* Quirk composite array terminator */
+#define QUIRK_COMPOSITE_END	{ .ifnum = -1 }
+
+/* Quirk data entry for composite quirks;
+ * followed by the quirk array that is terminated with QUIRK_COMPOSITE_END
+ * e.g. QUIRK_DATA_COMPOSITE { { quirk1 }, { quirk2 },..., QUIRK_COMPOSITE_END }
+ */
+#define QUIRK_DATA_COMPOSITE \
+	.ifnum = QUIRK_ANY_INTERFACE, \
+	.type = QUIRK_COMPOSITE, \
+	.data = &(const struct snd_usb_audio_quirk[])
+
+/* Quirk data entry for a fixed audio endpoint;
+ * followed by audioformat definition
+ * e.g. QUIRK_DATA_AUDIOFORMAT(n) { .formats = xxx, ... }
+ */
+#define QUIRK_DATA_AUDIOFORMAT(_ifno) \
+	.ifnum = (_ifno), \
+	.type = QUIRK_AUDIO_FIXED_ENDPOINT, \
+	.data = &(const struct audioformat)
+
+/* Quirk data entry for a fixed MIDI endpoint;
+ * followed by snd_usb_midi_endpoint_info definition
+ * e.g. QUIRK_DATA_MIDI_FIXED_ENDPOINT(n) { .out_cables = x, .in_cables = y }
+ */
+#define QUIRK_DATA_MIDI_FIXED_ENDPOINT(_ifno) \
+	.ifnum = (_ifno), \
+	.type = QUIRK_MIDI_FIXED_ENDPOINT, \
+	.data = &(const struct snd_usb_midi_endpoint_info)
+/* Quirk data entry for a MIDIMAN MIDI endpoint */
+#define QUIRK_DATA_MIDI_MIDIMAN(_ifno) \
+	.ifnum = (_ifno), \
+	.type = QUIRK_MIDI_MIDIMAN, \
+	.data = &(const struct snd_usb_midi_endpoint_info)
+/* Quirk data entry for a EMAGIC MIDI endpoint */
+#define QUIRK_DATA_MIDI_EMAGIC(_ifno) \
+	.ifnum = (_ifno), \
+	.type = QUIRK_MIDI_EMAGIC, \
+	.data = &(const struct snd_usb_midi_endpoint_info)
+
 /*
  * Here we go... the quirk table definition begins:
  */
 
 /* FTDI devices */
 {
 	USB_DEVICE(0x0403, 0xb8d8),
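As a usage illustration of the helper macros introduced above, here is what a composite table entry would look like; the device IDs and interface numbers are made up, not an entry from the real quirk table:

	/* Hypothetical entry, for illustration only: interface 0 is standard
	 * audio and interface 1 is standard MIDI on an imaginary device. */
	{
		USB_DEVICE(0x1234, 0x5678),
		QUIRK_DRIVER_INFO {
			QUIRK_DATA_COMPOSITE {
				{ QUIRK_DATA_STANDARD_AUDIO(0) },
				{ QUIRK_DATA_STANDARD_MIDI(1) },
				QUIRK_COMPOSITE_END
			}
		}
	},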