commit 6f849f24da

Merge e0dd13b49d ("wifi: rtl8xxxu: RTL8192EU always needs full init") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: Id1ae1d6b019603d17be21ebc68f399eb60bde38a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm)
 	/* save fn */
 	copy	%arg2, %r31
 
-	/* set up the new ap */
-	ldo	64(%arg1), %r29
-
 	/* load up the arg registers from the saved arg area */
 	/* 32-bit calling convention passes first 4 args in registers */
 	ldd	0*REG_SZ(%arg1), %arg0		/* note overwriting arg0 */
@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm)
 	ldd	7*REG_SZ(%arg1), %r19
 	ldd	1*REG_SZ(%arg1), %arg1		/* do this one last! */
 
+	/* set up real-mode stack and real-mode ap */
 	tophys_r1 %sp
+	ldo	-16(%sp), %r29			/* Reference param save area */
 
 	b,l	rfi_virt2real,%r2
 	nop
@@ -7537,6 +7537,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
 		break;
 
+	case x86_intercept_pause:
+		/*
+		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
+		 * with vanilla NOPs in the emulator. Apply the interception
+		 * check only to actual PAUSE instructions. Don't check
+		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
+		 * exit, i.e. KVM is within its rights to allow L2 to execute
+		 * the PAUSE.
+		 */
+		if ((info->rep_prefix != REPE_PREFIX) ||
+		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+			return X86EMUL_CONTINUE;
+
+		break;
+
 	/* TODO: check more intercepts... */
 	default:
 		break;
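The prefix check above relies on how PAUSE is encoded: it is the single-byte NOP (0x90) with a REPE/F3 prefix in front, so an emulator cannot tell the two apart by opcode alone. A standalone userspace sketch of that classification (the byte values follow the x86 encoding; everything else is invented for the illustration and is not KVM code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const char *classify(const uint8_t *insn, size_t len)
{
	/* PAUSE = F3 90 (REPE prefix + NOP); a bare 90 is a plain NOP. */
	if (len >= 2 && insn[0] == 0xF3 && insn[1] == 0x90)
		return "PAUSE (REPE + NOP)";
	if (len >= 1 && insn[0] == 0x90)
		return "NOP";
	return "something else";
}

int main(void)
{
	const uint8_t pause_insn[] = { 0xF3, 0x90 };
	const uint8_t nop_insn[]   = { 0x90 };

	printf("%s\n", classify(pause_insn, sizeof(pause_insn)));
	printf("%s\n", classify(nop_insn, sizeof(nop_insn)));
	return 0;
}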
@@ -456,7 +456,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
 	if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
 		return;
 
-	BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+	if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+		return;
+
 	if (alg->cra_destroy)
 		alg->cra_destroy(alg);
@@ -564,8 +564,10 @@ static void retry_timeout(struct timer_list *t)
 
 	if (waiting)
 		start_get(ssif_info);
-	if (resend)
+	if (resend) {
 		start_resend(ssif_info);
+		ssif_inc_stat(ssif_info, send_retries);
+	}
 }
 
 static void watch_timeout(struct timer_list *t)
@@ -792,9 +794,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 	} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 		   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 		/*
-		 * Don't abort here, maybe it was a queued
-		 * response to a previous command.
+		 * Recv error response, give up.
 		 */
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		dev_warn(&ssif_info->client->dev,
 			 "Invalid response getting flags: %x %x\n",
@@ -1634,19 +1634,23 @@ static int safexcel_probe_generic(void *pdev,
 						     &priv->ring[i].rdr);
 		if (ret) {
 			dev_err(dev, "Failed to initialize rings\n");
-			return ret;
+			goto err_cleanup_rings;
 		}
 
 		priv->ring[i].rdr_req = devm_kcalloc(dev,
 			EIP197_DEFAULT_RING_SIZE,
 			sizeof(*priv->ring[i].rdr_req),
 			GFP_KERNEL);
-		if (!priv->ring[i].rdr_req)
-			return -ENOMEM;
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
-		if (!ring_irq)
-			return -ENOMEM;
+		if (!ring_irq) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		ring_irq->priv = priv;
 		ring_irq->ring = i;
@@ -1660,7 +1664,8 @@ static int safexcel_probe_generic(void *pdev,
 				       ring_irq);
 		if (irq < 0) {
 			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
-			return irq;
+			ret = irq;
+			goto err_cleanup_rings;
 		}
 
 		priv->ring[i].irq = irq;
@@ -1672,8 +1677,10 @@ static int safexcel_probe_generic(void *pdev,
 		snprintf(wq_name, 9, "wq_ring%d", i);
 		priv->ring[i].workqueue =
 			create_singlethread_workqueue(wq_name);
-		if (!priv->ring[i].workqueue)
-			return -ENOMEM;
+		if (!priv->ring[i].workqueue) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		priv->ring[i].requests = 0;
 		priv->ring[i].busy = false;
@@ -1690,16 +1697,26 @@ static int safexcel_probe_generic(void *pdev,
 	ret = safexcel_hw_init(priv);
 	if (ret) {
 		dev_err(dev, "HW init failed (%d)\n", ret);
-		return ret;
+		goto err_cleanup_rings;
 	}
 
 	ret = safexcel_register_algorithms(priv);
 	if (ret) {
 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
-		return ret;
+		goto err_cleanup_rings;
 	}
 
 	return 0;
+
+err_cleanup_rings:
+	for (i = 0; i < priv->config.rings; i++) {
+		if (priv->ring[i].irq)
+			irq_set_affinity_hint(priv->ring[i].irq, NULL);
+		if (priv->ring[i].workqueue)
+			destroy_workqueue(priv->ring[i].workqueue);
+	}
+
+	return ret;
 }
 
 static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
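The hunks above funnel every failure through one err_cleanup_rings label so that rings set up in earlier loop iterations are torn down instead of leaked. A minimal userspace sketch of that unwind pattern (the fake_ring type, sizes, and helper name are invented for the illustration, not taken from the driver):

#include <stdio.h>
#include <stdlib.h>

struct fake_ring {
	void *buf;
};

static int setup_rings(struct fake_ring *rings, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		rings[i].buf = malloc(64);
		if (!rings[i].buf) {
			ret = -1;
			goto err_cleanup;	/* single exit for every failure */
		}
	}
	return 0;

err_cleanup:
	/* undo only what this function already set up */
	while (i-- > 0) {
		free(rings[i].buf);
		rings[i].buf = NULL;
	}
	return ret;
}

int main(void)
{
	struct fake_ring rings[4];

	printf("setup_rings: %d\n", setup_rings(rings, 4));
	return 0;
}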
@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
 	u16 stat;
 
 	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
-	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
 
 	if (stat & mask)
 		ret = IRQ_WAKE_THREAD;
@@ -897,8 +897,8 @@ struct amd_ir_data {
 	 */
 	struct irq_cfg *cfg;
 	int ga_vector;
-	int ga_root_ptr;
-	int ga_tag;
+	u64 ga_root_ptr;
+	u32 ga_tag;
 };
 
 struct amd_irte_ops {
@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
 	struct zynqmp_ipi_message *msg;
 	u64 arg0, arg3;
 	struct arm_smccc_res res;
-	int ret, i;
+	int ret, i, status = IRQ_NONE;
 
 	(void)irq;
 	arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
 				memcpy_fromio(msg->data, mchan->req_buf,
 					      msg->len);
 				mbox_chan_received_data(chan, (void *)msg);
-				return IRQ_HANDLED;
+				status = IRQ_HANDLED;
 			}
 		}
 	}
-	return IRQ_NONE;
+	return status;
 }
 
 /**
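The point of the change above is that the handler now finishes scanning every channel before reporting, instead of returning from inside the loop after the first serviced one. A toy model of that control flow (the three-entry pending[] array is invented for the demo and is not the driver's data structure):

#include <stdio.h>

enum irq_status { NONE, HANDLED };

int main(void)
{
	int pending[] = { 0, 1, 1 };	/* pretend sources 1 and 2 fired */
	enum irq_status status = NONE;

	for (int i = 0; i < 3; i++) {
		if (pending[i]) {
			printf("serviced source %d\n", i);
			status = HANDLED;	/* keep scanning, don't return */
		}
	}
	printf("handler returns %s\n",
	       status == HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}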
@@ -2900,10 +2900,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t chunk_mask = conf->geo.chunk_mask;
 	int page_idx = 0;
 
-	if (!mempool_initialized(&conf->r10buf_pool))
-		if (init_resync(conf))
-			return 0;
-
 	/*
 	 * Allow skipping a full rebuild for incremental assembly
 	 * of a clean array, like RAID1 does.
@@ -2919,6 +2915,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		return mddev->dev_sectors - sector_nr;
 	}
 
+	if (!mempool_initialized(&conf->r10buf_pool))
+		if (init_resync(conf))
+			return 0;
+
  skipped:
 	max_sector = mddev->dev_sectors;
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -947,7 +947,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
 				  int offset, int len)
 {
 	struct ubi_device *ubi = vol->ubi;
-	int pnum, opnum, err, vol_id = vol->vol_id;
+	int pnum, opnum, err, err2, vol_id = vol->vol_id;
 
 	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
@@ -982,10 +982,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
 out_put:
 	up_read(&ubi->fm_eba_sem);
 
-	if (err && pnum >= 0)
-		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
-	else if (!err && opnum >= 0)
-		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+	if (err && pnum >= 0) {
+		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+		if (err2) {
+			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+				 pnum, err2);
+		}
+	} else if (!err && opnum >= 0) {
+		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+		if (err2) {
+			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+				 opnum, err2);
+		}
+	}
 
 	return err;
 }
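The err/err2 split above exists so that a failure while returning the PEB only gets logged and never overwrites the error the caller actually needs to see. A compact userspace sketch of that pattern (the -12/-5 values and helper names are placeholders, not UBI code):

#include <stdio.h>

static int cleanup_peb(void)
{
	return -5;	/* pretend the cleanup step failed as well */
}

static int write_block(void)
{
	int err = -12;	/* primary failure, e.g. an allocation error */
	int err2;

	if (err) {
		err2 = cleanup_peb();
		if (err2)
			fprintf(stderr, "cleanup also failed: %d\n", err2);
	}
	return err;	/* the primary error is what the caller sees */
}

int main(void)
{
	printf("write_block() = %d\n", write_block());
	return 0;
}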
@@ -1702,6 +1702,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
 	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
 	.has_s0s1 = 0,
 	.gen2_thermal_meter = 1,
+	.needs_full_init = 1,
 	.adda_1t_init = 0x0fc01616,
 	.adda_1t_path_on = 0x0fc01616,
 	.adda_2t_path_on_a = 0x0fc01616,
@@ -424,7 +424,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
 };
 
 static const char * const pwm_axg_ao_parent_names[] = {
-	"aoclk81", "xtal", "fclk_div4", "fclk_div5"
+	"xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
 };
 
 static const struct meson_pwm_data pwm_axg_ao_data = {
@@ -433,7 +433,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
 };
 
 static const char * const pwm_g12a_ao_ab_parent_names[] = {
-	"xtal", "aoclk81", "fclk_div4", "fclk_div5"
+	"xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
 };
 
 static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
@@ -442,7 +442,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
 };
 
 static const char * const pwm_g12a_ao_cd_parent_names[] = {
-	"xtal", "aoclk81",
+	"xtal", "g12a_ao_clk81",
 };
 
 static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
@@ -884,6 +884,16 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 			continue;
 		}
 
+		/*
+		 * If wb_tryget fails, the wb has been shutdown, skip it.
+		 *
+		 * Pin @wb so that it stays on @bdi->wb_list. This allows
+		 * continuing iteration from @wb after dropping and
+		 * regrabbing rcu read lock.
+		 */
+		if (!wb_tryget(wb))
+			continue;
+
 		/* alloc failed, execute synchronously using on-stack fallback */
 		work = &fallback_work;
 		*work = *base_work;
@@ -892,13 +902,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 		work->done = &fallback_work_done;
 
 		wb_queue_work(wb, work);
-
-		/*
-		 * Pin @wb so that it stays on @bdi->wb_list. This allows
-		 * continuing iteration from @wb after dropping and
-		 * regrabbing rcu read lock.
-		 */
-		wb_get(wb);
 		last_wb = wb;
 
 		rcu_read_unlock();
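The reordering above matters because wb_tryget() can fail once the wb is shutting down, so the pin has to be taken (and checked) before any work is queued on it. A userspace toy of the try-get idea, with a plain counter standing in for the kernel's percpu refcount and no concurrency modelled:

#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refs;
};

static bool obj_tryget(struct obj *o)
{
	if (o->refs == 0)	/* already shut down: caller must skip it */
		return false;
	o->refs++;
	return true;
}

static void obj_put(struct obj *o)
{
	o->refs--;
}

int main(void)
{
	struct obj live = { .refs = 1 }, dying = { .refs = 0 };

	printf("live:  tryget %s\n", obj_tryget(&live) ? "ok" : "skipped");
	printf("dying: tryget %s\n", obj_tryget(&dying) ? "ok" : "skipped");
	obj_put(&live);
	return 0;
}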
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
-	if (ret < 0) {
-		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+	if (ret < 0)
 		goto out;
-	}
+
 	if (NILFS_BMAP_USE_VBN(bmap)) {
 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
 					  &blocknr);
 		if (!ret)
 			*ptrp = blocknr;
+		else if (ret == -ENOENT) {
+			/*
+			 * If there was no valid entry in DAT for the block
+			 * address obtained by b_ops->bop_lookup, then pass
+			 * internal code -EINVAL to nilfs_bmap_convert_error
+			 * to treat it as metadata corruption.
+			 */
+			ret = -EINVAL;
+		}
 	}
 
  out:
 	up_read(&bmap->b_sem);
-	return ret;
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
@@ -2044,6 +2044,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 	int err;
 
+	if (sb_rdonly(sci->sc_super))
+		return -EROFS;
+
 	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 	sci->sc_cno = nilfs->ns_cno;
 
@@ -2729,7 +2732,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 
 		flush_work(&sci->sc_iput_work);
 
-	} while (ret && retrycount-- > 0);
+	} while (ret && ret != -EROFS && retrycount-- > 0);
 }
 
 /**
@@ -82,11 +82,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
 			    struct inode *inode,
 			    struct reiserfs_security_handle *sec)
 {
+	char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
 	int error;
-	if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
+
+	if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
 		return -EINVAL;
 
-	error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
+	strlcat(xattr_name, sec->name, sizeof(xattr_name));
+
+	error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
 					  sec->length, XATTR_CREATE);
 	if (error == -ENODATA || error == -EOPNOTSUPP)
 		error = 0;
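The fix above builds the on-disk name as "security.<suffix>" and rejects suffixes that would push the result past XATTR_NAME_MAX, instead of trusting the old prefix-length check. A small userspace sketch of the same name construction (snprintf is used here in place of the kernel's strlcat; the helper name is made up for the example):

#include <stdio.h>
#include <string.h>

#define XATTR_NAME_MAX			255
#define XATTR_SECURITY_PREFIX		"security."
#define XATTR_SECURITY_PREFIX_LEN	(sizeof(XATTR_SECURITY_PREFIX) - 1)

/* Build "security.<suffix>", refusing anything that would not fit. */
static int build_xattr_name(const char *suffix, char *out, size_t outsz)
{
	if (XATTR_SECURITY_PREFIX_LEN + strlen(suffix) > XATTR_NAME_MAX)
		return -1;
	snprintf(out, outsz, "%s%s", XATTR_SECURITY_PREFIX, suffix);
	return 0;
}

int main(void)
{
	char name[XATTR_NAME_MAX + 1];

	if (build_xattr_name("selinux", name, sizeof(name)) == 0)
		printf("full xattr name: %s\n", name);
	return 0;
}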
@@ -426,6 +426,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	mutex_unlock(&dir_ui->ui_mutex);
 
 	ubifs_release_budget(c, &req);
+	fscrypt_free_filename(&nm);
 
 	return 0;
 
fs/ubifs/tnc.c (144 changed lines)
@@ -44,6 +44,33 @@ enum {
 	NOT_ON_MEDIA = 3,
 };
 
+static void do_insert_old_idx(struct ubifs_info *c,
+			      struct ubifs_old_idx *old_idx)
+{
+	struct ubifs_old_idx *o;
+	struct rb_node **p, *parent = NULL;
+
+	p = &c->old_idx.rb_node;
+	while (*p) {
+		parent = *p;
+		o = rb_entry(parent, struct ubifs_old_idx, rb);
+		if (old_idx->lnum < o->lnum)
+			p = &(*p)->rb_left;
+		else if (old_idx->lnum > o->lnum)
+			p = &(*p)->rb_right;
+		else if (old_idx->offs < o->offs)
+			p = &(*p)->rb_left;
+		else if (old_idx->offs > o->offs)
+			p = &(*p)->rb_right;
+		else {
+			ubifs_err(c, "old idx added twice!");
+			kfree(old_idx);
+		}
+	}
+	rb_link_node(&old_idx->rb, parent, p);
+	rb_insert_color(&old_idx->rb, &c->old_idx);
+}
+
 /**
  * insert_old_idx - record an index node obsoleted since the last commit start.
  * @c: UBIFS file-system description object
@@ -69,35 +96,15 @@ enum {
  */
 static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
 {
-	struct ubifs_old_idx *old_idx, *o;
-	struct rb_node **p, *parent = NULL;
+	struct ubifs_old_idx *old_idx;
 
 	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
 	if (unlikely(!old_idx))
 		return -ENOMEM;
 	old_idx->lnum = lnum;
 	old_idx->offs = offs;
+	do_insert_old_idx(c, old_idx);
 
-	p = &c->old_idx.rb_node;
-	while (*p) {
-		parent = *p;
-		o = rb_entry(parent, struct ubifs_old_idx, rb);
-		if (lnum < o->lnum)
-			p = &(*p)->rb_left;
-		else if (lnum > o->lnum)
-			p = &(*p)->rb_right;
-		else if (offs < o->offs)
-			p = &(*p)->rb_left;
-		else if (offs > o->offs)
-			p = &(*p)->rb_right;
-		else {
-			ubifs_err(c, "old idx added twice!");
-			kfree(old_idx);
-			return 0;
-		}
-	}
-	rb_link_node(&old_idx->rb, parent, p);
-	rb_insert_color(&old_idx->rb, &c->old_idx);
 	return 0;
 }
 
@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
 	__set_bit(DIRTY_ZNODE, &zn->flags);
 	__clear_bit(COW_ZNODE, &zn->flags);
 
-	ubifs_assert(c, !ubifs_zn_obsolete(znode));
-	__set_bit(OBSOLETE_ZNODE, &znode->flags);
-
-	if (znode->level != 0) {
-		int i;
-		const int n = zn->child_cnt;
-
-		/* The children now have new parent */
-		for (i = 0; i < n; i++) {
-			struct ubifs_zbranch *zbr = &zn->zbranch[i];
-
-			if (zbr->znode)
-				zbr->znode->parent = zn;
-		}
-	}
-
-	atomic_long_inc(&c->dirty_zn_cnt);
 	return zn;
 }
 
@@ -233,6 +223,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
 	return ubifs_add_dirt(c, lnum, dirt);
 }
 
+/**
+ * replace_znode - replace old znode with new znode.
+ * @c: UBIFS file-system description object
+ * @new_zn: new znode
+ * @old_zn: old znode
+ * @zbr: the branch of parent znode
+ *
+ * Replace old znode with new znode in TNC.
+ */
+static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
+			  struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
+{
+	ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
+	__set_bit(OBSOLETE_ZNODE, &old_zn->flags);
+
+	if (old_zn->level != 0) {
+		int i;
+		const int n = new_zn->child_cnt;
+
+		/* The children now have new parent */
+		for (i = 0; i < n; i++) {
+			struct ubifs_zbranch *child = &new_zn->zbranch[i];
+
+			if (child->znode)
+				child->znode->parent = new_zn;
+		}
+	}
+
+	zbr->znode = new_zn;
+	zbr->lnum = 0;
+	zbr->offs = 0;
+	zbr->len = 0;
+
+	atomic_long_inc(&c->dirty_zn_cnt);
+}
+
 /**
  * dirty_cow_znode - ensure a znode is not being committed.
  * @c: UBIFS file-system description object
@@ -265,28 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
 		return zn;
 
 	if (zbr->len) {
-		err = insert_old_idx(c, zbr->lnum, zbr->offs);
-		if (unlikely(err))
-			/*
-			 * Obsolete znodes will be freed by tnc_destroy_cnext()
-			 * or free_obsolete_znodes(), copied up znodes should
-			 * be added back to tnc and freed by
-			 * ubifs_destroy_tnc_subtree().
-			 */
+		struct ubifs_old_idx *old_idx;
+
+		old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+		if (unlikely(!old_idx)) {
+			err = -ENOMEM;
 			goto out;
+		}
+		old_idx->lnum = zbr->lnum;
+		old_idx->offs = zbr->offs;
+
 		err = add_idx_dirt(c, zbr->lnum, zbr->len);
-	} else
-		err = 0;
+		if (err) {
+			kfree(old_idx);
+			goto out;
+		}
+
+		do_insert_old_idx(c, old_idx);
+	}
+
+	replace_znode(c, zn, znode, zbr);
+
+	return zn;
 
 out:
-	zbr->znode = zn;
-	zbr->lnum = 0;
-	zbr->offs = 0;
-	zbr->len = 0;
-
-	if (unlikely(err))
-		return ERR_PTR(err);
-	return zn;
+	kfree(zn);
+	return ERR_PTR(err);
 }
 
 /**
@@ -9,7 +9,7 @@
  * @data: message payload
  *
  * This is the structure for data used in mbox_send_message
- * the maximum length of data buffer is fixed to 12 bytes.
+ * the maximum length of data buffer is fixed to 32 bytes.
  * Client is supposed to be aware of this.
  */
 struct zynqmp_ipi_message {
@@ -26,15 +26,15 @@ asm (
 "	.popsection				\n"
 );
 
-extern char kernel_headers_data;
-extern char kernel_headers_data_end;
+extern char kernel_headers_data[];
+extern char kernel_headers_data_end[];
 
 static ssize_t
 ikheaders_read(struct file *file,  struct kobject *kobj,
 	       struct bin_attribute *bin_attr,
 	       char *buf, loff_t off, size_t len)
 {
-	memcpy(buf, &kernel_headers_data + off, len);
+	memcpy(buf, &kernel_headers_data[off], len);
 	return len;
 }
 
@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
 
 static int __init ikheaders_init(void)
 {
-	kheaders_attr.size = (&kernel_headers_data_end -
-			      &kernel_headers_data);
+	kheaders_attr.size = (kernel_headers_data_end -
+			      kernel_headers_data);
 	return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
 }
 
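Switching the externs to array type is what makes the plain pointer arithmetic in the hunks above well defined; with `extern char sym;` the code had to take the address of a single char and index past it. A userspace sketch of the same access pattern, where a local array stands in for the archive that the kernel embeds via assembly (that substitution is an assumption made purely so the snippet runs):

#include <stdio.h>
#include <string.h>

static const char kernel_headers_data[] = "pretend this is headers.tar.xz";
static const char * const kernel_headers_data_end =
	kernel_headers_data + sizeof(kernel_headers_data) - 1;

int main(void)
{
	size_t size = (size_t)(kernel_headers_data_end - kernel_headers_data);
	char buf[8];

	/* Same shape as ikheaders_read(): copy a few bytes at an offset. */
	memcpy(buf, &kernel_headers_data[4], sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	printf("blob size: %zu bytes, bytes at offset 4: \"%s\"\n", size, buf);
	return 0;
}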
@@ -964,6 +964,7 @@ void __rcu_irq_enter_check_tick(void)
 	}
 	raw_spin_unlock_rcu_node(rdp->mynode);
 }
+NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
@@ -1077,7 +1077,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
 	size_t subbuf_size = buf->chan->subbuf_size;
 	size_t n_subbufs = buf->chan->n_subbufs;
 	size_t consumed = buf->subbufs_consumed % n_subbufs;
-	size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
+	size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
+			% (n_subbufs * subbuf_size);
 
 	read_subbuf = read_pos / subbuf_size;
 	padding = buf->padding[read_subbuf];
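Without the modulo, read_pos can land exactly at the end of the channel (consumed == n_subbufs - 1 and bytes_consumed == subbuf_size), and the read_pos / subbuf_size division that follows then indexes one past the padding[] array. A few lines of arithmetic showing the wrap, with made-up channel geometry:

#include <stdio.h>

int main(void)
{
	size_t subbuf_size = 4096, n_subbufs = 8;	/* hypothetical geometry */
	size_t subbufs_consumed = 7, bytes_consumed = 4096;

	size_t consumed = subbufs_consumed % n_subbufs;
	size_t old_pos = consumed * subbuf_size + bytes_consumed;
	size_t new_pos = old_pos % (n_subbufs * subbuf_size);

	/* old_pos / subbuf_size == 8, one past the valid sub-buffers 0..7;
	 * the wrapped position points back at sub-buffer 0 instead. */
	printf("old read_pos %zu -> subbuf %zu\n", old_pos, old_pos / subbuf_size);
	printf("new read_pos %zu -> subbuf %zu\n", new_pos, new_pos / subbuf_size);
	return 0;
}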
@@ -1644,6 +1644,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	struct list_head *head = cpu_buffer->pages;
 	struct buffer_page *bpage, *tmp;
 
+	irq_work_sync(&cpu_buffer->irq_work.work);
+
 	free_buffer_page(cpu_buffer->reader_page);
 
 	if (head) {
@@ -1750,6 +1752,8 @@ ring_buffer_free(struct trace_buffer *buffer)
 
 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 
+	irq_work_sync(&buffer->irq_work.work);
+
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
@@ -378,6 +378,15 @@ static void wb_exit(struct bdi_writeback *wb)
 static DEFINE_SPINLOCK(cgwb_lock);
 static struct workqueue_struct *cgwb_release_wq;
 
+static void cgwb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct bdi_writeback *wb = container_of(rcu_head,
+			struct bdi_writeback, rcu);
+
+	percpu_ref_exit(&wb->refcnt);
+	kfree(wb);
+}
+
 static void cgwb_release_workfn(struct work_struct *work)
 {
 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -397,7 +406,7 @@ static void cgwb_release_workfn(struct work_struct *work)
 	fprop_local_destroy_percpu(&wb->memcg_completions);
-	percpu_ref_exit(&wb->refcnt);
 	wb_exit(wb);
-	kfree_rcu(wb, rcu);
+	call_rcu(&wb->rcu, cgwb_free_rcu);
 }
 
 static void cgwb_release(struct percpu_ref *refcnt)
@@ -88,11 +88,7 @@ static inline int ioctl_return(int __user *addr, int value)
  */
 
 extern int dmasound_init(void);
-#ifdef MODULE
 extern void dmasound_deinit(void);
-#else
-#define dmasound_deinit()	do { } while (0)
-#endif
 
 /* description of the set-up applies to either hard or soft settings */
 
@@ -114,9 +110,7 @@ typedef struct {
     void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
-#ifdef MODULE
     void (*irqcleanup)(void);
-#endif
     void (*init)(void);
     void (*silence)(void);
    int (*setFormat)(int);
@@ -206,12 +206,10 @@ module_param(writeBufSize, int, 0);
 
 MODULE_LICENSE("GPL");
 
-#ifdef MODULE
 static int sq_unit = -1;
 static int mixer_unit = -1;
 static int state_unit = -1;
 static int irq_installed;
-#endif /* MODULE */
 
 /* control over who can modify resources shared between play/record */
 static fmode_t shared_resource_owner;
@@ -391,9 +389,6 @@ static const struct file_operations mixer_fops =
 
 static void mixer_init(void)
 {
-#ifndef MODULE
-	int mixer_unit;
-#endif
 	mixer_unit = register_sound_mixer(&mixer_fops, -1);
 	if (mixer_unit < 0)
 		return;
@@ -1176,9 +1171,6 @@ static const struct file_operations sq_fops =
 static int sq_init(void)
 {
 	const struct file_operations *fops = &sq_fops;
-#ifndef MODULE
-	int sq_unit;
-#endif
 
 	sq_unit = register_sound_dsp(fops, -1);
 	if (sq_unit < 0) {
@@ -1380,9 +1372,6 @@ static const struct file_operations state_fops = {
 
 static int state_init(void)
 {
-#ifndef MODULE
-	int state_unit;
-#endif
 	state_unit = register_sound_special(&state_fops, SND_DEV_STATUS);
 	if (state_unit < 0)
 		return state_unit ;
@@ -1400,10 +1389,9 @@ static int state_init(void)
 int dmasound_init(void)
 {
 	int res ;
-#ifdef MODULE
+
 	if (irq_installed)
 		return -EBUSY;
-#endif
 
 	/* Set up sound queue, /dev/audio and /dev/dsp. */
 
@@ -1422,9 +1410,7 @@ int dmasound_init(void)
 		printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n");
 		return -ENODEV;
 	}
-#ifdef MODULE
 	irq_installed = 1;
-#endif
 
 	printk(KERN_INFO "%s DMA sound driver rev %03d installed\n",
 		dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
@@ -1438,8 +1424,6 @@ int dmasound_init(void)
 	return 0;
 }
 
-#ifdef MODULE
-
 void dmasound_deinit(void)
 {
 	if (irq_installed) {
@@ -1458,8 +1442,6 @@ void dmasound_deinit(void)
 	unregister_sound_dsp(sq_unit);
 }
 
-#else /* !MODULE */
-
 static int dmasound_setup(char *str)
 {
 	int ints[6], size;
@@ -1503,8 +1485,6 @@ static int dmasound_setup(char *str)
 
 __setup("dmasound=", dmasound_setup);
 
-#endif /* !MODULE */
-
 /*
  *  Conversion tables
  */
@@ -1591,9 +1571,7 @@ char dmasound_alaw2dma8[] = {
 
 EXPORT_SYMBOL(dmasound);
 EXPORT_SYMBOL(dmasound_init);
-#ifdef MODULE
 EXPORT_SYMBOL(dmasound_deinit);
-#endif
 EXPORT_SYMBOL(dmasound_write_sq);
 EXPORT_SYMBOL(dmasound_catchRadius);
 #ifdef HAS_8BIT_TABLES