Merge 5.10.31 into android12-5.10
Changes in 5.10.31
    interconnect: core: fix error return code of icc_link_destroy()
    gfs2: Flag a withdraw if init_threads() fails
    KVM: arm64: Hide system instruction access to Trace registers
    KVM: arm64: Disable guest access to trace filter controls
    drm/imx: imx-ldb: fix out of bounds array access warning
    gfs2: report "already frozen/thawed" errors
    ftrace: Check if pages were allocated before calling free_pages()
    tools/kvm_stat: Add restart delay
    drm/tegra: dc: Don't set PLL clock to 0Hz
    gpu: host1x: Use different lock classes for each client
    XArray: Fix splitting to non-zero orders
    block: only update parent bi_status when bio fail
    radix tree test suite: Register the main thread with the RCU library
    idr test suite: Take RCU read lock in idr_find_test_1
    idr test suite: Create anchor before launching throbber
    null_blk: fix command timeout completion handling
    io_uring: don't mark S_ISBLK async work as unbounded
    riscv,entry: fix misaligned base for excp_vect_table
    block: don't ignore REQ_NOWAIT for direct IO
    netfilter: x_tables: fix compat match/target pad out-of-bound write
    perf map: Tighten snprintf() string precision to pass gcc check on some 32-bit arches
    net: sfp: relax bitrate-derived mode check
    net: sfp: cope with SFPs that set both LOS normal and LOS inverted
    xen/events: fix setting irq affinity
    Linux 5.10.31

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I19a7cfbdaab23e578dd82c552aea86d367c2f40f
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 30
+SUBLEVEL = 31
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -447,6 +447,7 @@ ENDPROC(__switch_to)
 #endif

         .section ".rodata"
+        .align LGREG
 /* Exception vector table */
 ENTRY(excp_vect_table)
         RISCV_PTR do_trap_insn_misaligned
@@ -313,7 +313,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
         struct bio *parent = bio->bi_private;

-        if (!parent->bi_status)
+        if (bio->bi_status && !parent->bi_status)
                 parent->bi_status = bio->bi_status;
         bio_put(bio);
         return parent;
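
The guard matters because chained child bios complete concurrently: with the old check-then-assign, a child that succeeded could store a zero status over an error a failing sibling had just recorded. A minimal userspace sketch of the pattern (names hypothetical, blk_status_t reduced to an int; the kernel case is racy, this sketch is sequential):

#include <stdio.h>

/* 0 == OK, non-zero == error, loosely mirroring blk_status_t. */
static int parent_status;

/* Old form: even a successful child (status == 0) performed the store,
 * so under concurrency it could overwrite a sibling's error with 0. */
static void child_endio_old(int status)
{
        if (!parent_status)
                parent_status = status;
}

/* Fixed form: successful children never touch the parent's status. */
static void child_endio_new(int status)
{
        if (status && !parent_status)
                parent_status = status;
}

int main(void)
{
        child_endio_old(0);  /* harmless here, but racy under concurrency */
        child_endio_new(10); /* failure: the first error sticks */
        child_endio_new(0);  /* a later success cannot clear it */
        printf("parent status = %d\n", parent_status); /* 10 */
        return 0;
}
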
@@ -20,6 +20,7 @@ struct nullb_cmd {
         blk_status_t error;
         struct nullb_queue *nq;
         struct hrtimer timer;
+        bool fake_timeout;
 };

 struct nullb_queue {
@@ -1367,10 +1367,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
         }

         if (dev->zoned)
-                cmd->error = null_process_zoned_cmd(cmd, op,
-                                                    sector, nr_sectors);
+                sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
         else
-                cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+                sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+        /* Do not overwrite errors (e.g. timeout errors) */
+        if (cmd->error == BLK_STS_OK)
+                cmd->error = sts;

 out:
         nullb_complete_cmd(cmd);
@@ -1449,8 +1452,20 @@ static bool should_requeue_request(struct request *rq)

 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
         pr_info("rq %p timed out\n", rq);
-        blk_mq_complete_request(rq);
+
+        /*
+         * If the device is marked as blocking (i.e. memory backed or zoned
+         * device), the submission path may be blocked waiting for resources
+         * and cause real timeouts. For these real timeouts, the submission
+         * path will complete the request using blk_mq_complete_request().
+         * Only fake timeouts need to execute blk_mq_complete_request() here.
+         */
+        cmd->error = BLK_STS_TIMEOUT;
+        if (cmd->fake_timeout)
+                blk_mq_complete_request(rq);
         return BLK_EH_DONE;
 }

@@ -1471,6 +1486,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
         cmd->rq = bd->rq;
         cmd->error = BLK_STS_OK;
         cmd->nq = nq;
+        cmd->fake_timeout = should_timeout_request(bd->rq);

         blk_mq_start_request(bd->rq);

@@ -1487,7 +1503,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         return BLK_STS_OK;
                 }
         }
-        if (should_timeout_request(bd->rq))
+        if (cmd->fake_timeout)
                 return BLK_STS_OK;

         return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
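
The key idea is that cmd->error becomes effectively write-once: the timeout handler records BLK_STS_TIMEOUT first, and the normal completion path only fills in its own status when no error has been recorded yet. A reduced sketch of this sticky first-error pattern (userspace C, all names hypothetical):

#include <stdio.h>

enum status { STS_OK = 0, STS_IOERR = 1, STS_TIMEOUT = 2 };

struct cmd { enum status error; };

/* Timeout path: record the timeout unconditionally. */
static void on_timeout(struct cmd *cmd)
{
        cmd->error = STS_TIMEOUT;
}

/* Completion path: keep the first recorded error, mirroring the
 * "Do not overwrite errors (e.g. timeout errors)" hunk above. */
static void complete_cmd(struct cmd *cmd, enum status sts)
{
        if (cmd->error == STS_OK)
                cmd->error = sts;
}

int main(void)
{
        struct cmd cmd = { STS_OK };

        on_timeout(&cmd);           /* fires while submission is blocked */
        complete_cmd(&cmd, STS_OK); /* late success must not hide the timeout */
        printf("error = %d\n", cmd.error); /* 2 (STS_TIMEOUT) */
        return 0;
}
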
@@ -190,6 +190,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
         int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
         int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);

+        if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+                dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+                return;
+        }
+
         drm_panel_prepare(imx_ldb_ch->panel);

         if (dual) {
@@ -248,6 +253,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
         int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
         u32 bus_format = imx_ldb_ch->bus_format;

+        if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+                dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+                return;
+        }
+
         if (mode->clock > 170000) {
                 dev_warn(ldb->dev,
                          "%s: mode exceeds 170 MHz pixel clock\n", __func__);
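
The point of the guard is that drm_of_encoder_active_port_id() can return a negative error code, which the old code used directly as an index into ldb->clk_sel. The added check is the standard bounds-check idiom; a self-contained sketch (array contents and function names invented):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int clk_sel[4] = { 10, 11, 12, 13 };

static int lookup_clk_sel(int mux)
{
        /* Reject both negative error codes and out-of-range indices
         * before touching the array. */
        if (mux < 0 || (size_t)mux >= ARRAY_SIZE(clk_sel)) {
                fprintf(stderr, "invalid mux %d\n", mux);
                return -1;
        }
        return clk_sel[mux];
}

int main(void)
{
        printf("%d\n", lookup_clk_sel(2));   /* valid index: 12 */
        printf("%d\n", lookup_clk_sel(-22)); /* error code from a failed lookup */
        return 0;
}
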
@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                         dev_err(dc->dev,
                                 "failed to set clock rate to %lu Hz\n",
                                 state->pclk);
+
+                err = clk_set_rate(dc->clk, state->pclk);
+                if (err < 0)
+                        dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+                                dc->clk, state->pclk, err);
         }

         DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                 value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
                 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
         }
-
-        err = clk_set_rate(dc->clk, state->pclk);
-        if (err < 0)
-                dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
-                        dc->clk, state->pclk, err);
 }

 static void tegra_dc_stop(struct tegra_dc *dc)
@@ -704,8 +704,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);

 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
  * Registers a host1x client with each host1x controller instance. Note that
  * each client will only match their parent host1x controller and will only be
@@ -714,13 +715,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+                             struct lock_class_key *key)
 {
         struct host1x *host1x;
         int err;

         INIT_LIST_HEAD(&client->list);
-        mutex_init(&client->lock);
+        __mutex_init(&client->lock, "host1x client lock", key);
         client->usecount = 0;

         mutex_lock(&devices_lock);
@@ -741,7 +743,7 @@ int host1x_client_register(struct host1x_client *client)

         return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);

 /**
  * host1x_client_unregister() - unregister a host1x client
@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
                          GFP_KERNEL);
         if (new)
                 src->links = new;
+        else
+                ret = -ENOMEM;

 out:
         mutex_unlock(&icc_lock);
@@ -349,14 +349,13 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
         }

         /* If we haven't discovered any modes that this module supports, try
-         * the encoding and bitrate to determine supported modes. Some BiDi
-         * modules (eg, 1310nm/1550nm) are not 1000BASE-BX compliant due to
-         * the differing wavelengths, so do not set any transceiver bits.
+         * the bitrate to determine supported modes. Some BiDi modules (eg,
+         * 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing
+         * wavelengths, so do not set any transceiver bits.
          */
         if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
-                /* If the encoding and bit rate allows 1000baseX */
-                if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom &&
-                    br_min <= 1300 && br_max >= 1200)
+                /* If the bit rate allows 1000baseX */
+                if (br_nom && br_min <= 1300 && br_max >= 1200)
                         phylink_set(modes, 1000baseX_Full);
         }

@@ -1501,15 +1501,19 @@ static void sfp_sm_link_down(struct sfp *sfp)

 static void sfp_sm_link_check_los(struct sfp *sfp)
 {
-        unsigned int los = sfp->state & SFP_F_LOS;
+        const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+        const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+        __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+        bool los = false;

         /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
-         * are set, we assume that no LOS signal is available.
+         * are set, we assume that no LOS signal is available. If both are
+         * set, we assume LOS is not implemented (and is meaningless.)
          */
-        if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
-                los ^= SFP_F_LOS;
-        else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
-                los = 0;
+        if (los_options == los_inverted)
+                los = !(sfp->state & SFP_F_LOS);
+        else if (los_options == los_normal)
+                los = !!(sfp->state & SFP_F_LOS);

         if (los)
                 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1519,18 +1523,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)

 static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
 {
-        return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
-                event == SFP_E_LOS_LOW) ||
-               (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
-                event == SFP_E_LOS_HIGH);
+        const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+        const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+        __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+        return (los_options == los_inverted && event == SFP_E_LOS_LOW) ||
+               (los_options == los_normal && event == SFP_E_LOS_HIGH);
 }

 static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
 {
-        return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
-                event == SFP_E_LOS_HIGH) ||
-               (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
-                event == SFP_E_LOS_LOW);
+        const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+        const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+        __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+        return (los_options == los_inverted && event == SFP_E_LOS_HIGH) ||
+               (los_options == los_normal && event == SFP_E_LOS_LOW);
 }

 static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
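
Masking the options field down to the two LOS bits first, and then requiring an exact match against one of them, is what makes the "both bits set" case fall through to the no-LOS default. A simplified host-byte-order model of that decoding (the bit values mirror the SFF-8472 option bits but are illustrative, not the kernel definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OPT_LOS_INVERTED 0x0004
#define OPT_LOS_NORMAL   0x0002
#define STATE_LOS        0x0001

static bool signal_lost(uint16_t options, uint16_t state)
{
        uint16_t los_options = options & (OPT_LOS_INVERTED | OPT_LOS_NORMAL);

        if (los_options == OPT_LOS_INVERTED)
                return !(state & STATE_LOS);
        if (los_options == OPT_LOS_NORMAL)
                return !!(state & STATE_LOS);
        return false; /* neither or both bits set: LOS not usable */
}

int main(void)
{
        /* A module that sets both bits: the LOS pin is ignored entirely. */
        printf("%d\n", signal_lost(OPT_LOS_INVERTED | OPT_LOS_NORMAL, STATE_LOS));
        /* Normal polarity: an asserted LOS pin means the signal is lost. */
        printf("%d\n", signal_lost(OPT_LOS_NORMAL, STATE_LOS));
        return 0;
}
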
@@ -1809,7 +1809,7 @@ static void lateeoi_ack_dynirq(struct irq_data *data)

         if (VALID_EVTCHN(evtchn)) {
                 do_mask(info, EVT_MASK_REASON_EOI_PENDING);
-                event_handler_exit(info);
+                ack_dynirq(data);
         }
 }

@@ -1820,7 +1820,7 @@ static void lateeoi_mask_ack_dynirq(struct irq_data *data)

         if (VALID_EVTCHN(evtchn)) {
                 do_mask(info, EVT_MASK_REASON_EXPLICIT);
-                event_handler_exit(info);
+                ack_dynirq(data);
         }
 }

@@ -280,6 +280,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                 bio.bi_opf = dio_bio_write_op(iocb);
                 task_io_account_write(ret);
         }
+        if (iocb->ki_flags & IOCB_NOWAIT)
+                bio.bi_opf |= REQ_NOWAIT;
         if (iocb->ki_flags & IOCB_HIPRI)
                 bio_set_polled(&bio, iocb);

@@ -433,6 +435,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                         bio->bi_opf = dio_bio_write_op(iocb);
                         task_io_account_write(bio->bi_iter.bi_size);
                 }
+                if (iocb->ki_flags & IOCB_NOWAIT)
+                        bio->bi_opf |= REQ_NOWAIT;

                 dio->size += bio->bi_iter.bi_size;
                 pos += bio->bi_iter.bi_size;
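
Before this change, the block-device direct-IO paths dropped IOCB_NOWAIT on the floor, so a caller that asked for non-blocking IO could still sleep in the block layer. A userspace sketch of what the flag is for, using preadv2() with RWF_NOWAIT on an O_DIRECT descriptor (the device path and sizes are placeholders; reading a raw device typically needs root):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        /* O_DIRECT requires block-aligned buffers and offsets. */
        int fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY | O_DIRECT);
        struct iovec iov;
        void *buf;

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;
        iov.iov_base = buf;
        iov.iov_len = 4096;

        /* RWF_NOWAIT sets IOCB_NOWAIT; with the fix above the resulting
         * bio carries REQ_NOWAIT instead of silently blocking. */
        if (preadv2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
                fprintf(stderr, "would block, try again later\n");
        return 0;
}
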
@@ -169,8 +169,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
         int error;

         error = init_threads(sdp);
-        if (error)
+        if (error) {
+                gfs2_withdraw_delayed(sdp);
                 return error;
+        }

         j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
         if (gfs2_withdrawn(sdp)) {
@@ -767,11 +769,13 @@ void gfs2_freeze_func(struct work_struct *work)
 static int gfs2_freeze(struct super_block *sb)
 {
         struct gfs2_sbd *sdp = sb->s_fs_info;
-        int error = 0;
+        int error;

         mutex_lock(&sdp->sd_freeze_mutex);
-        if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+        if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+                error = -EBUSY;
                 goto out;
+        }

         for (;;) {
                 if (gfs2_withdrawn(sdp)) {
@@ -812,10 +816,10 @@ static int gfs2_unfreeze(struct super_block *sb)
         struct gfs2_sbd *sdp = sb->s_fs_info;

         mutex_lock(&sdp->sd_freeze_mutex);
-        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
             !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
                 mutex_unlock(&sdp->sd_freeze_mutex);
-                return 0;
+                return -EINVAL;
         }

         gfs2_freeze_unlock(&sdp->sd_freeze_gh);
@@ -1439,7 +1439,7 @@ static void io_prep_async_work(struct io_kiocb *req)
         if (req->flags & REQ_F_ISREG) {
                 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
                         io_wq_hash_work(&req->work, file_inode(req->file));
-        } else {
+        } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                 if (def->unbound_nonreg_file)
                         req->work.flags |= IO_WQ_WORK_UNBOUND;
         }
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);

-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+                             struct lock_class_key *key);
+#define host1x_client_register(class) \
+        ({ \
+                static struct lock_class_key __key; \
+                __host1x_client_register(class, &__key); \
+        })
+
 int host1x_client_unregister(struct host1x_client *client);

 int host1x_client_suspend(struct host1x_client *client);
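
The macro works because each expansion of host1x_client_register() defines its own static lock_class_key, so every registration site gives lockdep a distinct class for the client mutex instead of one class shared by all clients. A userspace model of the trick, with lockdep replaced by printing the key's address (relies on the GNU statement-expression extension, just as the kernel macro does; all names are hypothetical):

#include <stdio.h>

struct lock_class_key { int dummy; };

static void register_client(const char *name, struct lock_class_key *key)
{
        printf("client %s uses key %p\n", name, (void *)key);
}

/* Each expansion creates its own static key object. */
#define client_register(name) \
        ({ \
                static struct lock_class_key __key; \
                register_client(name, &__key); \
        })

int main(void)
{
        client_register("vic");   /* first expansion: one key */
        client_register("nvdec"); /* second expansion: a different key */
        return 0;
}
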
@@ -3230,7 +3230,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
         pg = start_pg;
         while (pg) {
                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                free_pages((unsigned long)pg->records, order);
+                if (order >= 0)
+                        free_pages((unsigned long)pg->records, order);
                 start_pg = pg->next;
                 kfree(pg);
                 pg = start_pg;
@@ -6452,7 +6453,8 @@ void ftrace_release_mod(struct module *mod)
                 clear_mod_from_hashes(pg);

                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                free_pages((unsigned long)pg->records, order);
+                if (order >= 0)
+                        free_pages((unsigned long)pg->records, order);
                 tmp_page = pg->next;
                 kfree(pg);
                 ftrace_number_of_pages -= 1 << order;
@@ -6812,7 +6814,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                 if (!pg->index) {
                         *last_pg = pg->next;
                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                        free_pages((unsigned long)pg->records, order);
+                        if (order >= 0)
+                                free_pages((unsigned long)pg->records, order);
                         ftrace_number_of_pages -= 1 << order;
                         ftrace_number_of_groups--;
                         kfree(pg);
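
The guard exists because get_count_order() returns -1 for a count of zero, and pg->size can legitimately be zero here, so the unconditional call passed a negative order to free_pages(). A small userspace approximation of the helper (my implementation uses __builtin_clz and is a sketch, not the kernel's fls()-based definition):

#include <stdio.h>

/* Returns -1 for a zero count, otherwise the smallest order such that
 * (1 << order) >= count. */
static int get_count_order(unsigned int count)
{
        if (count == 0)
                return -1;
        count--;
        return count ? 32 - __builtin_clz(count) : 0;
}

int main(void)
{
        for (unsigned int n = 0; n <= 4; n++)
                printf("get_count_order(%u) = %d\n", n, get_count_order(n));
        /* A pg with no records yields order -1; freeing with a negative
         * order corrupts state, hence the new `if (order >= 0)` checks. */
        return 0;
}
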
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)

 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                unsigned int order)
+                unsigned int order, unsigned int new_order)
 {
-        XA_STATE(xas, xa, index);
-        void *entry;
-        unsigned int i = 0;
+        XA_STATE_ORDER(xas, xa, index, new_order);
+        unsigned int i;

         xa_store_order(xa, index, order, xa, GFP_KERNEL);

         xas_split_alloc(&xas, xa, order, GFP_KERNEL);
         xas_lock(&xas);
         xas_split(&xas, xa, order);
+        for (i = 0; i < (1 << order); i += (1 << new_order))
+                __xa_store(xa, index + i, xa_mk_index(index + i), 0);
         xas_unlock(&xas);

-        xa_for_each(xa, index, entry) {
-                XA_BUG_ON(xa, entry != xa);
-                i++;
+        for (i = 0; i < (1 << order); i++) {
+                unsigned int val = index + (i & ~((1 << new_order) - 1));
+                XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
         }
-        XA_BUG_ON(xa, i != 1 << order);

         xa_set_mark(xa, index, XA_MARK_0);
         XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,

 static noinline void check_split(struct xarray *xa)
 {
-        unsigned int order;
+        unsigned int order, new_order;

         XA_BUG_ON(xa, !xa_empty(xa));

         for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-                check_split_1(xa, 0, order);
-                check_split_1(xa, 1UL << order, order);
-                check_split_1(xa, 3UL << order, order);
+                for (new_order = 0; new_order < order; new_order++) {
+                        check_split_1(xa, 0, order, new_order);
+                        check_split_1(xa, 1UL << order, order, new_order);
+                        check_split_1(xa, 3UL << order, order, new_order);
+                }
         }
 }
 #else
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,

         do {
                 unsigned int i;
-                void *sibling;
+                void *sibling = NULL;
                 struct xa_node *node;

                 node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                 for (i = 0; i < XA_CHUNK_SIZE; i++) {
                         if ((i & mask) == 0) {
                                 RCU_INIT_POINTER(node->slots[i], entry);
-                                sibling = xa_mk_sibling(0);
+                                sibling = xa_mk_sibling(i);
                         } else {
                                 RCU_INIT_POINTER(node->slots[i], sibling);
                         }
@@ -1193,6 +1193,8 @@ static int translate_compat_table(struct net *net,
         if (!newinfo)
                 goto out_unlock;

+        memset(newinfo->entries, 0, size);
+
         newinfo->number = compatr->num_entries;
         for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
                 newinfo->hook_entry[i] = compatr->hook_entry[i];
@@ -1428,6 +1428,8 @@ translate_compat_table(struct net *net,
         if (!newinfo)
                 goto out_unlock;

+        memset(newinfo->entries, 0, size);
+
         newinfo->number = compatr->num_entries;
         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                 newinfo->hook_entry[i] = compatr->hook_entry[i];
@@ -1443,6 +1443,8 @@ translate_compat_table(struct net *net,
         if (!newinfo)
                 goto out_unlock;

+        memset(newinfo->entries, 0, size);
+
         newinfo->number = compatr->num_entries;
         for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                 newinfo->hook_entry[i] = compatr->hook_entry[i];
@@ -733,7 +733,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 {
         const struct xt_match *match = m->u.kernel.match;
         struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
-        int pad, off = xt_compat_match_offset(match);
+        int off = xt_compat_match_offset(match);
         u_int16_t msize = cm->u.user.match_size;
         char name[sizeof(m->u.user.name)];

@@ -743,9 +743,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                 match->compat_from_user(m->data, cm->data);
         else
                 memcpy(m->data, cm->data, msize - sizeof(*cm));
-        pad = XT_ALIGN(match->matchsize) - match->matchsize;
-        if (pad > 0)
-                memset(m->data + match->matchsize, 0, pad);

         msize += off;
         m->u.user.match_size = msize;
@@ -1116,7 +1113,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 {
         const struct xt_target *target = t->u.kernel.target;
         struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
-        int pad, off = xt_compat_target_offset(target);
+        int off = xt_compat_target_offset(target);
         u_int16_t tsize = ct->u.user.target_size;
         char name[sizeof(t->u.user.name)];

@@ -1126,9 +1123,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                 target->compat_from_user(t->data, ct->data);
         else
                 memcpy(t->data, ct->data, tsize - sizeof(*ct));
-        pad = XT_ALIGN(target->targetsize) - target->targetsize;
-        if (pad > 0)
-                memset(t->data + target->targetsize, 0, pad);

         tsize += off;
         t->u.user.target_size = tsize;
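
The per-entry pad memsets computed the padding from the kernel-side structure size, which for crafted compat input could land past the end of the destination buffer. The translate_compat_table() hunks above replace them with a single memset of the freshly allocated table, so alignment padding is zero by construction. A sketch of that replacement pattern (sizes invented):

#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t size = 256; /* total size of the translated table */
        unsigned char *entries = malloc(size);

        if (!entries)
                return 1;
        /* Equivalent of the added memset(newinfo->entries, 0, size):
         * clear everything once up front... */
        memset(entries, 0, size);

        /* ...then copy each translated entry into `entries`; any tail
         * padding between entries stays zeroed without per-entry
         * bounds arithmetic that can overflow the buffer. */

        free(entries);
        return 0;
}
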
@@ -9,6 +9,7 @@ Type=simple
 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=always
+RestartSec=60s
 SyslogIdentifier=kvm_stat
 SyslogLevel=debug
@@ -77,8 +77,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
         if (strstarts(filename, "/system/lib/")) {
                 char *ndk, *app;
                 const char *arch;
-                size_t ndk_length;
-                size_t app_length;
+                int ndk_length, app_length;

                 ndk = getenv("NDK_ROOT");
                 app = getenv("APP_PLATFORM");
@@ -106,8 +105,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
                 if (new_length > PATH_MAX)
                         return false;
                 snprintf(newfilename, new_length,
-                        "%s/platforms/%s/arch-%s/usr/lib/%s",
-                        ndk, app, arch, libname);
+                        "%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
+                        ndk_length, ndk, app_length, app, arch, libname);

                 return true;
         }
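
The %.*s conversion takes an explicit int precision argument that bounds how many bytes of the string are printed, which is what lets gcc's format-truncation analysis prove the output fits on the 32-bit targets that warned here (and is why the lengths change from size_t to int). A standalone demonstration (paths invented):

#include <stdio.h>

int main(void)
{
        const char *ndk = "/opt/android-ndk/extra";
        int ndk_length = 16; /* print only this many bytes of `ndk` */
        char out[64];

        /* "%.*s" consumes the int precision plus the string argument. */
        snprintf(out, sizeof(out), "%.*s/usr/lib", ndk_length, ndk);
        puts(out); /* -> /opt/android-ndk/usr/lib */
        return 0;
}
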
@@ -301,16 +301,20 @@ void idr_find_test_1(int anchor_id, int throbber_id)
         pthread_t throbber;
         time_t start = time(NULL);

-        pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
         BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
                                 anchor_id + 1, GFP_KERNEL) != anchor_id);

+        pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+        rcu_read_lock();
         do {
                 int id = 0;
                 void *entry = idr_get_next(&find_idr, &id);
+                rcu_read_unlock();
                 BUG_ON(entry != xa_mk_value(id));
+                rcu_read_lock();
         } while (time(NULL) < start + 11);
+        rcu_read_unlock();

         pthread_join(throbber, NULL);

@@ -577,6 +581,7 @@ void ida_tests(void)

 int __weak main(void)
 {
+        rcu_register_thread();
         radix_tree_init();
         idr_checks();
         ida_tests();
@@ -584,5 +589,6 @@ int __weak main(void)
         rcu_barrier();
         if (nr_allocated)
                 printf("nr_allocated = %d\n", nr_allocated);
+        rcu_unregister_thread();
         return 0;
 }
@@ -224,7 +224,9 @@ void multiorder_checks(void)

 int __weak main(void)
 {
+        rcu_register_thread();
         radix_tree_init();
         multiorder_checks();
+        rcu_unregister_thread();
         return 0;
 }
@@ -25,11 +25,13 @@ void xarray_tests(void)

 int __weak main(void)
 {
+        rcu_register_thread();
         radix_tree_init();
         xarray_tests();
         radix_tree_cpu_dead(1);
         rcu_barrier();
         if (nr_allocated)
                 printf("nr_allocated = %d\n", nr_allocated);
+        rcu_unregister_thread();
         return 0;
 }
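
All three test harnesses get the same treatment because userspace RCU requires every thread that enters a read-side critical section to register itself first, and that includes the main thread. A minimal liburcu skeleton of the pattern these main() functions now follow (assumes liburcu is installed; link with -lurcu):

#include <urcu.h>

int main(void)
{
        rcu_register_thread();      /* the fix the hunks above add */

        rcu_read_lock();
        /* ... dereference RCU-protected data here ... */
        rcu_read_unlock();

        rcu_barrier();              /* drain pending call_rcu() callbacks */
        rcu_unregister_thread();
        return 0;
}
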