Merge adc2d82eee ("crypto: sa2ul - Select CRYPTO_DES") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: I2356127ad84f0179909589c63453c3367e99f4ee
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-06-25 11:41:36 +00:00
commit be7602cdd6
55 changed files with 710 additions and 462 deletions


@ -609,6 +609,22 @@ &i2c3 {
clock-frequency = <100000>;
};
&mcspi1 {
status = "disabled";
};
&mcspi2 {
status = "disabled";
};
&mcspi3 {
status = "disabled";
};
&mcspi4 {
status = "disabled";
};
&usb_otg_hs {
interface-type = <0>;
usb-phy = <&usb2_phy>;


@ -116,6 +116,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
void kernel_rewind_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);


@ -439,6 +439,11 @@ int kernel_active_single_step(void)
}
NOKPROBE_SYMBOL(kernel_active_single_step);
void kernel_rewind_single_step(struct pt_regs *regs)
{
set_regs_spsr_ss(regs);
}
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{


@ -223,6 +223,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
else
kernel_rewind_single_step(linux_regs);
err = 0;
break;
default:


@ -410,10 +410,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
} while (rsvd != new);
} while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
rsvd &= ~APIC_EILVT_MASKED;
rsvd = new & ~APIC_EILVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);


@ -2442,17 +2442,21 @@ static int io_apic_get_redir_entries(int ioapic)
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
unsigned int ret;
/*
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
if (!ioapic_initialized)
return gsi_top;
ret = ioapic_dynirq_base ? : gsi_top;
/*
* For DT enabled machines ioapic_dynirq_base is irrelevant and not
* updated. So simply return @from if ioapic_dynirq_base == 0.
* For DT enabled machines ioapic_dynirq_base is irrelevant and
* always 0. gsi_top can be 0 if there is no IO/APIC registered.
* 0 is an invalid interrupt number for dynamic allocations. Return
* @from instead.
*/
return ioapic_dynirq_base ? : from;
return ret ? : from;
}
#ifdef CONFIG_X86_32


@ -897,6 +897,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512


@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
/* Clear the contents before using the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);


@ -5844,7 +5844,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
drm_WARN(encoder->base.dev, num_encoders != 1,
drm_WARN(state->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
num_encoders, pipe_name(crtc->pipe));


@ -392,8 +392,10 @@ static int lima_pdev_probe(struct platform_device *pdev)
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
if (IS_ERR(ddev)) {
err = PTR_ERR(ddev);
goto err_out0;
}
ddev->dev_private = ldev;
ldev->ddev = ddev;


@ -1553,12 +1553,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
}
sec_offset_err = mr_status.sig_err.sig_err_offset;
do_div(sec_offset_err, block_size);
se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
isert_err("PI error found type %d at sector 0x%llx "
"expected 0x%x vs actual 0x%x\n",
mr_status.sig_err.err_type,
(unsigned long long)se_cmd->bad_sector,
(unsigned long long)se_cmd->sense_info,
mr_status.sig_err.expected,
mr_status.sig_err.actual);
ret = 1;


@ -1178,6 +1178,7 @@ static void dm1105_remove(struct pci_dev *pdev)
struct dvb_demux *dvbdemux = &dev->demux;
struct dmx_demux *dmx = &dvbdemux->dmx;
cancel_work_sync(&dev->ir.work);
dm1105_ir_exit(dev);
dmx->close(dmx);
dvb_net_release(&dev->dvbnet);


@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)
int saa7134_ts_fini(struct saa7134_dev *dev)
{
del_timer_sync(&dev->ts_q.timeout);
saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
return 0;
}


@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
int saa7134_vbi_fini(struct saa7134_dev *dev)
{
/* nothing */
del_timer_sync(&dev->vbi_q.timeout);
return 0;
}


@ -2153,6 +2153,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
void saa7134_video_fini(struct saa7134_dev *dev)
{
del_timer_sync(&dev->video_q.timeout);
/* free stuff */
saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);


@ -283,7 +283,6 @@ enum venus_dec_state {
VENUS_DEC_STATE_DRAIN = 5,
VENUS_DEC_STATE_DECODING = 6,
VENUS_DEC_STATE_DRC = 7,
VENUS_DEC_STATE_DRC_FLUSH_DONE = 8,
};
struct venus_ts_metadata {
@ -348,7 +347,7 @@ struct venus_ts_metadata {
* @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
* @last_buf: last capture buffer for dynamic-resoluton-change
* @next_buf_last: a flag to mark next queued capture buffer as last
*/
struct venus_inst {
struct list_head list;
@ -410,7 +409,8 @@ struct venus_inst {
union hfi_get_property hprop;
unsigned int core_acquired: 1;
unsigned int bit_depth;
struct vb2_buffer *last_buf;
bool next_buf_last;
bool drain_active;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)


@ -1347,6 +1347,12 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
/* Skip processing queued capture buffers after LAST flag */
if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
inst->codec_state == VENUS_DEC_STATE_DRC)
goto unlock;
cache_payload(inst, vb);
if (inst->session_type == VIDC_SESSION_TYPE_ENC &&


@ -495,6 +495,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
struct vb2_queue *dst_vq;
struct hfi_frame_data fdata = {0};
int ret;
@ -518,8 +519,17 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
ret = hfi_session_process_buf(inst, &fdata);
if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING)
if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING) {
inst->codec_state = VENUS_DEC_STATE_DRAIN;
inst->drain_active = true;
}
} else if (cmd->cmd == V4L2_DEC_CMD_START &&
inst->codec_state == VENUS_DEC_STATE_STOPPED) {
dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
vb2_clear_last_buffer_dequeued(dst_vq);
inst->codec_state = VENUS_DEC_STATE_DECODING;
}
unlock:
@ -636,6 +646,7 @@ static int vdec_output_conf(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct hfi_enable en = { .enable = 1 };
struct hfi_buffer_requirements bufreq;
u32 width = inst->out_width;
u32 height = inst->out_height;
u32 out_fmt, out2_fmt;
@ -711,6 +722,23 @@ static int vdec_output_conf(struct venus_inst *inst)
}
if (IS_V3(core) || IS_V4(core)) {
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
if (bufreq.size > inst->output_buf_size)
return -EINVAL;
if (inst->dpb_fmt) {
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT2,
&bufreq);
if (ret)
return ret;
if (bufreq.size > inst->output2_buf_size)
return -EINVAL;
}
if (inst->output2_buf_size) {
ret = venus_helper_set_bufsize(inst,
inst->output2_buf_size,
@ -916,10 +944,6 @@ static int vdec_start_capture(struct venus_inst *inst)
return 0;
reconfigure:
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
if (ret)
return ret;
ret = vdec_output_conf(inst);
if (ret)
return ret;
@ -947,15 +971,21 @@ static int vdec_start_capture(struct venus_inst *inst)
venus_pm_load_scale(inst);
inst->next_buf_last = false;
ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
inst->codec_state = VENUS_DEC_STATE_DECODING;
if (inst->drain_active)
inst->codec_state = VENUS_DEC_STATE_DRAIN;
inst->streamon_cap = 1;
inst->sequence_cap = 0;
inst->reconfig = false;
inst->drain_active = false;
return 0;
@ -971,7 +1001,10 @@ static int vdec_start_output(struct venus_inst *inst)
if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
ret = venus_helper_process_initial_out_bufs(inst);
inst->codec_state = VENUS_DEC_STATE_DECODING;
if (inst->next_buf_last)
inst->codec_state = VENUS_DEC_STATE_DRC;
else
inst->codec_state = VENUS_DEC_STATE_DECODING;
goto done;
}
@ -987,6 +1020,7 @@ static int vdec_start_output(struct venus_inst *inst)
venus_helper_init_instance(inst);
inst->sequence_out = 0;
inst->reconfig = false;
inst->next_buf_last = false;
ret = vdec_set_properties(inst);
if (ret)
@ -1076,13 +1110,14 @@ static int vdec_stop_capture(struct venus_inst *inst)
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
fallthrough;
case VENUS_DEC_STATE_DRAIN:
vdec_cancel_dst_buffers(inst);
inst->codec_state = VENUS_DEC_STATE_STOPPED;
inst->drain_active = false;
fallthrough;
case VENUS_DEC_STATE_SEEK:
vdec_cancel_dst_buffers(inst);
break;
case VENUS_DEC_STATE_DRC:
WARN_ON(1);
fallthrough;
case VENUS_DEC_STATE_DRC_FLUSH_DONE:
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
venus_helper_free_dpb_bufs(inst);
break;
@ -1101,6 +1136,7 @@ static int vdec_stop_output(struct venus_inst *inst)
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
case VENUS_DEC_STATE_STOPPED:
case VENUS_DEC_STATE_DRC:
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
inst->codec_state = VENUS_DEC_STATE_SEEK;
break;
@ -1206,9 +1242,28 @@ static void vdec_buf_cleanup(struct vb2_buffer *vb)
static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS };
vdec_pm_get_put(inst);
mutex_lock(&inst->lock);
if (inst->next_buf_last && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
inst->codec_state == VENUS_DEC_STATE_DRC) {
vbuf->flags |= V4L2_BUF_FLAG_LAST;
vbuf->sequence = inst->sequence_cap++;
vbuf->field = V4L2_FIELD_NONE;
vb2_set_plane_payload(vb, 0, 0);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
v4l2_event_queue_fh(&inst->fh, &eos);
inst->next_buf_last = false;
mutex_unlock(&inst->lock);
return;
}
mutex_unlock(&inst->lock);
venus_helper_vb2_buf_queue(vb);
}
@ -1252,20 +1307,15 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
if (inst->last_buf == vb) {
inst->last_buf = NULL;
vbuf->flags |= V4L2_BUF_FLAG_LAST;
vb2_set_plane_payload(vb, 0, 0);
vb->timestamp = 0;
}
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
v4l2_event_queue_fh(&inst->fh, &ev);
if (inst->codec_state == VENUS_DEC_STATE_DRAIN)
if (inst->codec_state == VENUS_DEC_STATE_DRAIN) {
inst->drain_active = false;
inst->codec_state = VENUS_DEC_STATE_STOPPED;
}
}
if (!bytesused)
@ -1321,19 +1371,16 @@ static void vdec_event_change(struct venus_inst *inst,
dev_dbg(dev, VDBGM "event %s sufficient resources (%ux%u)\n",
sufficient ? "" : "not", ev_data->width, ev_data->height);
if (sufficient) {
hfi_session_continue(inst);
} else {
switch (inst->codec_state) {
case VENUS_DEC_STATE_INIT:
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
break;
case VENUS_DEC_STATE_DECODING:
inst->codec_state = VENUS_DEC_STATE_DRC;
break;
default:
break;
}
switch (inst->codec_state) {
case VENUS_DEC_STATE_INIT:
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
break;
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
inst->codec_state = VENUS_DEC_STATE_DRC;
break;
default:
break;
}
/*
@ -1342,19 +1389,17 @@ static void vdec_event_change(struct venus_inst *inst,
* itself doesn't mark the last decoder output buffer with HFI EOS flag.
*/
if (!sufficient && inst->codec_state == VENUS_DEC_STATE_DRC) {
struct vb2_v4l2_buffer *last;
if (inst->codec_state == VENUS_DEC_STATE_DRC) {
int ret;
last = v4l2_m2m_last_dst_buf(inst->m2m_ctx);
if (last)
inst->last_buf = &last->vb2_buf;
inst->next_buf_last = true;
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false);
if (ret)
dev_dbg(dev, VDBGH "flush output error %d\n", ret);
}
inst->next_buf_last = true;
inst->reconfig = true;
v4l2_event_queue_fh(&inst->fh, &ev);
wake_up(&inst->reconf_wait);
@ -1397,8 +1442,7 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
static void vdec_flush_done(struct venus_inst *inst)
{
if (inst->codec_state == VENUS_DEC_STATE_DRC)
inst->codec_state = VENUS_DEC_STATE_DRC_FLUSH_DONE;
dev_dbg(inst->core->dev_dec, VDBGH "flush done\n");
}
static const struct hfi_inst_ops vdec_hfi_ops = {


@ -2121,9 +2121,7 @@ static int fdp1_open(struct file *file)
if (ctx->hdl.error) {
ret = ctx->hdl.error;
v4l2_ctrl_handler_free(&ctx->hdl);
kfree(ctx);
goto done;
goto error_ctx;
}
ctx->fh.ctrl_handler = &ctx->hdl;
@ -2137,20 +2135,27 @@ static int fdp1_open(struct file *file)
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->hdl);
kfree(ctx);
goto done;
goto error_ctx;
}
/* Perform any power management required */
pm_runtime_get_sync(fdp1->dev);
ret = pm_runtime_resume_and_get(fdp1->dev);
if (ret < 0)
goto error_pm;
v4l2_fh_add(&ctx->fh);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
mutex_unlock(&fdp1->dev_mutex);
return 0;
error_pm:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_ctx:
v4l2_ctrl_handler_free(&ctx->hdl);
kfree(ctx);
done:
mutex_unlock(&fdp1->dev_mutex);
return ret;
@ -2255,7 +2260,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
struct resource *res;
struct clk *clk;
unsigned int i;
@ -2282,17 +2286,15 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
/* Interrupt service routine registration */
fdp1->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
}
fdp1->irq = ret;
ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
dev_name(&pdev->dev), fdp1);
@ -2315,8 +2317,10 @@ static int fdp1_probe(struct platform_device *pdev)
/* Determine our clock rate */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto put_dev;
}
fdp1->clk_rate = clk_get_rate(clk);
clk_put(clk);
@ -2325,7 +2329,7 @@ static int fdp1_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
if (ret) {
v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
return ret;
goto put_dev;
}
/* M2M registration */
@ -2355,7 +2359,9 @@ static int fdp1_probe(struct platform_device *pdev)
/* Power up the cells to read HW */
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(fdp1->dev);
ret = pm_runtime_resume_and_get(fdp1->dev);
if (ret < 0)
goto disable_pm;
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
@ -2384,12 +2390,17 @@ static int fdp1_probe(struct platform_device *pdev)
return 0;
disable_pm:
pm_runtime_disable(fdp1->dev);
release_m2m:
v4l2_m2m_release(fdp1->m2m_dev);
unreg_dev:
v4l2_device_unregister(&fdp1->v4l2_dev);
put_dev:
rcar_fcp_put(fdp1->fcp);
return ret;
}
@ -2401,6 +2412,7 @@ static int fdp1_remove(struct platform_device *pdev)
video_unregister_device(&fdp1->vfd);
v4l2_device_unregister(&fdp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
rcar_fcp_put(fdp1->fcp);
return 0;
}


@ -107,6 +107,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
if (of_property_read_bool(np, "wakeup-source"))
device_init_wakeup(dev, true);
rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {


@ -126,6 +126,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
@ -137,6 +138,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
/*
* Some controllers have unreliable Data Line Active
* bit for commands with busy signal. This affects
* Command Inhibit (data) bit. Just ignore it since
* MMC core driver has already polled card status
* with CMD13 after any command with busy siganl.
*/
if (esdhc->quirk_ignore_data_inhibit)
ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}
@ -151,19 +162,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
/*
* Some controllers have unreliable Data Line Active
* bit for commands with busy signal. This affects
* Command Inhibit (data) bit. Just ignore it since
* MMC core driver has already polled card status
* with CMD13 after any command with busy siganl.
*/
if ((spec_reg == SDHCI_PRESENT_STATE) &&
(esdhc->quirk_ignore_data_inhibit == true)) {
ret = value & ~SDHCI_DATA_INHIBIT;
return ret;
}
ret = value;
return ret;
}


@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
ee->ee_n_piers[mode]++;
freq2 = (val >> 8) & 0xff;
if (!freq2)
if (!freq2 || i >= max)
break;
pc[i++].freq = ath5k_eeprom_bin2freq(ee,


@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
return -EACCES;
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
size = sizeof(cid) + sizeof(addr) + sizeof(*param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;


@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
* Thus the possibility of ar->htc_target being NULL
* via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
*/
if (WARN_ON_ONCE(!target)) {
ath6kl_err("Target not yet initialized\n");
if (!target) {
ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
status = -EINVAL;
goto free_skb;
}


@ -534,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
.send = hif_usb_send,
};
/* Need to free remain_skb allocated in ath9k_hif_usb_rx_stream
* in case ath9k_hif_usb_rx_stream wasn't called next time to
* process the buffer and subsequently free it.
*/
static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
{
unsigned long flags;
spin_lock_irqsave(&hif_dev->rx_lock, flags);
if (hif_dev->remain_skb) {
dev_kfree_skb_any(hif_dev->remain_skb);
hif_dev->remain_skb = NULL;
hif_dev->rx_remain_len = 0;
RX_STAT_INC(hif_dev, skb_dropped);
}
spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
}
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
@ -868,6 +886,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
ath9k_hif_usb_free_rx_remain_skb(hif_dev);
}
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)


@ -233,7 +233,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
if (ret)
return -EBUSY;
return ret;
idx++;
} while (1);
@ -247,6 +247,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
int ret;
if (rtw_chip_wcpu_11ac(rtwdev)) {
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
@ -270,8 +271,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
return -EINVAL;
ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
if (ret)
return ret;
return 0;
}


@ -216,6 +216,78 @@ static void regulator_unlock(struct regulator_dev *rdev)
mutex_unlock(&regulator_nesting_mutex);
}
/**
* regulator_lock_two - lock two regulators
* @rdev1: first regulator
* @rdev2: second regulator
* @ww_ctx: w/w mutex acquire context
*
* Locks both rdevs using the regulator_ww_class.
*/
static void regulator_lock_two(struct regulator_dev *rdev1,
struct regulator_dev *rdev2,
struct ww_acquire_ctx *ww_ctx)
{
struct regulator_dev *tmp;
int ret;
ww_acquire_init(ww_ctx, &regulator_ww_class);
/* Try to just grab both of them */
ret = regulator_lock_nested(rdev1, ww_ctx);
WARN_ON(ret);
ret = regulator_lock_nested(rdev2, ww_ctx);
if (ret != -EDEADLOCK) {
WARN_ON(ret);
goto exit;
}
while (true) {
/*
* Start of loop: rdev1 was locked and rdev2 was contended.
* Need to unlock rdev1, slowly lock rdev2, then try rdev1
* again.
*/
regulator_unlock(rdev1);
ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
rdev2->ref_cnt++;
rdev2->mutex_owner = current;
ret = regulator_lock_nested(rdev1, ww_ctx);
if (ret == -EDEADLOCK) {
/* More contention; swap which needs to be slow */
tmp = rdev1;
rdev1 = rdev2;
rdev2 = tmp;
} else {
WARN_ON(ret);
break;
}
}
exit:
ww_acquire_done(ww_ctx);
}
/**
* regulator_unlock_two - unlock two regulators
* @rdev1: first regulator
* @rdev2: second regulator
* @ww_ctx: w/w mutex acquire context
*
* The inverse of regulator_lock_two().
*/
static void regulator_unlock_two(struct regulator_dev *rdev1,
struct regulator_dev *rdev2,
struct ww_acquire_ctx *ww_ctx)
{
regulator_unlock(rdev2);
regulator_unlock(rdev1);
ww_acquire_fini(ww_ctx);
}
static bool regulator_supply_is_couple(struct regulator_dev *rdev)
{
struct regulator_dev *c_rdev;
@ -343,6 +415,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
old_contended_rdev->mutex_owner = current;
}
err = regulator_lock_recursive(rdev,
@ -1459,8 +1532,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
/**
* set_supply - set regulator supply regulator
* @rdev: regulator name
* @supply_rdev: supply regulator name
* @rdev: regulator (locked)
* @supply_rdev: supply regulator (locked))
*
* Called by platform initialisation code to set the supply regulator for this
* regulator. This ensures that a regulators supply will also be enabled by the
@ -1632,6 +1705,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
struct regulator *regulator;
int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
if (dev) {
char buf[REG_STR_SIZE];
int size;
@ -1659,9 +1734,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator->rdev = rdev;
regulator->supply_name = supply_name;
regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
regulator_unlock(rdev);
if (dev) {
regulator->dev = dev;
@ -1827,6 +1900,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
struct ww_acquire_ctx ww_ctx;
int ret = 0;
/* No supply to resolve? */
@ -1893,23 +1967,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
* between rdev->supply null check and setting rdev->supply in
* set_supply() from concurrent tasks.
*/
regulator_lock(rdev);
regulator_lock_two(rdev, r, &ww_ctx);
/* Supply just resolved by a concurrent task? */
if (rdev->supply) {
regulator_unlock(rdev);
regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
regulator_unlock(rdev);
regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
regulator_unlock(rdev);
regulator_unlock_two(rdev, r, &ww_ctx);
/*
* In set_machine_constraints() we may have turned this regulator on
@ -2022,7 +2096,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
regulator_lock(rdev);
regulator = create_regulator(rdev, dev, id);
regulator_unlock(rdev);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
@ -5800,6 +5876,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
old_contended_rdev->mutex_owner = current;
}
err = regulator_summary_lock_all(ww_ctx,


@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {
static int stm32_pwr_regulator_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct stm32_pwr_reg *priv;
void __iomem *base;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i, ret = 0;
base = of_iomap(np, 0);
if (!base) {
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map IO memory\n");
return -ENOMEM;
return PTR_ERR(base);
}
config.dev = &pdev->dev;


@ -1443,6 +1443,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
cmd = scb->cmd;
list_del_init(&scb->list);
scb->state = SCB_FREE;


@ -1077,6 +1077,8 @@ static int rkvdec_remove(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&rkvdec->watchdog_work);
rkvdec_v4l2_cleanup(rkvdec);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);


@ -4084,9 +4084,12 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
spin_lock_irq(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_ABORTED) {
if (!se_cmd->se_tfo)
continue;
spin_lock_irq(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_ABORTED) {
if (!(se_cmd->transport_state & CMD_T_TAS))
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
@ -4094,11 +4097,10 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
} else {
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
spin_unlock_irq(&se_cmd->t_state_lock);
} else {
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);


@ -724,11 +724,24 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;
int i;
dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
if (!dev->queues) {
dev->transport->free_device(dev);
return NULL;
}
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
INIT_LIST_HEAD(&dev->queues[i].state_list);
spin_lock_init(&dev->queues[i].lock);
}
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
@ -738,9 +751,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
@ -759,6 +770,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_alua.lba_map_lock);
INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
mutex_init(&dev->lun_reset_mutex);
dev->t10_wwn.t10_dev = dev;
dev->t10_alua.t10_dev = dev;
@ -1014,6 +1026,7 @@ void target_free_device(struct se_device *dev)
if (dev->transport->free_prot)
dev->transport->free_prot(dev);
kfree(dev->queues);
dev->transport->free_device(dev);
}


@ -1438,7 +1438,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
if (rc) {
kunmap_atomic(daddr - dsg->offset);
kunmap_atomic(paddr - psg->offset);
cmd->bad_sector = sector;
cmd->sense_info = sector;
return rc;
}
next:


@ -121,57 +121,61 @@ void core_tmr_abort_task(
unsigned long flags;
bool rc;
u64 ref_tag;
int i;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(se_cmd, next, &dev->state_list, state_list) {
for (i = 0; i < dev->queue_cnt; i++) {
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
state_list) {
if (se_sess != se_cmd->se_sess)
continue;
if (se_sess != se_cmd->se_sess)
continue;
/*
* skip task management functions, including
* tmr->task_cmd
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
/* skip task management functions, including tmr->task_cmd */
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
ref_tag = se_cmd->tag;
if (tmr->ref_task_tag != ref_tag)
continue;
ref_tag = se_cmd->tag;
if (tmr->ref_task_tag != ref_tag)
continue;
pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->fabric_name, ref_tag);
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->fabric_name, ref_tag);
spin_lock(&se_sess->sess_cmd_lock);
rc = __target_check_io_state(se_cmd, se_sess, 0);
spin_unlock(&se_sess->sess_cmd_lock);
if (!rc)
continue;
spin_lock(&se_sess->sess_cmd_lock);
rc = __target_check_io_state(se_cmd, se_sess, 0);
spin_unlock(&se_sess->sess_cmd_lock);
if (!rc)
continue;
list_move_tail(&se_cmd->state_list, &aborted_list);
se_cmd->state_active = false;
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
list_move_tail(&se_cmd->state_list, &aborted_list);
se_cmd->state_active = false;
/*
* Ensure that this ABORT request is visible to the LU
* RESET code.
*/
if (!tmr->tmr_dev)
WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
&aborted_list);
/*
* Ensure that this ABORT request is visible to the LU RESET
* code.
*/
if (!tmr->tmr_dev)
WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) <
0);
list_del_init(&se_cmd->state_list);
target_put_cmd_and_wait(se_cmd);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
&aborted_list);
list_del_init(&se_cmd->state_list);
target_put_cmd_and_wait(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
atomic_long_inc(&dev->aborts_complete);
return;
pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
atomic_long_inc(&dev->aborts_complete);
return;
}
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
@ -198,14 +202,23 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
if (tmr)
list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
if (tmr_p == tmr)
continue;
cmd = tmr_p->task_cmd;
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
* We only execute one LUN_RESET at a time so we can't wait
* on them below.
*/
if (tmr_p->function == TMR_LUN_RESET)
continue;
/*
* If this function was called with a valid pr_res_key
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
@ -273,7 +286,7 @@ static void core_tmr_drain_state_list(
struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
int rc;
int rc, i;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@ -297,35 +310,39 @@ static void core_tmr_drain_state_list(
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
for (i = 0; i < dev->queue_cnt; i++) {
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list,
cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
sess = cmd->se_sess;
if (WARN_ON_ONCE(!sess))
continue;
sess = cmd->se_sess;
if (WARN_ON_ONCE(!sess))
continue;
spin_lock(&sess->sess_cmd_lock);
rc = __target_check_io_state(cmd, tmr_sess, tas);
spin_unlock(&sess->sess_cmd_lock);
if (!rc)
continue;
spin_lock(&sess->sess_cmd_lock);
rc = __target_check_io_state(cmd, tmr_sess, tas);
spin_unlock(&sess->sess_cmd_lock);
if (!rc)
continue;
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
if (dev->transport->tmr_notify)
dev->transport->tmr_notify(dev, preempt_and_abort_list ?
@ -382,14 +399,25 @@ int core_tmr_lun_reset(
tmr_nacl->initiatorname);
}
}
/*
* We only allow one reset or preempt and abort to execute at a time
* to prevent one call from claiming all the cmds causing a second
* call from returning while cmds it should have waited on are still
* running.
*/
mutex_lock(&dev->lun_reset_mutex);
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
mutex_unlock(&dev->lun_reset_mutex);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET

View File

@ -650,12 +650,12 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
if (!dev)
return;
spin_lock_irqsave(&dev->execute_task_lock, flags);
spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@ -866,10 +866,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
if (cmd->se_cmd_flags & SCF_USE_CPUID)
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
else
queue_work(target_completion_wq, &cmd->work);
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@ -904,12 +901,13 @@ static void target_add_to_state_list(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
unsigned long flags;
spin_lock_irqsave(&dev->execute_task_lock, flags);
spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (!cmd->state_active) {
list_add_tail(&cmd->state_list, &dev->state_list);
list_add_tail(&cmd->state_list,
&dev->queues[cmd->cpuid].state_list);
cmd->state_active = true;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@ -1397,6 +1395,9 @@ void transport_init_se_cmd(
cmd->sense_buffer = sense_buffer;
cmd->orig_fe_lun = unpacked_lun;
if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
cmd->cpuid = smp_processor_id();
cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
@ -1614,6 +1615,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
BUG_ON(in_interrupt());
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
@ -1623,11 +1627,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
data_length, data_dir, task_attr, sense,
unpacked_lun);
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
else
se_cmd->cpuid = WORK_CPU_UNBOUND;
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
@ -3131,14 +3130,14 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_wait_for_tasks);
struct sense_info {
struct sense_detail {
u8 key;
u8 asc;
u8 ascq;
bool add_sector_info;
bool add_sense_info;
};
static const struct sense_info sense_info_table[] = {
static const struct sense_detail sense_detail_table[] = {
[TCM_NO_SENSE] = {
.key = NOT_READY
},
@ -3238,19 +3237,19 @@ static const struct sense_info sense_info_table[] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
.add_sector_info = true,
.add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
.add_sector_info = true,
.add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
.add_sense_info = true,
},
[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
.key = COPY_ABORTED,
@ -3298,42 +3297,42 @@ static const struct sense_info sense_info_table[] = {
*/
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_info *si;
const struct sense_detail *sd;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
si = &sense_info_table[r];
if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
sd = &sense_detail_table[r];
else
si = &sense_info_table[(__force int)
sd = &sense_detail_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
key = si->key;
key = sd->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
&ascq)) {
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
} else if (si->asc == 0) {
} else if (sd->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
ascq = cmd->scsi_ascq;
} else {
asc = si->asc;
ascq = si->ascq;
asc = sd->asc;
ascq = sd->ascq;
}
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (si->add_sector_info)
if (sd->add_sense_info)
WARN_ON_ONCE(scsi_set_sense_information(buffer,
cmd->scsi_sense_length,
cmd->bad_sector) < 0);
cmd->sense_info) < 0);
}
int


@ -551,7 +551,7 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
TARGET_SCF_ACK_KREF))
goto err;
pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);


@ -1393,6 +1393,12 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (!PageDirty(cc->rpages[i]))
goto continue_unlock;
if (PageWriteback(cc->rpages[i])) {
if (wbc->sync_mode == WB_SYNC_NONE)
goto continue_unlock;
f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
}
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;


@ -1218,7 +1218,6 @@ struct f2fs_dev_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_blkz; /* Total number of zones */
unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
block_t *zone_capacity_blocks; /* Array of zone capacity in blks */
#endif
};
@ -1639,6 +1638,7 @@ struct f2fs_sb_info {
unsigned int meta_ino_num; /* meta inode number*/
unsigned int log_blocks_per_seg; /* log2 blocks per segment */
unsigned int blocks_per_seg; /* blocks per segment */
unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
unsigned int segs_per_sec; /* segments per section */
unsigned int secs_per_zone; /* sections per zone */
unsigned int total_sections; /* total section count */


@ -3018,15 +3018,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
struct dquot *transfer_to[MAXQUOTAS] = {};
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
int err = 0;
int err;
transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
if (!IS_ERR(transfer_to[PRJQUOTA])) {
err = __dquot_transfer(inode, transfer_to);
if (err)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
dqput(transfer_to[PRJQUOTA]);
}
if (IS_ERR(transfer_to[PRJQUOTA]))
return PTR_ERR(transfer_to[PRJQUOTA]);
err = __dquot_transfer(inode, transfer_to);
if (err)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
dqput(transfer_to[PRJQUOTA]);
return err;
}


@ -5054,54 +5054,6 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
return 0;
}
static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
unsigned int dev_idx)
{
if (!bdev_is_zoned(FDEV(dev_idx).bdev))
return true;
return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
}
/* Return the zone index in the given device */
static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
int dev_idx)
{
block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
sbi->log_blocks_per_blkz;
}
/*
* Return the usable segments in a section based on the zone's
* corresponding zone capacity. Zone is equal to a section.
*/
static inline unsigned int f2fs_usable_zone_segs_in_sec(
struct f2fs_sb_info *sbi, unsigned int segno)
{
unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
/* Conventional zone's capacity is always equal to zone size */
if (is_conv_zone(sbi, zone_idx, dev_idx))
return sbi->segs_per_sec;
/*
* If the zone_capacity_blocks array is NULL, then zone capacity
* is equal to the zone size for all zones
*/
if (!FDEV(dev_idx).zone_capacity_blocks)
return sbi->segs_per_sec;
/* Get the segment count beyond zone capacity block */
unusable_segs_in_sec = (sbi->blocks_per_blkz -
FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
sbi->log_blocks_per_seg;
return sbi->segs_per_sec - unusable_segs_in_sec;
}
/*
* Return the number of usable blocks in a segment. The number of blocks
* returned is always equal to the number of blocks in a segment for
@ -5114,26 +5066,15 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
struct f2fs_sb_info *sbi, unsigned int segno)
{
block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
unsigned int zone_idx, dev_idx, secno;
unsigned int secno;
if (!sbi->unusable_blocks_per_sec)
return sbi->blocks_per_seg;
secno = GET_SEC_FROM_SEG(sbi, segno);
seg_start = START_BLOCK(sbi, segno);
dev_idx = f2fs_target_device_index(sbi, seg_start);
zone_idx = get_zone_idx(sbi, secno, dev_idx);
/*
* Conventional zone's capacity is always equal to zone size,
* so, blocks per segment is unchanged.
*/
if (is_conv_zone(sbi, zone_idx, dev_idx))
return sbi->blocks_per_seg;
if (!FDEV(dev_idx).zone_capacity_blocks)
return sbi->blocks_per_seg;
sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
sec_cap_blkaddr = sec_start_blkaddr +
FDEV(dev_idx).zone_capacity_blocks[zone_idx];
sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
/*
* If segment starts before zone capacity and spans beyond
@ -5165,11 +5106,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
return 0;
}
static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int segno)
{
return 0;
}
#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno)
@ -5184,7 +5120,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int segno)
{
if (f2fs_sb_has_blkzoned(sbi))
return f2fs_usable_zone_segs_in_sec(sbi, segno);
return CAP_SEGS_PER_SEC(sbi);
return sbi->segs_per_sec;
}


@ -101,6 +101,12 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define CAP_BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
(sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi) \
((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
(sbi)->log_blocks_per_seg))
#define GET_SEC_FROM_SEG(sbi, segno) \
(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \


@ -1433,7 +1433,6 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
kfree(FDEV(i).zone_capacity_blocks);
#endif
}
kvfree(sbi->devs);
@ -3502,24 +3501,29 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
#ifdef CONFIG_BLK_DEV_ZONED
struct f2fs_report_zones_args {
struct f2fs_sb_info *sbi;
struct f2fs_dev_info *dev;
bool zone_cap_mismatch;
};
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct f2fs_report_zones_args *rz_args = data;
block_t unusable_blocks = (zone->len - zone->capacity) >>
F2FS_LOG_SECTORS_PER_BLOCK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return 0;
set_bit(idx, rz_args->dev->blkz_seq);
rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
F2FS_LOG_SECTORS_PER_BLOCK;
if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
rz_args->zone_cap_mismatch = true;
if (!rz_args->sbi->unusable_blocks_per_sec) {
rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
return 0;
}
if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
return -EINVAL;
}
return 0;
}
@ -3553,26 +3557,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
/* Get block zones type and zone-capacity */
FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
FDEV(devi).nr_blkz * sizeof(block_t),
GFP_KERNEL);
if (!FDEV(devi).zone_capacity_blocks)
return -ENOMEM;
rep_zone_arg.sbi = sbi;
rep_zone_arg.dev = &FDEV(devi);
rep_zone_arg.zone_cap_mismatch = false;
ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
&rep_zone_arg);
if (ret < 0)
return ret;
if (!rep_zone_arg.zone_cap_mismatch) {
kfree(FDEV(devi).zone_capacity_blocks);
FDEV(devi).zone_capacity_blocks = NULL;
}
return 0;
}
#endif


@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
}
}
}
static inline bool scm_has_secdata(struct socket *sock)
{
return test_bit(SOCK_PASSSEC, &sock->flags);
}
#else
static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
static inline bool scm_has_secdata(struct socket *sock)
{
return false;
}
#endif /* CONFIG_SECURITY_NETWORK */
static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
return;


@ -540,7 +540,11 @@ struct se_cmd {
struct scatterlist *t_prot_sg;
unsigned int t_prot_nents;
sense_reason_t pi_err;
sector_t bad_sector;
u64 sense_info;
/*
* CPU LIO will execute the cmd on. Defaults to the CPU the cmd is
* initialized on. Drivers can override.
*/
int cpuid;
};
@ -761,6 +765,11 @@ struct se_dev_stat_grps {
struct config_group scsi_lu_group;
};
struct se_device_queue {
struct list_head state_list;
spinlock_t lock;
};
struct se_device {
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
@ -794,7 +803,6 @@ struct se_device {
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
unsigned int dev_reservation_flags;
#define DRF_SPC2_RESERVATIONS 0x00000001
@ -814,7 +822,6 @@ struct se_device {
struct work_struct qf_work_queue;
struct work_struct delayed_cmd_work;
struct list_head delayed_cmd_list;
struct list_head state_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
@ -841,6 +848,9 @@ struct se_device {
/* For se_lun->lun_se_dev RCU read-side critical access */
u32 hba_index;
struct rcu_head rcu_head;
int queue_cnt;
struct se_device_queue *queues;
struct mutex lun_reset_mutex;
};
struct se_hba {


@ -2775,17 +2775,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
}
/* Variable offset is prohibited for unprivileged mode for simplicity
* since it requires corresponding support in Spectre masking for stack
* ALU. See also retrieve_ptr_limit().
* ALU. See also retrieve_ptr_limit(). The check in
* check_stack_access_for_ptr_arithmetic() called by
* adjust_ptr_min_max_vals() prevents users from creating stack pointers
* with variable offsets, therefore no check is required here. Further,
* just checking it here would be insufficient as speculative stack
* writes could still lead to unsafe speculative behaviour.
*/
if (!env->bypass_spec_v1 && var_off) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
ptr_regno, tn_buf);
return -EACCES;
}
if (!var_off) {
off += reg->var_off.value;
err = check_stack_read_fixed_off(env, state, off, size,
@ -9550,10 +9546,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
state_reg = state->regs;
for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
if (state_reg->type != SCALAR_VALUE ||
!state_reg->precise)
!state_reg->precise ||
!(state_reg->live & REG_LIVE_READ))
continue;
if (env->log.level & BPF_LOG_LEVEL2)
verbose(env, "frame %d: propagating r%d\n", i, fr);
verbose(env, "frame %d: propagating r%d\n", fr, i);
err = mark_chain_precision_frame(env, fr, i);
if (err < 0)
return err;
@ -9564,11 +9561,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
continue;
state_reg = &state->stack[i].spilled_ptr;
if (state_reg->type != SCALAR_VALUE ||
!state_reg->precise)
!state_reg->precise ||
!(state_reg->live & REG_LIVE_READ))
continue;
if (env->log.level & BPF_LOG_LEVEL2)
verbose(env, "frame %d: propagating fp%d\n",
(-i - 1) * BPF_REG_SIZE, fr);
fr, (-i - 1) * BPF_REG_SIZE);
err = mark_chain_precision_stack_frame(env, fr, i);
if (err < 0)
return err;


@ -412,7 +412,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
ktime_t next = ktime_add(dev->next_event, tick_period);
ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
clockevents_program_event(dev, next, true);
}


@ -31,7 +31,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
* Tick next event: keeps track of the tick time
*/
ktime_t tick_next_period;
ktime_t tick_period;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@ -89,7 +88,7 @@ static void tick_periodic(int cpu)
write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
write_seqcount_end(&jiffies_seq);
@ -129,7 +128,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
next = ktime_add(next, tick_period);
next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
@ -175,7 +174,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
next = ktime_add(next, tick_period);
next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@ -219,10 +218,19 @@ static void tick_setup_device(struct tick_device *td,
* this cpu:
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
ktime_t next_p;
u32 rem;
tick_do_timer_cpu = cpu;
tick_next_period = ktime_get();
tick_period = NSEC_PER_SEC / HZ;
next_p = ktime_get();
div_u64_rem(next_p, TICK_NSEC, &rem);
if (rem) {
next_p -= rem;
next_p += TICK_NSEC;
}
tick_next_period = next_p;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set


@ -15,7 +15,6 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);

View File

@ -54,49 +54,67 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
unsigned long ticks = 0;
unsigned long ticks = 1;
ktime_t delta;
/*
* Do a quick check without holding jiffies_lock:
* The READ_ONCE() pairs with two updates done later in this function.
* Do a quick check without holding jiffies_lock. The READ_ONCE()
* pairs with the update done later in this function.
*
* This is also an intentional data race which is even safe on
* 32bit in theory. If there is a concurrent update then the check
* might give a random answer. It does not matter because if it
* returns then the concurrent update is already taking care, if it
* falls through then it will pointlessly contend on jiffies_lock.
*
* Though there is one nasty case on 32bit due to store tearing of
* the 64bit value. If the first 32bit store makes the quick check
* return on all other CPUs and the writing CPU context gets
* delayed to complete the second store (scheduled out on virt)
* then jiffies can become stale for up to ~2^32 nanoseconds
* without noticing. After that point all CPUs will wait for
* jiffies lock.
*
* OTOH, this is not any different than the situation with NOHZ=off
* where one CPU is responsible for updating jiffies and
* timekeeping. If that CPU goes out for lunch then all other CPUs
* will operate on stale jiffies until it decides to come back.
*/
delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
if (delta < tick_period)
if (ktime_before(now, READ_ONCE(tick_next_period)))
return;
/* Reevaluate with jiffies_lock held */
raw_spin_lock(&jiffies_lock);
write_seqcount_begin(&jiffies_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta >= tick_period) {
delta = ktime_sub(delta, tick_period);
/* Pairs with the lockless read in this function. */
WRITE_ONCE(last_jiffies_update,
ktime_add(last_jiffies_update, tick_period));
/* Slow path for long timeouts */
if (unlikely(delta >= tick_period)) {
s64 incr = ktime_to_ns(tick_period);
ticks = ktime_divns(delta, incr);
/* Pairs with the lockless read in this function. */
WRITE_ONCE(last_jiffies_update,
ktime_add_ns(last_jiffies_update,
incr * ticks));
}
do_timer(++ticks);
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
write_seqcount_end(&jiffies_seq);
if (ktime_before(now, tick_next_period)) {
raw_spin_unlock(&jiffies_lock);
return;
}
write_seqcount_begin(&jiffies_seq);
delta = ktime_sub(now, tick_next_period);
if (unlikely(delta >= TICK_NSEC)) {
/* Slow path for long idle sleep times */
s64 incr = TICK_NSEC;
ticks += ktime_divns(delta, incr);
last_jiffies_update = ktime_add_ns(last_jiffies_update,
incr * ticks);
} else {
last_jiffies_update = ktime_add_ns(last_jiffies_update,
TICK_NSEC);
}
do_timer(ticks);
/*
* Keep the tick_next_period variable up to date. WRITE_ONCE()
* pairs with the READ_ONCE() in the lockless quick check above.
*/
WRITE_ONCE(tick_next_period,
ktime_add_ns(last_jiffies_update, TICK_NSEC));
write_seqcount_end(&jiffies_seq);
raw_spin_unlock(&jiffies_lock);
update_wall_time();
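[Editor's note: the rewritten tick_do_update_jiffies64() above keeps a lockless quick check on tick_next_period (READ_ONCE paired with the WRITE_ONCE done under jiffies_lock) and re-evaluates the condition after taking the lock, bailing out if another CPU already advanced the period. A plain-C11 sketch of that check/lock/recheck pattern, with pthread and atomics standing in for the kernel primitives; all names and values are illustrative assumptions.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t period_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic uint64_t next_period_ns;     /* stands in for tick_next_period */

void maybe_advance(uint64_t now_ns, uint64_t period_ns)
{
        /* Lockless quick check: a stale value only costs a pointless lock
         * round trip, never a missed or duplicated update. */
        if (now_ns < atomic_load_explicit(&next_period_ns, memory_order_relaxed))
                return;

        pthread_mutex_lock(&period_lock);
        /* Re-evaluate under the lock: another thread may have advanced it. */
        if (now_ns >= atomic_load_explicit(&next_period_ns, memory_order_relaxed)) {
                uint64_t next = atomic_load_explicit(&next_period_ns,
                                                     memory_order_relaxed);

                /* ... periodic bookkeeping (the do_timer() part) goes here ... */
                atomic_store_explicit(&next_period_ns, next + period_ns,
                                      memory_order_relaxed);
        }
        pthread_mutex_unlock(&period_lock);
}

int main(void)
{
        atomic_store(&next_period_ns, 4000000);
        maybe_advance(5000000, 4000000);
        return 0;
}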
@ -673,7 +691,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@ -1237,7 +1255,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
@ -1274,7 +1292,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
hrtimer_forward_now(&ts->sched_timer, tick_period);
hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@ -1340,7 +1358,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
hrtimer_forward(timer, now, tick_period);
hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
@ -1374,13 +1392,13 @@ void tick_setup_sched_timer(void)
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
u64 offset = ktime_to_ns(tick_period) >> 1;
u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
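[Editor's note: with tick_period gone, the sched_skew_tick offset above is computed from TICK_NSEC directly: half a tick is divided across the possible CPUs and each CPU's sched_timer is shifted by its share, so they do not all contend on jiffies_lock at the same instant. A worked example of that arithmetic, assuming HZ=250 and 8 possible CPUs (illustrative values only).]

#include <stdint.h>
#include <stdio.h>

#define HZ        250ULL
#define TICK_NSEC (1000000000ULL / HZ)   /* 4,000,000 ns */

int main(void)
{
        unsigned int ncpus = 8;          /* assumed num_possible_cpus() */

        for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
                /* Mirrors: offset = (TICK_NSEC >> 1) / ncpus * cpu */
                uint64_t offset = (TICK_NSEC >> 1) / ncpus * cpu;

                printf("cpu %u: skew %llu ns\n", cpu,
                       (unsigned long long)offset);
        }
        return 0;
}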


@ -219,10 +219,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
return obj;
}
/*
* Allocate a new object. If the pool is empty, switch off the debugger.
* Must be called with interrupts disabled.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
@ -555,11 +551,49 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
const struct debug_obj_descr *descr,
bool onstack, bool alloc_ifstatic)
{
struct debug_obj *obj = lookup_object(addr, b);
enum debug_obj_state state = ODEBUG_STATE_NONE;
if (likely(obj))
return obj;
/*
* debug_object_init() unconditionally allocates untracked
* objects. It does not matter whether it is a static object or
* not.
*
* debug_object_assert_init() and debug_object_activate() allow
* allocation only if the descriptor callback confirms that the
* object is static and considered initialized. For non-static
* objects the allocation needs to be done from the fixup callback.
*/
if (unlikely(alloc_ifstatic)) {
if (!descr->is_static_object || !descr->is_static_object(addr))
return ERR_PTR(-ENOENT);
/* Statically allocated objects are considered initialized */
state = ODEBUG_STATE_INIT;
}
obj = alloc_object(addr, b, descr);
if (likely(obj)) {
obj->state = state;
debug_object_is_on_stack(addr, onstack);
return obj;
}
/* Out of memory. Do the cleanup outside of the locked region */
debug_objects_enabled = 0;
return NULL;
}
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
bool check_stack = false;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@ -570,16 +604,11 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
check_stack = true;
obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
if (unlikely(!obj)) {
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
switch (obj->state) {
@ -605,8 +634,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
}
raw_spin_unlock_irqrestore(&db->lock, flags);
if (check_stack)
debug_object_is_on_stack(addr, onstack);
}
/**
@ -646,14 +673,12 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
if (!debug_objects_enabled)
return 0;
@ -662,8 +687,8 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
obj = lookup_object_or_alloc(addr, db, descr, false, true);
if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;
switch (obj->state) {
@ -696,24 +721,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* We are here when a static object is activated. We
* let the type specific code confirm whether this is
* true or not. if true, we just make sure that the
* static object is tracked in the object tracker. If
* not, this must be a bug, so we try to fix it up.
*/
if (descr->is_static_object && descr->is_static_object(addr)) {
/* track this static object */
debug_object_init(addr, descr);
debug_object_activate(addr, descr);
} else {
debug_print_object(&o, "activate");
ret = debug_object_fixup(descr->fixup_activate, addr,
ODEBUG_STATE_NOTAVAILABLE);
return ret ? 0 : -EINVAL;
/* If NULL the allocation has hit OOM */
if (!obj) {
debug_objects_oom();
return 0;
}
return 0;
/* Object is neither static nor tracked. It's not initialized */
debug_print_object(&o, "activate");
ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@ -867,6 +884,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@ -877,31 +895,20 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object_or_alloc(addr, db, descr, false, true);
raw_spin_unlock_irqrestore(&db->lock, flags);
if (likely(!IS_ERR_OR_NULL(obj)))
return;
obj = lookup_object(addr, db);
/* If NULL the allocation has hit OOM */
if (!obj) {
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* Maybe the object is static, and we let the type specific
* code confirm. Track this static object if true, else invoke
* fixup.
*/
if (descr->is_static_object && descr->is_static_object(addr)) {
/* Track this static object */
debug_object_init(addr, descr);
} else {
debug_print_object(&o, "assert_init");
debug_object_fixup(descr->fixup_assert_init, addr,
ODEBUG_STATE_NOTAVAILABLE);
}
debug_objects_oom();
return;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Object is neither tracked nor static. It's not initialized. */
debug_print_object(&o, "assert_init");
debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
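[Editor's note: lookup_object_or_alloc() above folds the old "is it static?" handling of debug_object_activate() and debug_object_assert_init() into one helper with a three-way result: a valid pointer for a tracked (or static, freshly tracked) object, ERR_PTR(-ENOENT) when the object is neither tracked nor static so the caller runs its fixup path, and NULL only on allocation failure. A standalone sketch of that return convention, with ERR_PTR/IS_ERR_OR_NULL re-implemented locally and a hypothetical is_static callback; everything here is illustrative, not the kernel implementation.]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR_OR_NULL(const void *p)
{
        return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct obj { void *addr; };

/* Valid pointer: tracked.  ERR_PTR(-ENOENT): not static, caller fixes up.
 * NULL: out of memory, caller disables the tracking machinery. */
static struct obj *lookup_or_alloc(void *addr, int (*is_static)(void *))
{
        if (!is_static || !is_static(addr))
                return ERR_PTR(-ENOENT);

        struct obj *o = malloc(sizeof(*o));
        if (!o)
                return NULL;
        o->addr = addr;
        return o;
}

static int always_static(void *addr) { (void)addr; return 1; }

int main(void)
{
        int x;
        struct obj *o = lookup_or_alloc(&x, always_static);

        printf("tracked: %d\n", !IS_ERR_OR_NULL(o));
        if (!IS_ERR_OR_NULL(o))
                free(o);
        return 0;
}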


@ -365,7 +365,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
if (!net_eq(dev_net(dev), &init_net))
if (!net_eq(dev_net(dev), dev_net(real_dev)))
break;
fallthrough;
case SIOCGMIIPHY:


@ -269,7 +269,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)
static bool packet_use_direct_xmit(const struct packet_sock *po)
{
return po->xmit == packet_direct_xmit;
/* Paired with WRITE_ONCE() in packet_setsockopt() */
return READ_ONCE(po->xmit) == packet_direct_xmit;
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@ -2145,7 +2146,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_hatype = dev->type;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@ -2418,7 +2419,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
if (unlikely(po->origdev))
if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@ -2825,7 +2826,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
packet_inc_pending(&po->tx_ring);
status = TP_STATUS_SEND_REQUEST;
err = po->xmit(skb);
/* Paired with WRITE_ONCE() in packet_setsockopt() */
err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@ -3028,7 +3030,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
err = po->xmit(skb);
/* Paired with WRITE_ONCE() in packet_setsockopt() */
err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@ -3482,7 +3485,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (pkt_sk(sk)->auxdata) {
if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
@ -3866,9 +3869,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
lock_sock(sk);
po->auxdata = !!val;
release_sock(sk);
packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
return 0;
}
case PACKET_ORIGDEV:
@ -3880,9 +3881,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
lock_sock(sk);
po->origdev = !!val;
release_sock(sk);
packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
return 0;
}
case PACKET_VNET_HDR:
@ -3979,7 +3978,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
/* Paired with all lockless reads of po->xmit */
WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
return 0;
}
default:
@ -4030,10 +4030,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
break;
case PACKET_AUXDATA:
val = po->auxdata;
val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
break;
case PACKET_ORIGDEV:
val = po->origdev;
val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
break;
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
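[Editor's note: po->xmit is flipped by packet_setsockopt() without the socket lock while transmit paths may be calling through it concurrently, so the hunks above annotate both sides with READ_ONCE()/WRITE_ONCE() to keep the function-pointer accesses tear-free. A userspace sketch of the same pairing using C11 atomics on a function pointer; the mode names are illustrative assumptions.]

#include <stdatomic.h>
#include <stdio.h>

typedef int (*xmit_fn)(int pkt);

static int direct_xmit(int pkt) { return pkt; }
static int queue_xmit(int pkt)  { return -pkt; }

static _Atomic xmit_fn xmit = direct_xmit;

static int send_pkt(int pkt)
{
        /* Paired with the store in set_direct_mode(); mirrors READ_ONCE(po->xmit). */
        xmit_fn fn = atomic_load_explicit(&xmit, memory_order_relaxed);

        return fn(pkt);
}

static void set_direct_mode(int on)
{
        /* Mirrors WRITE_ONCE(po->xmit, ...) in packet_setsockopt(). */
        atomic_store_explicit(&xmit, on ? direct_xmit : queue_xmit,
                              memory_order_relaxed);
}

int main(void)
{
        set_direct_mode(0);
        printf("%d\n", send_pkt(5));
        return 0;
}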


@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_flags = 0;
if (po->running)
pinfo.pdi_flags |= PDI_RUNNING;
if (po->auxdata)
if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
pinfo.pdi_flags |= PDI_AUXDATA;
if (po->origdev)
if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
pinfo.pdi_flags |= PDI_ORIGDEV;
if (po->has_vnet_hdr)
pinfo.pdi_flags |= PDI_VNETHDR;


@ -116,10 +116,9 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
unsigned long flags;
unsigned int running; /* bind_lock must be held */
unsigned int auxdata:1, /* writer must hold sock lock */
origdev:1,
has_vnet_hdr:1,
unsigned int has_vnet_hdr:1, /* writer must hold sock lock */
tp_loss:1,
tp_tx_has_off:1;
int pressure;
@ -144,4 +143,25 @@ static struct packet_sock *pkt_sk(struct sock *sk)
return (struct packet_sock *)sk;
}
enum packet_sock_flags {
PACKET_SOCK_ORIGDEV,
PACKET_SOCK_AUXDATA,
};
static inline void packet_sock_flag_set(struct packet_sock *po,
enum packet_sock_flags flag,
bool val)
{
if (val)
set_bit(flag, &po->flags);
else
clear_bit(flag, &po->flags);
}
static inline bool packet_sock_flag(const struct packet_sock *po,
enum packet_sock_flags flag)
{
return test_bit(flag, &po->flags);
}
#endif
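[Editor's note: the auxdata and origdev bits above move out of the lock-protected bitfield into a separate flags word driven by set_bit()/clear_bit()/test_bit(), which are atomic per bit, so packet_setsockopt() no longer needs lock_sock() for them and the receive/getsockopt paths can test them locklessly. A rough userspace equivalent of the accessors using C11 atomics; names and layout are illustrative, not the kernel helpers.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum sock_flags { FLAG_ORIGDEV, FLAG_AUXDATA };

struct fake_sock { _Atomic unsigned long flags; };

static void sock_flag_set(struct fake_sock *po, enum sock_flags flag, bool val)
{
        unsigned long mask = 1UL << flag;

        if (val)
                atomic_fetch_or_explicit(&po->flags, mask, memory_order_relaxed);
        else
                atomic_fetch_and_explicit(&po->flags, ~mask, memory_order_relaxed);
}

static bool sock_flag(struct fake_sock *po, enum sock_flags flag)
{
        return atomic_load_explicit(&po->flags, memory_order_relaxed) &
               (1UL << flag);
}

int main(void)
{
        struct fake_sock po = { .flags = 0 };

        sock_flag_set(&po, FLAG_AUXDATA, true);
        printf("auxdata=%d origdev=%d\n",
               sock_flag(&po, FLAG_AUXDATA), sock_flag(&po, FLAG_ORIGDEV));
        return 0;
}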


@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
case '"':
fputs("\\\"", self->out);
break;
case '\'':
fputs("\\\'", self->out);
break;
default:
putc(*str, self->out);
}
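[Editor's note: the json_writer hunk above drops the escaping of single quotes because RFC 8259 defines no \' escape; emitting one produces output that strict JSON parsers reject, while a bare ' inside a double-quoted string is already valid. A minimal sketch of string escaping that only handles what JSON actually requires (quote, backslash, control characters); this is an assumed simplification, not the tool's full writer.]

#include <ctype.h>
#include <stdio.h>

static void json_puts(FILE *out, const char *s)
{
        putc('"', out);
        for (; *s; s++) {
                switch (*s) {
                case '"':  fputs("\\\"", out); break;
                case '\\': fputs("\\\\", out); break;
                case '\n': fputs("\\n", out);  break;
                case '\t': fputs("\\t", out);  break;
                default:
                        if (iscntrl((unsigned char)*s))
                                fprintf(out, "\\u%04x", (unsigned char)*s);
                        else
                                putc(*s, out);   /* includes a literal ' */
                }
        }
        putc('"', out);
}

int main(void)
{
        json_puts(stdout, "it's \"quoted\"\n");  /* -> "it's \"quoted\"\n" */
        putc('\n', stdout);
        return 0;
}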