Merge 0ce9d89343 ("clk: ti: dra7-atl: Fix leak of of_nodes") into android12-5.10-lts

Steps on the way to 5.10.226

Resolves merge conflicts in:
	drivers/dma-buf/heaps/heap-helpers.c
	drivers/usb/dwc3/core.h
	fs/ext4/inline.c

Change-Id: Id7ab496884e549fc85b6fff8254fb56d6785d78c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 9e57ad4546
Greg Kroah-Hartman, 2024-11-12 17:29:46 +00:00
148 changed files with 1042 additions and 431 deletions


@@ -519,7 +519,7 @@ at module load time (for a module) with::
alerts_broken
The addresses are normal I2C addresses. The adapter is the string
-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
spaces, so if the name is "This is an I2C chip" you can say
adapter_name=ThisisanI2cchip. This is because it's hard to pass in


@@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
&iomuxc_lpsr {
pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
-fsl,phy = <
+fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
>;
};


@@ -692,7 +692,7 @@ rtt: rtt@fffffe20 {
compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xfffffe20 0x20>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
};
pit: timer@fffffe40 {
@@ -718,7 +718,7 @@ rtc: rtc@fffffea8 {
compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
reg = <0xfffffea8 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
};
watchdog: watchdog@ffffff80 {


@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
return;
}
map = syscon_node_to_regmap(np);
+of_node_put(np);
if (IS_ERR(map)) {
pr_err("PLATSMP: No syscon regmap\n");
return;


@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
{
/* regs will be equal to current_pt_regs() */
struct kernel_clone_args args = {
-.flags = regs->d1 & ~CSIGNAL,
+.flags = (u32)(regs->d1) & ~CSIGNAL,
.pidfd = (int __user *)regs->d3,
.child_tid = (int __user *)regs->d4,
.parent_tid = (int __user *)regs->d3,


@@ -245,11 +245,6 @@ asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
-if (!memblock.reserved.cnt) {
-pr_emerg("Error memory count\n");
-machine_restart(NULL);
-}
if ((u32) memblock.memory.regions[0].size < 0x400000) {
pr_emerg("Memory must be greater than 4MB\n");
machine_restart(NULL);


@@ -275,6 +275,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
+setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {


@@ -194,17 +194,10 @@ static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
struct kprobe *kp;
-unsigned long faddr;
+bool faddr;
kp = get_kprobe((void *)addr);
-faddr = ftrace_location(addr);
-/*
-* Addresses inside the ftrace location are refused by
-* arch_check_ftrace_location(). Something went terribly wrong
-* if such an address is checked here.
-*/
-if (WARN_ON(faddr && faddr != addr))
-return 0UL;
+faddr = ftrace_location(addr) == addr;
/*
* Use the current code if it is not modified by Kprobe
* and it cannot be modified by ftrace.


@@ -856,7 +856,7 @@ char * __init xen_memory_setup(void)
* to relocating (and even reusing) pages with kernel text or data.
*/
if (xen_is_e820_reserved(__pa_symbol(_text),
-__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
+__pa_symbol(_end) - __pa_symbol(_text))) {
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
BUG();
}


@@ -2616,8 +2616,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/* if a merge has already been setup, then proceed with that first */
-if (bfqq->new_bfqq)
-return bfqq->new_bfqq;
+new_bfqq = bfqq->new_bfqq;
+if (new_bfqq) {
+while (new_bfqq->new_bfqq)
+new_bfqq = new_bfqq->new_bfqq;
+return new_bfqq;
+}
/*
* Do not perform queue merging if the device is non
@@ -6004,7 +6008,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
-if (bfqq_process_refs(bfqq) == 1) {
+if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
@@ -6189,7 +6193,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* addition, if the queue has also just been split, we have to
* resume its state.
*/
-if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
+bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
/*


@@ -691,9 +691,11 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
-if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
-printk(KERN_ERR " %s: p%d could not be added: %ld\n",
-disk->disk_name, p, -PTR_ERR(part));
+if (IS_ERR(part)) {
+if (PTR_ERR(part) != -ENXIO) {
+printk(KERN_ERR " %s: p%d could not be added: %pe\n",
+disk->disk_name, p, part);
+}
return true;
}
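
A side benefit of the %pe change above: %pe takes the ERR_PTR itself and, with CONFIG_SYMBOLIC_ERRNAME=y, prints a symbolic error name instead of requiring the negated long the old %ld format was fed. A minimal sketch of the idiom (standalone illustration, not the block-layer code):

    void *p = ERR_PTR(-ENOMEM);

    if (IS_ERR(p))
        printk(KERN_ERR "could not add: %pe\n", p); /* prints "-ENOMEM" */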


@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
-if (!dev || !tps68470_regmap) {
-dev_warn(dev, "dev or regmap is NULL\n");
-return -EINVAL;
-}
+if (!tps68470_regmap)
+return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");


@@ -18,7 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
-struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
&dev->of_node->fwnode : dev->fwnode;
@@ -1231,7 +1231,7 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-const void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}


@@ -928,7 +928,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
-size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+/* Use maximum HCI Event size so the USB stack handles
+* ZPL/short-transfer automatically.
+*/
+size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {


@@ -48,6 +48,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret)
ret = tpm2_commit_space(chip, space, buf, &len);
+else
+tpm2_flush_space(chip);
out_rc:
return ret ? ret : len;


@@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
struct tpm_space *space = &chip->work_space;
int i;
+if (!space)
+return;
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context(chip, space->context_tbl[i]);


@@ -579,8 +579,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
-hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
-hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);


@@ -408,7 +408,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
-MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
+MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),


@@ -258,6 +258,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
}
clk = of_clk_get_from_provider(&clkspec);
+of_node_put(clkspec.np);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
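
The of_node_put() added above is the leak fix named in this merge's title: of_parse_phandle_with_args() takes a reference on clkspec.np that the caller must drop even when the clock lookup succeeds. A minimal sketch of the pattern (atl_get_clk() is a hypothetical helper for illustration; the driver does this inline in its probe loop):

    static struct clk *atl_get_clk(struct device_node *node, int i)
    {
        struct of_phandle_args clkspec;
        struct clk *clk;

        if (of_parse_phandle_with_args(node, "clocks", "#clock-cells",
                                       i, &clkspec))
            return ERR_PTR(-ENOENT);

        clk = of_clk_get_from_provider(&clkspec);
        of_node_put(clkspec.np); /* balance the reference on every path */
        return clk;
    }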


@@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np)
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
+iounmap(cpu0_base);
pr_err("Unknown frequency\n");
return -EINVAL;
}
@@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
-return msm_timer_init(freq, 32, irq, !!percpu_offset);
+ret = msm_timer_init(freq, 32, irq, !!percpu_offset);
+if (ret)
+iounmap(cpu0_base);
+return ret;
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);


@@ -53,6 +53,9 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_shift;
unsigned long rev_offset;
bool multi_regulator;
+/* Backward compatibility hack: Might have missing syscon */
+#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+u8 quirks;
};
struct ti_cpufreq_data {
@@ -156,6 +159,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
.efuse_mask = BIT(3),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -183,6 +187,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.efuse_mask = BIT(9),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = true,
+.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -197,6 +202,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.efuse_mask = 0,
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+.quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
@@ -216,7 +222,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
-if (ret == -EIO) {
+if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->efuse_offset, 4);
@@ -257,7 +263,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
-if (ret == -EIO) {
+if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->rev_offset, 4);


@@ -1110,15 +1110,18 @@ static long linereq_set_config_unlocked(struct linereq *lr,
for (i = 0; i < lr->num_lines; i++) {
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
+/*
+* Lines not explicitly reconfigured as input or output
+* are left unchanged.
+*/
+if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+continue;
polarity_change =
(!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
-/*
-* Lines have to be requested explicitly for input
-* or output, else the line will be treated "as is".
-*/
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
@@ -1126,7 +1129,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
-} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+} else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;


@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/nospec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/device.h>
@@ -147,7 +148,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
-return &gdev->descs[hwnum];
+return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
}
EXPORT_SYMBOL_GPL(gpiochip_get_desc);


@@ -2098,23 +2098,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
struct edid *edid;
-int edid_size =
-max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
-edid = kmalloc(edid_size, GFP_KERNEL);
-if (edid) {
-memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
-fake_edid_record->ucFakeEDIDLength);
+int edid_size;
+if (fake_edid_record->ucFakeEDIDLength == 128)
+edid_size = fake_edid_record->ucFakeEDIDLength;
+else
+edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
+edid_size, GFP_KERNEL);
+if (edid) {
if (drm_edid_is_valid(edid)) {
adev->mode_info.bios_hardcoded_edid = edid;
adev->mode_info.bios_hardcoded_edid_size = edid_size;
-} else
+} else {
kfree(edid);
+}
}
+record += struct_size(fake_edid_record,
+ucFakeEDIDString,
+edid_size);
+} else {
+/* empty fake edid record must be 3 bytes long */
+record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+}
-record += fake_edid_record->ucFakeEDIDLength ?
-fake_edid_record->ucFakeEDIDLength + 2 :
-sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;


@@ -4107,7 +4107,7 @@ typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
{
UCHAR ucRecordType;
UCHAR ucFakeEDIDLength; // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128
-UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
+UCHAR ucFakeEDIDString[]; // This actually has ucFakeEdidLength elements.
} ATOM_FAKE_EDID_PATCH_RECORD;
typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD


@@ -142,6 +142,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
+u32 slave_zpos = 0;
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
@@ -181,10 +182,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
plane_st->zpos, plane_st->normalized_zpos);
/* calculate max slave zorder */
-if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
+if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
+slave_zpos = plane_st->normalized_zpos;
+if (to_kplane_st(plane_st)->layer_split)
+slave_zpos++;
kcrtc_st->max_slave_zorder =
-max(plane_st->normalized_zpos,
-kcrtc_st->max_slave_zorder);
+max(slave_zpos, kcrtc_st->max_slave_zorder);
+}
}
crtc_st->zpos_changed = true;


@@ -1169,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
-ctx->drm_dev = drm_dev;
+ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,


@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
-struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:


@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
-struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:


@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
-struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:


@@ -56,7 +56,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
-struct msm_drm_private *priv = gpu->dev->dev_private;
+struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
struct msm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -67,7 +68,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -101,6 +102,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
+a5xx_gpu->last_seqno[ring->id] = submit->seqno;
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
@@ -117,12 +119,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
-priv->lastctx = NULL;
+gpu->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -143,9 +144,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
-/* Enable local preemption for finegrain preemption */
+/*
+* Disable local preemption by default because it requires
+* user-space to be aware of it and provide additional handling
+* to restore rendering state or do various flushes on switch.
+*/
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
-OUT_RING(ring, 0x1);
+OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -157,7 +162,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -189,6 +194,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
+a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the


@@ -34,8 +34,10 @@ struct a5xx_gpu {
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
+spinlock_t preempt_start_lock;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;


@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
+struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+if (!empty && ring == a5xx_gpu->cur_ring)
+empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -98,12 +102,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
if (gpu->nr_rings == 1)
return;
+/*
+* Serialize preemption start to ensure that we always make
+* decision on latest state. Otherwise we can get stuck in
+* lower priority or empty ring.
+*/
+spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
/*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
-return;
+goto out;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
@@ -128,9 +139,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
-return;
+goto out;
}
+spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
@@ -154,6 +167,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
return;
+out:
+spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
@@ -191,6 +208,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+/*
+* Try to trigger preemption again in case there was a submit or
+* retire during ring switch
+*/
+a5xx_preempt_trigger(gpu);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
@ -207,6 +230,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
return;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->data = 0;
a5xx_gpu->preempt[i]->info = 0;
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
@@ -303,5 +328,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
+spin_lock_init(&a5xx_gpu->preempt_start_lock);
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}


@@ -99,7 +99,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
-if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -131,14 +131,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
-a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
-struct msm_drm_private *priv = gpu->dev->dev_private;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
@@ -170,7 +167,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-if (priv->lastctx == submit->queue->ctx)
+if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -887,7 +884,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
-a6xx_gpu->cur_ctx_seqno = 0;
+gpu->cur_ctx_seqno = 0;
/* Enable the SQE_to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);


@@ -20,16 +20,6 @@ struct a6xx_gpu {
struct msm_ringbuffer *cur_ring;
-/**
-* cur_ctx_seqno:
-*
-* The ctx->seqno value of the context with current pgtables
-* installed. Tracked by seqno rather than pointer value to
-* avoid dangling pointers, and cases where a ctx can be freed
-* and a new one created with the same address.
-*/
-int cur_ctx_seqno;
struct a6xx_gmu gmu;
struct drm_gem_object *shadow_bo;


@@ -308,7 +308,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
-newname);
+fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {


@@ -356,7 +356,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
-plane ? plane->name : NULL);
+plane ? plane->name : "(null)");
total += inuse;
}


@@ -113,6 +113,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+int ret;
+ret = mutex_lock_interruptible(&priv->mm_lock);
+if (ret)
+return ret;
if (gpu) {
seq_printf(m, "Active Objects (%s):\n", gpu->name);
@@ -122,6 +127,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
seq_printf(m, "Inactive Objects:\n");
msm_gem_describe_objects(&priv->inactive_list, m);
+mutex_unlock(&priv->mm_lock);
return 0;
}


@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
@@ -442,6 +443,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
init_llist_head(&priv->free_list);
INIT_LIST_HEAD(&priv->inactive_list);
+mutex_init(&priv->mm_lock);
+/* Teach lockdep about lock ordering wrt. shrinker: */
+fs_reclaim_acquire(GFP_KERNEL);
+might_lock(&priv->mm_lock);
+fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
@@ -619,14 +626,8 @@ static void context_close(struct msm_file_private *ctx)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
-struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
-mutex_lock(&dev->struct_mutex);
-if (ctx == priv->lastctx)
-priv->lastctx = NULL;
-mutex_unlock(&dev->struct_mutex);
context_close(ctx);
}


@@ -165,7 +165,7 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
-struct msm_file_private *lastctx;
+
/* gpu is only set on open(), but we need this info earlier */
bool is_a2xx;
@@ -175,8 +175,19 @@ struct msm_drm_private {
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
-/* list of GEM objects: */
+/*
+* List of inactive GEM objects. Every bo is either in the inactive_list
+* or gpu->active_list (for the gpu it is active on[1])
+*
+* These lists are protected by mm_lock. If struct_mutex is involved, it
+* should be aquired prior to mm_lock. One should *not* hold mm_lock in
+* get_pages()/vmap()/etc paths, as they can trigger the shrinker.
+*
+* [1] if someone ever added support for the old 2d cores, there could be
+* more than one gpu object
+*/
struct list_head inactive_list;
+struct mutex mm_lock;
/* worker for delayed free of objects: */
struct work_struct free_work;


@@ -745,13 +745,17 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
-WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+struct msm_drm_private *priv = obj->dev->dev_private;
+might_sleep();
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
if (!atomic_fetch_inc(&msm_obj->active_count)) {
+mutex_lock(&priv->mm_lock);
msm_obj->gpu = gpu;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+mutex_unlock(&priv->mm_lock);
}
}
@@ -760,12 +764,14 @@ void msm_gem_active_put(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
-WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+might_sleep();
if (!atomic_dec_return(&msm_obj->active_count)) {
+mutex_lock(&priv->mm_lock);
msm_obj->gpu = NULL;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+mutex_unlock(&priv->mm_lock);
}
}
@@ -921,13 +927,16 @@ static void free_object(struct msm_gem_object *msm_obj)
{
struct drm_gem_object *obj = &msm_obj->base;
struct drm_device *dev = obj->dev;
+struct msm_drm_private *priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
+mutex_lock(&priv->mm_lock);
list_del(&msm_obj->mm_list);
+mutex_unlock(&priv->mm_lock);
mutex_lock(&msm_obj->lock);
@@ -1103,14 +1112,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
-if (struct_mutex_locked) {
-WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-} else {
-mutex_lock(&dev->struct_mutex);
-list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-mutex_unlock(&dev->struct_mutex);
-}
+mutex_lock(&priv->mm_lock);
+list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+mutex_unlock(&priv->mm_lock);
return obj;
@@ -1174,9 +1178,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
mutex_unlock(&msm_obj->lock);
-mutex_lock(&dev->struct_mutex);
+mutex_lock(&priv->mm_lock);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&priv->mm_lock);
return obj;


@@ -51,11 +51,15 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
if (!msm_gem_shrinker_lock(dev, &unlock))
return 0;
+mutex_lock(&priv->mm_lock);
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
if (is_purgeable(msm_obj))
count += msm_obj->base.size >> PAGE_SHIFT;
}
+mutex_unlock(&priv->mm_lock);
if (unlock)
mutex_unlock(&dev->struct_mutex);
@@ -75,6 +79,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
if (!msm_gem_shrinker_lock(dev, &unlock))
return SHRINK_STOP;
+mutex_lock(&priv->mm_lock);
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
if (freed >= sc->nr_to_scan)
break;
@@ -84,6 +90,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
+mutex_unlock(&priv->mm_lock);
if (unlock)
mutex_unlock(&dev->struct_mutex);
@@ -106,6 +114,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
if (!msm_gem_shrinker_lock(dev, &unlock))
return NOTIFY_DONE;
+mutex_lock(&priv->mm_lock);
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
if (is_vunmapable(msm_obj)) {
msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
@@ -118,6 +128,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
}
}
+mutex_unlock(&priv->mm_lock);
if (unlock)
mutex_unlock(&dev->struct_mutex);


@@ -795,7 +795,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
}
gpu->funcs->submit(gpu, submit);
-priv->lastctx = submit->queue->ctx;
+gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
hangcheck_timer_reset(gpu);
}


@@ -94,7 +94,21 @@ struct msm_gpu {
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
int nr_rings;
-/* list of GEM active objects: */
+/**
+* cur_ctx_seqno:
+*
+* The ctx->seqno value of the last context to submit rendering,
+* and the one with current pgtables installed (for generations
+* that support per-context pgtables). Tracked by seqno rather
+* than pointer value to avoid dangling pointers, and cases where
+* a ctx can be freed and a new one created with the same address.
+*/
+int cur_ctx_seqno;
+
+/*
+* List of GEM active objects on this gpu. Protected by
+* msm_drm_private::mm_lock
+*/
struct list_head active_list;
/* does gpu need hw_init? */


@@ -3615,7 +3615,7 @@ typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
{
UCHAR ucRecordType;
UCHAR ucFakeEDIDLength;
-UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
+UCHAR ucFakeEDIDString[]; // This actually has ucFakeEdidLength elements.
} ATOM_FAKE_EDID_PATCH_RECORD;
typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD


@@ -396,7 +396,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
-unsigned long offset;
+u64 offset;
int r;
mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
@@ -434,14 +434,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
return r;
}
-offset = track->cb_color_bo_offset[id] << 8;
+offset = (u64)track->cb_color_bo_offset[id] << 8;
if (offset & (surf.base_align - 1)) {
-dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n",
__func__, __LINE__, id, offset, surf.base_align);
return -EINVAL;
}
-offset += surf.layer_size * mslice;
+offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
/* old ddx are broken they allocate bo with w*h*bpp but
* program slice with ALIGN(h, 8), catch this and patch
@@ -449,14 +449,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
*/
if (!surf.mode) {
uint32_t *ib = p->ib.ptr;
-unsigned long tmp, nby, bsize, size, min = 0;
+u64 tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */
if (surf.nby > 8) {
min = surf.nby - 8;
}
bsize = radeon_bo_size(track->cb_color_bo[id]);
-tmp = track->cb_color_bo_offset[id] << 8;
+tmp = (u64)track->cb_color_bo_offset[id] << 8;
for (nby = surf.nby; nby > min; nby--) {
size = nby * surf.nbx * surf.bpe * surf.nsamples;
if ((tmp + size * mslice) <= bsize) {
@@ -468,7 +468,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
slice = ((nby * surf.nbx) / 64) - 1;
if (!evergreen_surface_check(p, &surf, "cb")) {
/* check if this one works */
-tmp += surf.layer_size * mslice;
+tmp += (u64)surf.layer_size * mslice;
if (tmp <= bsize) {
ib[track->cb_color_slice_idx[id]] = slice;
goto old_ddx_ok;
@@ -477,9 +477,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
}
}
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
-"offset %d, max layer %d, bo size %ld, slice %d)\n",
+"offset %llu, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
-track->cb_color_bo_offset[id] << 8, mslice,
+(u64)track->cb_color_bo_offset[id] << 8, mslice,
radeon_bo_size(track->cb_color_bo[id]), slice);
dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
__func__, __LINE__, surf.nbx, surf.nby,
@@ -563,7 +563,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
-unsigned long offset;
+u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -609,18 +609,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return r;
}
-offset = track->db_s_read_offset << 8;
+offset = (u64)track->db_s_read_offset << 8;
if (offset & (surf.base_align - 1)) {
-dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
-offset += surf.layer_size * mslice;
+offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_read_bo)) {
dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
-"offset %ld, max layer %d, bo size %ld)\n",
+"offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
-(unsigned long)track->db_s_read_offset << 8, mslice,
+(u64)track->db_s_read_offset << 8, mslice,
radeon_bo_size(track->db_s_read_bo));
dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
__func__, __LINE__, track->db_depth_size,
@@ -628,18 +628,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return -EINVAL;
}
-offset = track->db_s_write_offset << 8;
+offset = (u64)track->db_s_write_offset << 8;
if (offset & (surf.base_align - 1)) {
-dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
-offset += surf.layer_size * mslice;
+offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_write_bo)) {
dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
-"offset %ld, max layer %d, bo size %ld)\n",
+"offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
-(unsigned long)track->db_s_write_offset << 8, mslice,
+(u64)track->db_s_write_offset << 8, mslice,
radeon_bo_size(track->db_s_write_bo));
return -EINVAL;
}
@@ -660,7 +660,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
-unsigned long offset;
+u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -707,34 +707,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
return r;
}
-offset = track->db_z_read_offset << 8;
+offset = (u64)track->db_z_read_offset << 8;
if (offset & (surf.base_align - 1)) {
-dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
-offset += surf.layer_size * mslice;
+offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_read_bo)) {
dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
-"offset %ld, max layer %d, bo size %ld)\n",
+"offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
-(unsigned long)track->db_z_read_offset << 8, mslice,
+(u64)track->db_z_read_offset << 8, mslice,
radeon_bo_size(track->db_z_read_bo));
return -EINVAL;
}
-offset = track->db_z_write_offset << 8;
+offset = (u64)track->db_z_write_offset << 8;
if (offset & (surf.base_align - 1)) {
-dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
-offset += surf.layer_size * mslice;
+offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_write_bo)) {
dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
-"offset %ld, max layer %d, bo size %ld)\n",
+"offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
-(unsigned long)track->db_z_write_offset << 8, mslice,
+(u64)track->db_z_write_offset << 8, mslice,
radeon_bo_size(track->db_z_write_bo));
return -EINVAL;
}
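
A short aside on the pattern above: the (u64) casts matter because these offsets are 32-bit values counted in 256-byte units, so the << 8 that converts them to bytes can exceed 32 bits. A minimal standalone illustration (plain C, not driver code):

    #include <stdint.h>

    uint32_t off  = 0x02000000;          /* offset in 256-byte units */
    uint64_t bad  = off << 8;            /* shift done in 32 bits: wraps to 0 */
    uint64_t good = (uint64_t)off << 8;  /* 0x200000000, the intended value */

Widening before the shift (and before the layer_size * mslice multiply) keeps the comparisons against radeon_bo_size() meaningful.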


@@ -1720,23 +1720,29 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
struct edid *edid;
-int edid_size =
-max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
-edid = kmalloc(edid_size, GFP_KERNEL);
-if (edid) {
-memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
-fake_edid_record->ucFakeEDIDLength);
+int edid_size;
+if (fake_edid_record->ucFakeEDIDLength == 128)
+edid_size = fake_edid_record->ucFakeEDIDLength;
+else
+edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
+edid_size, GFP_KERNEL);
+if (edid) {
if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
rdev->mode_info.bios_hardcoded_edid_size = edid_size;
-} else
+} else {
kfree(edid);
+}
}
+record += struct_size(fake_edid_record,
+ucFakeEDIDString,
+edid_size);
+} else {
+/* empty fake edid record must be 3 bytes long */
+record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+}
-record += fake_edid_record->ucFakeEDIDLength ?
-fake_edid_record->ucFakeEDIDLength + 2 :
-sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;


@@ -389,6 +389,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
RK3328_HDMI_HPD_IOE));
+dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {


@@ -370,8 +370,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
if (info->is_yuv)
is_yuv = true;
-if (dst_w > 3840) {
-DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
+if (dst_w > 4096) {
+DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n");
return;
}


@@ -195,12 +195,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(ddev, 0);
if (ret)
-goto err_put;
+goto err_unload;
drm_fbdev_generic_setup(ddev, 16);
return 0;
+err_unload:
+drv_unload(ddev);
err_put:
drm_dev_put(ddev);


@@ -79,7 +79,7 @@ static const bool max16065_have_current[] = {
};
struct max16065_data {
-enum chips type;
+enum chips chip;
struct i2c_client *client;
const struct attribute_group *groups[4];
struct mutex update_lock;
@@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range)
return limit * range / 256;
}
-static inline int MV_TO_LIMIT(int mv, int range)
+static inline int MV_TO_LIMIT(unsigned long mv, int range)
{
-return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
+mv = clamp_val(mv, 0, ULONG_MAX / 256);
+return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range);
}
static inline int ADC_TO_CURR(int adc, int gain)
@@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev)
MAX16065_CURR_SENSE);
}
-for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++)
+for (i = 0; i < 2; i++)
data->fault[i]
= i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
+/*
+* MAX16067 and MAX16068 have separate undervoltage and
+* overvoltage alarm bits. Squash them together.
+*/
+if (data->chip == max16067 || data->chip == max16068)
+data->fault[0] |= data->fault[1];
data->last_updated = jiffies;
data->valid = 1;
}
@@ -493,8 +501,6 @@ static const struct attribute_group max16065_max_group = {
.is_visible = max16065_secondary_is_visible,
};
-static const struct i2c_device_id max16065_id[];
static int max16065_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
@@ -505,7 +511,7 @@ static int max16065_probe(struct i2c_client *client)
bool have_secondary; /* true if chip has secondary limits */
bool secondary_is_max = false; /* secondary limits reflect max */
int groups = 0;
-const struct i2c_device_id *id = i2c_match_id(max16065_id, client);
+enum chips chip = (uintptr_t)i2c_get_match_data(client);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_READ_WORD_DATA))
@@ -515,12 +521,13 @@ static int max16065_probe(struct i2c_client *client)
if (unlikely(!data))
return -ENOMEM;
+data->chip = chip;
data->client = client;
mutex_init(&data->update_lock);
-data->num_adc = max16065_num_adc[id->driver_data];
-data->have_current = max16065_have_current[id->driver_data];
-have_secondary = max16065_have_secondary[id->driver_data];
+data->num_adc = max16065_num_adc[chip];
+data->have_current = max16065_have_current[chip];
+have_secondary = max16065_have_secondary[chip];
if (have_secondary) {
val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE);
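
A brief worked note on the MV_TO_LIMIT() rewrite above, assuming a 32-bit unsigned long: ULONG_MAX / 256 = 0xFFFFFF, so after the first clamp mv * 256 can no longer wrap; the second clamp bounds mv * 256 at range * 255, so the final DIV_ROUND_CLOSEST() never yields more than 255, which is what the one-byte limit register can hold. With the old int arithmetic, a large sysfs write (for example mv = 20000000, giving mv * 256 = 5120000000, which exceeds 2^32) could overflow before any clamping took effect.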


@@ -58,6 +58,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
+MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
/*
* A compensation table should be sorted by the values of .ohm


@@ -92,6 +92,25 @@ const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
}
EXPORT_SYMBOL_GPL(i2c_match_id);
+const void *i2c_get_match_data(const struct i2c_client *client)
+{
+struct i2c_driver *driver = to_i2c_driver(client->dev.driver);
+const struct i2c_device_id *match;
+const void *data;
+data = device_get_match_data(&client->dev);
+if (!data) {
+match = i2c_match_id(driver->id_table, client);
+if (!match)
+return NULL;
+data = (const void *)match->driver_data;
+}
+return data;
+}
+EXPORT_SYMBOL(i2c_get_match_data);
static int i2c_device_match(struct device *dev, struct device_driver *drv)
{
struct i2c_client *client = i2c_verify_client(dev);
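
The max16065 hunk earlier in this merge shows the intended use of this new helper. A minimal caller sketch (foo_probe() and enum chips are hypothetical, standing in for a driver whose I2C, OF, or ACPI match tables carry an enum in driver_data):

    static int foo_probe(struct i2c_client *client)
    {
        /* Falls back to i2c_match_id() only when
         * device_get_match_data() finds nothing.
         */
        enum chips chip = (uintptr_t)i2c_get_match_data(client);

        dev_info(&client->dev, "matched variant %d\n", (int)chip);
        return 0;
    }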


@@ -1192,7 +1192,7 @@ static int __init iw_cm_init(void)
if (ret)
return ret;
-iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
+iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
goto err_alloc;


@@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on
index, pid, onoff);
/* skip invalid PIDs (0x2000) */
-if (pid > 0x1fff || index > 32)
+if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)


@@ -982,7 +982,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */
-if (pid > 0x1fff || index > 32)
+if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)


@@ -204,6 +204,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
* get them
*/
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+if (!mtd->name)
+return -ENOMEM;
mtd->type = MTD_NORFLASH;
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;


@@ -296,10 +296,12 @@ static int __init init_slram(void)
T("slram: devname = %s\n", devname);
if ((!map) || (!(devstart = strsep(&map, ",")))) {
E("slram: No devicestart specified.\n");
+break;
}
T("slram: devstart = %s\n", devstart);
if ((!map) || (!(devlength = strsep(&map, ",")))) {
E("slram: No devicelength / -end specified.\n");
+break;
}
T("slram: devlength = %s\n", devlength);
if (parse_cmdline(devname, devstart, devlength) != 0) {


@@ -60,6 +60,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
__be16 proto;
void *oiph;
int err;
+int nh;
bareudp = rcu_dereference_sk_user_data(sk);
if (!bareudp)
@@ -137,9 +138,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
}
skb_dst_set(skb, &tun_dst->dst);
skb->dev = bareudp->dev;
-oiph = skb_network_header(skb);
skb_reset_mac_header(skb);
+/* Save offset of outer header relative to skb->head,
+* because we are going to reset the network header to the inner header
+* and might change skb->head.
+*/
+nh = skb_network_header(skb) - skb->head;
+skb_reset_network_header(skb);
+if (!pskb_inet_may_pull(skb)) {
+DEV_STATS_INC(bareudp->dev, rx_length_errors);
+DEV_STATS_INC(bareudp->dev, rx_errors);
+goto drop;
+}
+/* Get the outer header. */
+oiph = skb->head + nh;
if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
@@ -294,6 +311,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be32 saddr;
int err;
+if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+return -EINVAL;
if (!sock)
return -ESHUTDOWN;
@@ -357,6 +377,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+return -EINVAL;
if (!sock)
return -ESHUTDOWN;
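
The comment added in the receive path above encodes a general sk_buff rule worth restating: helpers that may pull headers (pskb_inet_may_pull() here, or anything built on pskb_may_pull()) can reallocate skb->head, so pointers into the buffer must not be held across them. A condensed sketch of the save/recompute pattern, under that assumption:

    int nh = skb_network_header(skb) - skb->head; /* keep an offset, not a pointer */

    skb_reset_network_header(skb);
    if (!pskb_inet_may_pull(skb))                 /* may move skb->head */
        goto drop;

    oiph = skb->head + nh;                        /* recompute after the pull */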


@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -1426,7 +1427,6 @@ static int m_can_close(struct net_device *dev)
napi_disable(&cdev->napi);
m_can_stop(dev);
-m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
if (cdev->is_peripheral) {
@@ -1438,6 +1438,9 @@ static int m_can_close(struct net_device *dev)
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
+m_can_clk_stop(cdev);
+phy_power_off(cdev->transceiver);
return 0;
}
@@ -1624,10 +1627,14 @@ static int m_can_open(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
-err = m_can_clk_start(cdev);
+err = phy_power_on(cdev->transceiver);
if (err)
return err;
+err = m_can_clk_start(cdev);
+if (err)
+goto out_phy_power_off;
/* open the can device */
err = open_candev(dev);
if (err) {
@@ -1679,6 +1686,8 @@ static int m_can_open(struct net_device *dev)
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
+out_phy_power_off:
+phy_power_off(cdev->transceiver);
return err;
}


@@ -27,6 +27,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -80,6 +81,7 @@ struct m_can_classdev {
struct workqueue_struct *tx_wq;
struct work_struct tx_work;
struct sk_buff *tx_skb;
+struct phy *transceiver;
struct can_bittiming_const *bit_timing;
struct can_bittiming_const *data_timing;


@@ -6,6 +6,7 @@
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -60,6 +61,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
void __iomem *mram_addr;
+struct phy *transceiver;
int irq, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev);
@@ -99,6 +101,16 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
}
+transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+if (IS_ERR(transceiver)) {
+ret = PTR_ERR(transceiver);
+dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
+goto probe_fail;
+}
+if (transceiver)
+mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate;
priv->base = addr;
priv->mram_base = mram_addr;
@@ -106,6 +118,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->pm_clock_support = 1;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
+mcan_class->transceiver = transceiver;
mcan_class->ops = &m_can_plat_ops;


@@ -569,7 +569,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
(*processed)++;
return true;
-drop:
+drop:
rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -653,6 +653,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+/* Ensure the descriptor config is visible before setting the tx
+* pointer.
+*/
+smp_wmb();
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
return true;
@@ -806,6 +811,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
+/* Ensure the descriptor config is visible before setting the tx
+* pointer.
+*/
+smp_wmb();
/* Update next TX pointer */
priv->tx_pointer = pointer;
@@ -826,7 +836,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
-dma_err:
+dma_err:
if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
@@ -848,7 +858,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
* last fragment, so we know ftgmac100_free_tx_packet()
* hasn't freed the skb yet.
*/
-drop:
+drop:
/* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
@@ -1419,7 +1429,7 @@ static void ftgmac100_reset_task(struct work_struct *work)
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
-bail:
+bail:
if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock);
if (netdev->phydev)
@@ -1490,15 +1500,15 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
-err_ncsi:
+err_ncsi:
napi_disable(&priv->napi);
netif_stop_queue(netdev);
-err_alloc:
+err_alloc:
ftgmac100_free_buffers(priv);
free_irq(netdev->irq, netdev);
-err_irq:
+err_irq:
netif_napi_del(&priv->napi);
-err_hw:
+err_hw:
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
ftgmac100_free_rings(priv);
return err;


@@ -1333,12 +1333,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
-err = request_irq(irq, enetc_msix, 0, v->name, v);
+err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
-disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
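
The IRQF_NO_AUTOEN change above closes a window rather than merely saving a call: with request_irq(..., 0, ...) the handler can fire before the subsequent disable_irq(), whereas IRQF_NO_AUTOEN registers the interrupt already disabled. A condensed sketch of the idiom (generic illustration, not the enetc code):

    err = request_irq(irq, handler, IRQF_NO_AUTOEN, name, dev); /* starts disabled */
    if (err)
        return err;
    /* ... finish setup that the handler depends on ... */
    enable_irq(irq);                                            /* arm explicitly */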


@@ -1263,6 +1263,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
rtl8168g_enable_gphy_10m(phydev);
+rtl8168g_disable_aldps(phydev);
rtl8125a_config_eee_phy(phydev);
}
@@ -1302,6 +1303,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev);
+rtl8168g_disable_aldps(phydev);
rtl8125b_config_eee_phy(phydev);
}


@@ -54,6 +54,7 @@ struct geneve_config {
bool use_udp6_rx_checksums;
bool ttl_inherit;
enum ifla_geneve_df df;
+bool inner_proto_inherit;
};
/* Pseudo network device */
@@ -249,17 +250,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
-skb_reset_mac_header(skb);
-skb->protocol = eth_type_trans(skb, geneve->dev);
-skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
if (tun_dst)
skb_dst_set(skb, &tun_dst->dst);
-/* Ignore packet loops (and multicast echo) */
-if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
-geneve->dev->stats.rx_errors++;
-goto drop;
+if (gnvh->proto_type == htons(ETH_P_TEB)) {
+skb_reset_mac_header(skb);
+skb->protocol = eth_type_trans(skb, geneve->dev);
+skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+/* Ignore packet loops (and multicast echo) */
+if (ether_addr_equal(eth_hdr(skb)->h_source,
+geneve->dev->dev_addr)) {
+geneve->dev->stats.rx_errors++;
+goto drop;
+}
+} else {
+skb_reset_mac_header(skb);
+skb->dev = geneve->dev;
+skb->pkt_type = PACKET_HOST;
}
oiph = skb_network_header(skb);
@@ -343,6 +351,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct genevehdr *geneveh;
struct geneve_dev *geneve;
struct geneve_sock *gs;
+__be16 inner_proto;
int opts_len;
/* Need UDP and Geneve header to be present */
@@ -354,7 +363,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(geneveh->ver != GENEVE_VER))
goto drop;
-if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+inner_proto = geneveh->proto_type;
+if (unlikely((inner_proto != htons(ETH_P_TEB) &&
+inner_proto != htons(ETH_P_IP) &&
+inner_proto != htons(ETH_P_IPV6))))
goto drop;
gs = rcu_dereference_sk_user_data(sk);
@@ -365,9 +378,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!geneve)
goto drop;
+if (unlikely((!geneve->cfg.inner_proto_inherit &&
+inner_proto != htons(ETH_P_TEB)))) {
+geneve->dev->stats.rx_dropped++;
+goto drop;
+}
opts_len = geneveh->opt_len * 4;
-if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
-htons(ETH_P_TEB),
+if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
geneve->dev->stats.rx_dropped++;
goto drop;
@@ -714,7 +732,8 @@ static int geneve_stop(struct net_device *dev)
}
static void geneve_build_header(struct genevehdr *geneveh,
-const struct ip_tunnel_info *info)
+const struct ip_tunnel_info *info,
+__be16 inner_proto)
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
@@ -722,7 +741,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
-geneveh->proto_type = htons(ETH_P_TEB);
+geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
@ -731,10 +750,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
const struct ip_tunnel_info *info,
bool xnet, int ip_hdr_len)
bool xnet, int ip_hdr_len,
bool inner_proto_inherit)
{
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
struct genevehdr *gnvh;
__be16 inner_proto;
int min_headroom;
int err;
@ -752,8 +773,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
goto free_dst;
gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
geneve_build_header(gnvh, info);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
geneve_build_header(gnvh, info, inner_proto);
skb_set_inner_protocol(skb, inner_proto);
return 0;
free_dst:
@ -876,6 +898,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@ -887,7 +910,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!skb_vlan_inet_prepare(skb))
if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@ -959,7 +982,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
inner_proto_inherit);
if (unlikely(err))
return err;
@ -975,6 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@ -984,7 +1009,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!skb_vlan_inet_prepare(skb))
if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@ -1038,7 +1063,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
inner_proto_inherit);
if (unlikely(err))
return err;
@ -1387,6 +1413,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
dst_cache_reset(&geneve->cfg.info.dst_cache);
memcpy(&geneve->cfg, cfg, sizeof(*cfg));
if (geneve->cfg.inner_proto_inherit) {
dev->header_ops = NULL;
dev->type = ARPHRD_NONE;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->flags = IFF_NOARP;
}
err = register_netdevice(dev);
if (err)
return err;
@ -1560,10 +1594,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
#endif
}
if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
if (changelink) {
attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
goto change_notsup;
}
cfg->inner_proto_inherit = true;
}
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
"Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
"Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
}
@ -1798,6 +1840,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
goto nla_put_failure;
if (geneve->cfg.inner_proto_inherit &&
nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
goto nla_put_failure;
return 0;
nla_put_failure:
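
Taken together, the receive-path changes reduce to the dispatch below (a simplified sketch, not the literal driver code): TEB frames keep the Ethernet handling, bare IP/IPv6 is accepted only on devices created with inner_proto_inherit, and everything else is dropped.

	if (gnvh->proto_type == htons(ETH_P_TEB)) {
		skb->protocol = eth_type_trans(skb, geneve->dev);	/* Ethernet framing */
	} else if (geneve->cfg.inner_proto_inherit) {
		skb->dev = geneve->dev;		/* bare IP/IPv6 payload */
		skb->pkt_type = PACKET_HOST;
	} else {
		goto drop;			/* counted in rx_dropped */
	}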


@ -4747,6 +4747,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
int i;
if (!iwl_mvm_has_new_tx_api(mvm)) {
/* we can't ask the firmware anything if it is dead */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status))
return;
if (drop) {
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm,
@ -4828,8 +4832,11 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
* If the firmware is dead, this can't work...
*/
if (!drop && !iwl_mvm_has_new_tx_api(mvm))
if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status))
iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}


@ -111,7 +111,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
}
break;
default:
IWL_ERR(trans, "WRT: Invalid buffer destination\n");
IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
le32_to_cpu(fw_mon_cfg->buf_location));
}
out:
if (dbg_flags)


@ -401,8 +401,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
MT_WF_RFCR_DROP_CTL_RSV |
MT_WF_RFCR_DROP_NDPA);
MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);


@ -366,6 +366,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct wilc_join_bss_param *param;
u8 rates_len = 0;
int ies_len;
u64 ies_tsf;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@ -381,6 +382,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
return NULL;
}
ies_len = ies->len;
ies_tsf = ies->tsf;
rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval);
@ -436,7 +438,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
param->tsf_lo = cpu_to_le32(ies->tsf);
param->tsf_lo = cpu_to_le32(ies_tsf);
param->noa_enabled = 1;
param->idx = noa_attr.index;
if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
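
The point of the wilc1000 hunk: ies->tsf was read after rcu_read_unlock(), i.e. after the protection had lapsed. The rule it restores, in sketch form, is to copy RCU-protected fields out while the read-side lock is held:

	rcu_read_lock();
	ies = rcu_dereference(bss->ies);
	ies_len = ies->len;	/* copy out under the lock */
	ies_tsf = ies->tsf;
	rcu_read_unlock();
	/* from here on, use only the local copies */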


@ -1007,20 +1007,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
int ret = 0;
fw = &rtwdev->fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
return -EINVAL;
ret = -EINVAL;
if (chip->wow_fw_name) {
fw = &rtwdev->wow_fw;
wait_for_completion(&fw->completion);
if (!fw->firmware)
return -EINVAL;
ret = -EINVAL;
}
return 0;
return ret;
}
static int rtw_power_on(struct rtw_dev *rtwdev)


@ -598,7 +598,7 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(am6_pci_devids, bridge)) {
bridge_dev = pci_get_host_bridge_device(dev);
if (!bridge_dev && !bridge_dev->parent)
if (!bridge_dev || !bridge_dev->parent)
return;
ks_pcie = dev_get_drvdata(bridge_dev->parent);


@ -79,8 +79,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@ -95,8 +95,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
MSGF_MSIC_SR_LINK_BWIDTH)
MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@ -305,10 +305,10 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
dev_err(dev, "Fatal Error Detected\n");
if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */


@ -1401,8 +1401,11 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
/* We will handle a range of GPIO pins */
for (i = 0; i < gpio_banks; i++)
if (gpio_chips[i])
if (gpio_chips[i]) {
pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0,
gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
}
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");


@ -1923,7 +1923,8 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
if (pinctrl_enable(pcs->pctl))
ret = pinctrl_enable(pcs->pctl);
if (ret)
goto free;
return 0;


@ -40,6 +40,7 @@
#define AXP209_FG_PERCENT GENMASK(6, 0)
#define AXP22X_FG_VALID BIT(7)
#define AXP20X_CHRG_CTRL1_ENABLE BIT(7)
#define AXP20X_CHRG_CTRL1_TGT_VOLT GENMASK(6, 5)
#define AXP20X_CHRG_CTRL1_TGT_4_1V (0 << 5)
#define AXP20X_CHRG_CTRL1_TGT_4_15V (1 << 5)
@ -303,11 +304,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
val->intval = reg & AXP209_FG_PERCENT;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
return axp20x_batt->data->get_max_voltage(axp20x_batt,
&val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, &reg);
if (ret)
return ret;
@ -455,10 +456,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_set_voltage_min_design(axp20x_batt, val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
@ -467,7 +468,18 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
return axp20x_set_max_constant_charge_current(axp20x_batt,
val->intval);
case POWER_SUPPLY_PROP_STATUS:
switch (val->intval) {
case POWER_SUPPLY_STATUS_CHARGING:
return regmap_update_bits(axp20x_batt->regmap, AXP20X_CHRG_CTRL1,
AXP20X_CHRG_CTRL1_ENABLE, AXP20X_CHRG_CTRL1_ENABLE);
case POWER_SUPPLY_STATUS_DISCHARGING:
case POWER_SUPPLY_STATUS_NOT_CHARGING:
return regmap_update_bits(axp20x_batt->regmap, AXP20X_CHRG_CTRL1,
AXP20X_CHRG_CTRL1_ENABLE, 0);
}
fallthrough;
default:
return -EINVAL;
}
@ -482,16 +494,17 @@ static enum power_supply_property axp20x_battery_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_CAPACITY,
};
static int axp20x_battery_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN ||
return psp == POWER_SUPPLY_PROP_STATUS ||
psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_VOLTAGE_MAX ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT ||
psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
}


@ -851,7 +851,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
/* program interrupt thesholds such that we should
* get interrupt for every 'off' perc change in the soc
*/
regmap_read(map, MAX17042_RepSOC, &soc);
if (chip->pdata->enable_current_sense)
regmap_read(map, MAX17042_RepSOC, &soc);
else
regmap_read(map, MAX17042_VFSOC, &soc);
soc >>= 8;
soc_tr = (soc + off) << 8;
if (off < soc)
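
Background for the register switch (the surrounding threshold math is unchanged): RepSOC is the coulomb-counter state of charge and is only meaningful with a current-sense resistor fitted, while VFSOC is the voltage-based estimate that works without one. Distilled:

	/* RepSOC needs a sense resistor; fall back to the voltage fuel gauge */
	regmap_read(map, chip->pdata->enable_current_sense ?
			 MAX17042_RepSOC : MAX17042_VFSOC, &soc);
	soc >>= 8;	/* the integer percentage lives in the high byte */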


@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
static int berlin2_reset_probe(struct platform_device *pdev)
{
struct device_node *parent_np = of_get_parent(pdev->dev.of_node);
struct device_node *parent_np;
struct berlin_reset_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
parent_np = of_get_parent(pdev->dev.of_node);
priv->regmap = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(priv->regmap))


@ -482,6 +482,7 @@ static const struct of_device_id bcm63xx_spi_of_match[] = {
{ .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
{ },
};
MODULE_DEVICE_TABLE(of, bcm63xx_spi_of_match);
static int bcm63xx_spi_probe(struct platform_device *pdev)
{


@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@ -410,7 +409,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
}
/* Request IRQ */
hw->irqnum = irq_of_parse_and_map(np, 0);
ret = platform_get_irq(op, 0);
if (ret < 0)
goto free_host;
hw->irqnum = ret;
ret = request_irq(hw->irqnum, spi_ppc4xx_int,
0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
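
This converts the probe path from irq_of_parse_and_map() to the generic platform helper. platform_get_irq() returns the Linux IRQ number or a negative errno, so the canonical shape is (sketch):

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;	/* propagates -EPROBE_DEFER and friends */
	hw->irqnum = ret;	/* non-negative: the mapped IRQ number */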


@ -724,7 +724,7 @@ static struct urb *usbtmc_create_urb(void)
if (!urb)
return NULL;
dmabuf = kmalloc(bufsize, GFP_KERNEL);
dmabuf = kzalloc(bufsize, GFP_KERNEL);
if (!dmabuf) {
usb_free_urb(urb);
return NULL;


@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
{ USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
{ } /* Terminating entry */
};


@ -171,3 +171,7 @@
/* Allied Telesis VT-Kit3 */
#define AT_VENDOR_ID 0x0caa
#define AT_VTKIT3_PRODUCT_ID 0x3001
/* Macrosilicon MS3020 */
#define MACROSILICON_VENDOR_ID 0x345f
#define MACROSILICON_MS3020_PRODUCT_ID 0x3020


@ -344,6 +344,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)
if (hpfb_init_one(paddr, vaddr)) {
if (d->scode >= DIOII_SCBASE)
iounmap((void *)vaddr);
release_mem_region(d->resource.start, resource_size(&d->resource));
return -ENOMEM;
}
return 0;


@ -91,9 +91,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
/* If buffer is physically aligned, ensure DMA alignment. */
if (IS_ALIGNED(p, algn) &&
!IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
return 1;
for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 1;
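
The added check encodes a DMA-alignment guarantee: when the physical buffer is naturally aligned for its allocation order but the corresponding machine (bus) address is not, the range must be treated as straddling so it gets bounced. The arithmetic, assuming 4 KiB pages (sketch):

	/* e.g. size = 9000 -> get_order() = 2 -> algn = 16 KiB */
	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	if (IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn))
		return 1;	/* aligned CPU address, misaligned bus address */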


@ -516,6 +516,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
if (min_inodes < 1)
min_inodes = 1;
min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
if (min_clusters < 0)
min_clusters = 0;
/*
* Start looking in the flex group where we last allocated an
@ -757,10 +759,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
struct ext4_group_desc *gdp;
ext4_group_t group;
int bit;
int err = -EFSCORRUPTED;
int err;
if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
goto out;
return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
@ -863,6 +865,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
sync_dirty_buffer(group_desc_bh);
out:
brelse(inode_bitmap_bh);
return err;
}
@ -1055,12 +1058,13 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
/* Skip groups with suspicious inode tables */
if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
&& EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
IS_ERR(inode_bitmap_bh)) {
if (IS_ERR(inode_bitmap_bh)) {
inode_bitmap_bh = NULL;
goto next_group;
}
if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
goto next_group;
repeat_in_this_group:
ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);


@ -1667,24 +1667,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
struct ext4_xattr_ibody_find is = {
.s = { .not_found = -ENODATA, },
};
struct ext4_xattr_info i = {
.name_index = EXT4_XATTR_INDEX_SYSTEM,
.name = EXT4_XATTR_SYSTEM_DATA,
};
int ret;
struct ext4_iloc iloc;
void *inline_start;
int inline_size;
if (ext4_get_inode_loc(dir, &iloc))
return NULL;
ret = ext4_get_inode_loc(dir, &is.iloc);
if (ret)
return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem);
ret = ext4_xattr_ibody_find(dir, &i, &is);
if (ret)
goto out;
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
}
inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block +
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, 0, res_dir);
if (ret == 1)
goto out_find;
@ -1694,20 +1706,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
goto out;
inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc);
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, 0, res_dir);
if (ret == 1)
goto out_find;
out:
brelse(iloc.bh);
iloc.bh = NULL;
brelse(is.iloc.bh);
if (ret < 0)
is.iloc.bh = ERR_PTR(ret);
else
is.iloc.bh = NULL;
out_find:
up_read(&EXT4_I(dir)->xattr_sem);
return iloc.bh;
return is.iloc.bh;
}
int ext4_delete_inline_entry(handle_t *handle,
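
Note the changed contract: ext4_find_inline_entry() now returns an ERR_PTR() for hard failures and NULL only when no entry exists, so callers must separate the two cases (illustrative sketch, not a quote of the callers):

	bh = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data);
	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* I/O error or corruption */
	if (!bh)
		return -ENOENT;		/* entry simply not found */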


@ -3176,11 +3176,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/*
* Clear the trimmed flag for the group so that the next
* ext4_trim_fs can trim it.
* If the volume is mounted with -o discard, online discard
* is supported and the free blocks will be trimmed online.
*/
if (!test_opt(sb, DISCARD))
EXT4_MB_GRP_CLEAR_TRIMMED(db);
EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
@ -5589,8 +5586,9 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
" group:%u block:%d count:%lu failed"
" with %d", block_group, bit, count,
err);
} else
EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
}
EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group);
mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);


@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
}
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
if (!bmp->db_numag) {
if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
err = -EINVAL;
goto err_release_metapage;
}
@ -652,7 +652,7 @@ int dbNextAG(struct inode *ipbmap)
* average free space.
*/
for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
if (agpref == bmp->db_numag)
if (agpref >= bmp->db_numag)
agpref = 0;
if (atomic_read(&bmp->db_active[agpref]))


@ -1362,7 +1362,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
if (agno < 0 || agno > dn_numag)
if (agno < 0 || agno > dn_numag || agno >= MAXAG)
return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {


@ -2553,20 +2553,27 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
struct super_block *sb = mnt->mnt_sb;
if (!__mnt_is_readonly(mnt) &&
(!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
(ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
char *buf = (char *)__get_free_page(GFP_KERNEL);
char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
struct tm tm;
char *buf, *mntpath;
time64_to_tm(sb->s_time_max, 0, &tm);
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf)
mntpath = d_path(mountpoint, buf, PAGE_SIZE);
else
mntpath = ERR_PTR(-ENOMEM);
if (IS_ERR(mntpath))
mntpath = "(unknown)";
pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
sb->s_type->name,
is_mounted(mnt) ? "remounted" : "mounted",
mntpath,
tm.tm_year+1900, (unsigned long long)sb->s_time_max);
mntpath, &sb->s_time_max,
(unsigned long long)sb->s_time_max);
free_page((unsigned long)buf);
sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
if (buf)
free_page((unsigned long)buf);
}
}
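
The rewrite leans on the %ptTd printk extension, which formats a time64_t as a date and makes the time64_to_tm() round-trip unnecessary; a minimal usage sketch (my reading of the format specifier):

	time64_t t = ktime_get_real_seconds();
	pr_info("supported until %ptTd\n", &t);	/* e.g. "supported until 2038-01-19" */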


@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
(flags & NILFS_BTREE_NODE_ROOT) ||
nchildren < 0 ||
nchildren <= 0 ||
nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
nilfs_crit(inode->i_sb,
"bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX ||
(nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) {
nilfs_crit(inode->i_sb,
"bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
inode->i_ino, level, flags, nchildren);
@ -1659,13 +1660,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
int nchildren, ret;
root = nilfs_btree_get_root(btree);
nchildren = nilfs_btree_node_get_nchildren(root);
if (unlikely(nchildren == 0))
return 0;
switch (nilfs_btree_height(btree)) {
case 2:
bh = NULL;
node = root;
break;
case 3:
nchildren = nilfs_btree_node_get_nchildren(root);
if (nchildren > 1)
return 0;
ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
@ -1674,12 +1678,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
if (ret < 0)
return ret;
node = (struct nilfs_btree_node *)bh->b_data;
nchildren = nilfs_btree_node_get_nchildren(node);
break;
default:
return 0;
}
nchildren = nilfs_btree_node_get_nchildren(node);
maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
nextmaxkey = (nchildren > 1) ?
nilfs_btree_node_get_key(node, nchildren - 2) : 0;


@ -1068,13 +1068,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
return i_ret + b_ret;
}
static int ocfs2_xattr_find_entry(int name_index,
static int ocfs2_xattr_find_entry(struct inode *inode, int name_index,
const char *name,
struct ocfs2_xattr_search *xs)
{
struct ocfs2_xattr_entry *entry;
size_t name_len;
int i, cmp = 1;
int i, name_offset, cmp = 1;
if (name == NULL)
return -EINVAL;
@ -1082,13 +1082,22 @@ static int ocfs2_xattr_find_entry(int name_index,
name_len = strlen(name);
entry = xs->here;
for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
if ((void *)entry >= xs->end) {
ocfs2_error(inode->i_sb, "corrupted xattr entries");
return -EFSCORRUPTED;
}
cmp = name_index - ocfs2_xattr_get_type(entry);
if (!cmp)
cmp = name_len - entry->xe_name_len;
if (!cmp)
cmp = memcmp(name, (xs->base +
le16_to_cpu(entry->xe_name_offset)),
name_len);
if (!cmp) {
name_offset = le16_to_cpu(entry->xe_name_offset);
if ((xs->base + name_offset + name_len) > xs->end) {
ocfs2_error(inode->i_sb,
"corrupted xattr entries");
return -EFSCORRUPTED;
}
cmp = memcmp(name, (xs->base + name_offset), name_len);
}
if (cmp == 0)
break;
entry += 1;
@ -1172,7 +1181,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode,
xs->base = (void *)xs->header;
xs->here = xs->header->xh_entries;
ret = ocfs2_xattr_find_entry(name_index, name, xs);
ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
if (ret)
return ret;
size = le64_to_cpu(xs->here->xe_value_size);
@ -2704,7 +2713,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
/* Find the named attribute. */
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
ret = ocfs2_xattr_find_entry(name_index, name, xs);
ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
if (ret && ret != -ENODATA)
return ret;
xs->not_found = ret;
@ -2839,7 +2848,7 @@ static int ocfs2_xattr_block_find(struct inode *inode,
xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
xs->here = xs->header->xh_entries;
ret = ocfs2_xattr_find_entry(name_index, name, xs);
ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
} else
ret = ocfs2_xattr_index_block_find(inode, blk_bh,
name_index,


@ -476,6 +476,8 @@ void generic_shutdown_super(struct super_block *sb)
spin_unlock(&sb_lock);
up_write(&sb->s_umount);
if (sb->s_bdi != &noop_backing_dev_info) {
if (sb->s_iflags & SB_I_PERSB_BDI)
bdi_unregister(sb->s_bdi);
bdi_put(sb->s_bdi);
sb->s_bdi = &noop_backing_dev_info;
}
@ -1634,6 +1636,7 @@ int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
}
WARN_ON(sb->s_bdi != &noop_backing_dev_info);
sb->s_bdi = bdi;
sb->s_iflags |= SB_I_PERSB_BDI;
return 0;
}


@ -508,6 +508,10 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
/* A list running through the active hierarchies */
struct list_head root_list;
struct rcu_head rcu; /* Must be near the top */
/* The root cgroup. Root is destroyed on its release. */
struct cgroup cgrp;
@ -517,9 +521,6 @@ struct cgroup_root {
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
/* A list running through the active hierarchies */
struct list_head root_list;
/* Hierarchy-specific flags */
unsigned int flags;


@ -1436,6 +1436,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
/* Possible states of 'frozen' field */
enum {


@ -342,6 +342,8 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
const void *i2c_get_match_data(const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
struct device * const dev = kobj_to_dev(kobj);


@ -31,7 +31,7 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
struct fwnode_handle *dev_fwnode(struct device *dev);
struct fwnode_handle *dev_fwnode(const struct device *dev);
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
@ -382,7 +382,7 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
const void *device_get_match_data(struct device *dev);
const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);


@ -737,10 +737,7 @@ struct sk_buff {
struct list_head list;
};
union {
struct sock *sk;
int ip_defrag_offset;
};
struct sock *sk;
union {
ktime_t tstamp;


@ -335,9 +335,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
/* Variant of pskb_inet_may_pull().
*/
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
bool inner_proto_inherit)
{
int nhlen = 0, maclen = ETH_HLEN;
int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
/* Essentially this is skb_protocol(skb, true)

Some files were not shown because too many files have changed in this diff.