This is the 5.10.156 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmOA8VIACgkQONu9yGCS
aT7Z2g/+O/03N67HvGVJavgGXI0B1DcGQYSdyyLwoHLLu6RnwUjIZVUEL8EJnC1+
HKG3l/8r2mjkD/5g1SVIDSo/6sGOQUkrWcEqmpBjUt6g6xOB1w/RWagHKYL9d+9I
IEAGArdElzPFwUagYJsUoXt77ixS9R22DYWq4bqcru19+TQRZ8SP4HOEMQ+kjeFo
fn0cqFsXxefhUrE4Io3gTQh9mcRI2kVJh9eCQqCrmjmuY25t4an8leEATqCEGpNT
TNLdCIqpmKXPl5MuoiFQf4/0W0ymmhTU+Xa8XxVD7wW9eD1kX75uIY5MHAz9OoWw
waLK5qPwQlguWhNF618nSzBaMFB1CBQ3uiSlW0BaLNLa6OIx7E3rV9PXyLJ7z3kh
K+XgVXgukAtwuQvpbnE57tm78oyyq7Nw6toAAyomYyJKgZfZ6XovK35ilz32Y7Eb
mxT3yA3D05SLwm92sbkDvrpyzu5ONkyf02ubDw0e2sdBOXyajwuuoxny/nlQZMxO
bhN1aDIpi4cudPWEjZh2XbDDRtxTD4SOAlqfi39j11QUCwk5S+7994alBrMHPbOn
RTrVnblyaKcjM5gYNRFmLygqfaHVcAd9xNpLBpuywgF8sSESYeYscdNrd1O/zeux
VTIZkN/lAPqVE00QlBlYIqOIOHLzDpGN/Ba1y6EehDGWt82G8dE=
=c7T7
-----END PGP SIGNATURE-----

Merge 5.10.156 into android12-5.10-lts

Changes in 5.10.156
    ASoC: wm5102: Revert "ASoC: wm5102: Fix PM disable depth imbalance in wm5102_probe"
    ASoC: wm5110: Revert "ASoC: wm5110: Fix PM disable depth imbalance in wm5110_probe"
    ASoC: wm8997: Revert "ASoC: wm8997: Fix PM disable depth imbalance in wm8997_probe"
    ASoC: mt6660: Keep the pm_runtime enables before component stuff in mt6660_i2c_probe
    ASoC: wm8962: Add an event handler for TEMP_HP and TEMP_SPK
    spi: intel: Fix the offset to get the 64K erase opcode
    ASoC: codecs: jz4725b: add missed Line In power control bit
    ASoC: codecs: jz4725b: fix reported volume for Master ctl
    ASoC: codecs: jz4725b: use right control for Capture Volume
    ASoC: codecs: jz4725b: fix capture selector naming
    selftests/futex: fix build for clang
    selftests/intel_pstate: fix build for ARCH=x86_64
    ASoC: rt1308-sdw: add the default value of some registers
    drm/amd/display: Remove wrong pipe control lock
    NFSv4: Retry LOCK on OLD_STATEID during delegation return
    i2c: tegra: Allocate DMA memory for DMA engine
    i2c: i801: add lis3lv02d's I2C address for Vostro 5568
    drm/imx: imx-tve: Fix return type of imx_tve_connector_mode_valid
    btrfs: remove pointless and double ulist frees in error paths of qgroup tests
    Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm
    ASoC: codecs: jz4725b: Fix spelling mistake "Sourc" -> "Source", "Routee" -> "Route"
    ALSA: hda/realtek: fix speakers and micmute on HP 855 G8
    mtd: spi-nor: intel-spi: Disable write protection only if asked
    spi: intel: Use correct mask for flash and protected regions
    mmc: sdhci-esdhc-imx: use the correct host caps for MMC_CAP_8_BIT_DATA
    drm/amd/pm: support power source switch on Sienna Cichlid
    drm/amd/pm: Read BIF STRAP also for BACO check
    drm/amd/pm: disable BACO entry/exit completely on several sienna cichlid cards
    drm/amdgpu: disable BACO on special BEIGE_GOBY card
    spi: stm32: Print summary 'callbacks suppressed' message
    ASoC: core: Fix use-after-free in snd_soc_exit()
    ASoC: tas2770: Fix set_tdm_slot in case of single slot
    ASoC: tas2764: Fix set_tdm_slot in case of single slot
    serial: 8250: Remove serial_rs485 sanitization from em485
    serial: 8250: omap: Fix missing PM runtime calls for omap8250_set_mctrl()
    serial: 8250_omap: remove wait loop from Errata i202 workaround
    serial: 8250: omap: Fix unpaired pm_runtime_put_sync() in omap8250_remove()
    serial: 8250: omap: Flush PM QOS work on remove
    serial: imx: Add missing .thaw_noirq hook
    tty: n_gsm: fix sleep-in-atomic-context bug in gsm_control_send
    bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
    ASoC: soc-utils: Remove __exit for snd_soc_util_exit()
    sctp: remove the unnecessary sinfo_stream check in sctp_prsctp_prune_unsent
    sctp: clear out_curr if all frag chunks of current msg are pruned
    block: sed-opal: kmalloc the cmd/resp buffers
    arm64: Fix bit-shifting UB in the MIDR_CPU_MODEL() macro
    siox: fix possible memory leak in siox_device_add()
    parport_pc: Avoid FIFO port location truncation
    pinctrl: devicetree: fix null pointer dereferencing in pinctrl_dt_to_map
    drm/panel: simple: set bpc field for logic technologies displays
    drm/drv: Fix potential memory leak in drm_dev_init()
    drm: Fix potential null-ptr-deref in drm_vblank_destroy_worker()
    ARM: dts: imx7: Fix NAND controller size-cells
    arm64: dts: imx8mm: Fix NAND controller size-cells
    arm64: dts: imx8mn: Fix NAND controller size-cells
    ata: libata-transport: fix double ata_host_put() in ata_tport_add()
    ata: libata-transport: fix error handling in ata_tport_add()
    ata: libata-transport: fix error handling in ata_tlink_add()
    ata: libata-transport: fix error handling in ata_tdev_add()
    bpf: Initialize same number of free nodes for each pcpu_freelist
    net: bgmac: Drop free_netdev() from bgmac_enet_remove()
    mISDN: fix possible memory leak in mISDN_dsp_element_register()
    net: hinic: Fix error handling in hinic_module_init()
    net: liquidio: release resources when liquidio driver open failed
    mISDN: fix misuse of put_device() in mISDN_register_device()
    net: macvlan: Use built-in RCU list checking
    net: caif: fix double disconnect client in chnl_net_open()
    bnxt_en: Remove debugfs when pci_register_driver failed
    xen/pcpu: fix possible memory leak in register_pcpu()
    net: ionic: Fix error handling in ionic_init_module()
    net: ena: Fix error handling in ena_init()
    drbd: use after free in drbd_create_device()
    platform/x86/intel: pmc: Don't unconditionally attach Intel PMC when virtualized
    cifs: add check for returning value of SMB2_close_init
    net: ag71xx: call phylink_disconnect_phy if ag71xx_hw_enable() fail in ag71xx_open()
    net/x25: Fix skb leak in x25_lapb_receive_frame()
    cifs: Fix wrong return value checking when GETFLAGS
    net: thunderbolt: Fix error handling in tbnet_init()
    cifs: add check for returning value of SMB2_set_info_init
    ftrace: Fix the possible incorrect kernel message
    ftrace: Optimize the allocation for mcount entries
    ftrace: Fix null pointer dereference in ftrace_add_mod()
    ring_buffer: Do not deactivate non-existant pages
    tracing/ring-buffer: Have polling block on watermark
    tracing: Fix memory leak in test_gen_synth_cmd() and test_empty_synth_event()
    tracing: Fix wild-memory-access in register_synth_event()
    tracing: kprobe: Fix potential null-ptr-deref on trace_event_file in kprobe_event_gen_test_exit()
    tracing: kprobe: Fix potential null-ptr-deref on trace_array in kprobe_event_gen_test_exit()
    ALSA: usb-audio: Drop snd_BUG_ON() from snd_usbmidi_output_open()
    ALSA: hda/realtek: fix speakers for Samsung Galaxy Book Pro
    ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book Pro 360
    Revert "usb: dwc3: disable USB core PHY management"
    slimbus: stream: correct presence rate frequencies
    speakup: fix a segfault caused by switching consoles
    USB: bcma: Make GPIO explicitly optional
    USB: serial: option: add Sierra Wireless EM9191
    USB: serial: option: remove old LARA-R6 PID
    USB: serial: option: add u-blox LARA-R6 00B modem
    USB: serial: option: add u-blox LARA-L6 modem
    USB: serial: option: add Fibocom FM160 0x0111 composition
    usb: add NO_LPM quirk for Realforce 87U Keyboard
    usb: chipidea: fix deadlock in ci_otg_del_timer
    usb: typec: mux: Enter safe mode only when pins need to be reconfigured
    iio: adc: at91_adc: fix possible memory leak in at91_adc_allocate_trigger()
    iio: trigger: sysfs: fix possible memory leak in iio_sysfs_trig_init()
    iio: adc: mp2629: fix wrong comparison of channel
    iio: adc: mp2629: fix potential array out of bound access
    iio: pressure: ms5611: changed hardcoded SPI speed to value limited
    dm ioctl: fix misbehavior if list_versions races with module loading
    serial: 8250: Fall back to non-DMA Rx if IIR_RDI occurs
    serial: 8250: Flush DMA Rx on RLSI
    serial: 8250_lpss: Configure DMA also w/o DMA filter
    Input: iforce - invert valid length check when fetching device IDs
    maccess: Fix writing offset in case of fault in strncpy_from_kernel_nofault()
    scsi: zfcp: Fix double free of FSF request when qdio send fails
    iommu/vt-d: Set SRE bit only when hardware has SRS cap
    firmware: coreboot: Register bus in module init
    mmc: core: properly select voltage range without power cycle
    mmc: sdhci-pci-o2micro: fix card detect fail issue caused by CD# debounce timeout
    mmc: sdhci-pci: Fix possible memory leak caused by missing pci_dev_put()
    docs: update mediator contact information in CoC doc
    misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram()
    perf/x86/intel/pt: Fix sampling using single range output
    nvme: restrict management ioctls to admin
    nvme: ensure subsystem reset is single threaded
    net: fix a concurrency bug in l2tp_tunnel_register()
    ring-buffer: Include dropped pages in counting dirty patches
    usbnet: smsc95xx: Fix deadlock on runtime resume
    stddef: Introduce struct_group() helper macro
    net: use struct_group to copy ip/ipv6 header addresses
    scsi: target: tcm_loop: Fix possible name leak in tcm_loop_setup_hba_bus()
    scsi: scsi_debug: Fix possible UAF in sdebug_add_host_helper()
    kprobes: Skip clearing aggrprobe's post_handler in kprobe-on-ftrace case
    Input: i8042 - fix leaking of platform device on module removal
    uapi/linux/stddef.h: Add include guards
    macvlan: enforce a consistent minimal mtu
    tcp: cdg: allow tcp_cdg_release() to be called multiple times
    kcm: avoid potential race in kcm_tx_work
    kcm: close race conditions on sk_receive_queue
    9p: trans_fd/p9_conn_cancel: drop client lock earlier
    gfs2: Check sb_bsize_shift after reading superblock
    gfs2: Switch from strlcpy to strscpy
    9p/trans_fd: always use O_NONBLOCK read/write
    mm: fs: initialize fsdata passed to write_begin/write_end interface
    ntfs: fix use-after-free in ntfs_attr_find()
    ntfs: fix out-of-bounds read in ntfs_attr_find()
    ntfs: check overflow when iterating ATTR_RECORDs
    Revert "net: broadcom: Fix BCMGENET Kconfig"
    Linux 5.10.156

Change-Id: Ic9fe339913a510cc9fb9c4557b3bd6e196db834f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
 uncertain how to handle situations that come up. It will not be
 considered a violation report unless you want it to be. If you are
 uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Joanna Lee <joanna.lee@gesmer.com>.
+reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
 
 In the end, "be kind to each other" is really what the end goal is for
 everybody. We know everyone is human and we all fail at times, but the
Makefile | 2

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 155
+SUBLEVEL = 156
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -1221,10 +1221,10 @@ dma_apbh: dma-apbh@33000000 {
 clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
 };
 
-gpmi: nand-controller@33002000{
+gpmi: nand-controller@33002000 {
 compatible = "fsl,imx7d-gpmi-nand";
 #address-cells = <1>;
-#size-cells = <1>;
+#size-cells = <0>;
 reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
 reg-names = "gpmi-nand", "bch";
 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -939,10 +939,10 @@ dma_apbh: dma-controller@33000000 {
 clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
 };
 
-gpmi: nand-controller@33002000{
+gpmi: nand-controller@33002000 {
 compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
 #address-cells = <1>;
-#size-cells = <1>;
+#size-cells = <0>;
 reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
 reg-names = "gpmi-nand", "bch";
 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -809,7 +809,7 @@ dma_apbh: dma-controller@33000000 {
 gpmi: nand-controller@33002000 {
 compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand";
 #address-cells = <1>;
-#size-cells = <1>;
+#size-cells = <0>;
 reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
 reg-names = "gpmi-nand", "bch";
 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -41,7 +41,7 @@
 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
 #define MIDR_CPU_MODEL(imp, partnum) \
-(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
 (0xf << MIDR_ARCHITECTURE_SHIFT) | \
 ((partnum) << MIDR_PARTNUM_SHIFT))
 
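For readers less familiar with the bit-shifting UB being fixed here, a minimal stand-alone sketch (illustrative only, not from the patch): a plain constant such as 0xC0 has type int, and shifting it left by 24 produces a value that no longer fits in int, which is undefined behaviour; forcing the operand to a 32-bit unsigned type first (what _AT(u32, ...) does in the kernel macro) keeps the shift well defined.

/* Illustrative only -- hypothetical names, compile with any C compiler. */
#include <stdint.h>
#include <stdio.h>

#define SHIFT 24
#define MODEL_UB(imp)   ((imp) << SHIFT)              /* UB for imp >= 0x80 */
#define MODEL_OK(imp)   ((uint32_t)(imp) << SHIFT)    /* always defined     */

int main(void)
{
	/* MODEL_UB(0xC0) would overflow a signed int; MODEL_OK is fine. */
	printf("0x%08x\n", MODEL_OK(0xC0));   /* prints 0xc0000000 */
	return 0;
}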
@@ -1247,6 +1247,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
 if (1 << order != nr_pages)
 goto out;
 
+/*
+ * Some processors cannot always support single range for more than
+ * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
+ * also be affected, so for now rather than trying to keep track of
+ * which ones, just disable it for all.
+ */
+if (nr_pages > 1)
+goto out;
+
 buf->single = true;
 buf->nr_pages = nr_pages;
 ret = 0;
@@ -88,8 +88,8 @@ struct opal_dev {
 u64 lowest_lba;
 
 size_t pos;
-u8 cmd[IO_BUFFER_LENGTH];
-u8 resp[IO_BUFFER_LENGTH];
+u8 *cmd;
+u8 *resp;
 
 struct parsed_resp parsed;
 size_t prev_d_len;

@@ -2134,6 +2134,8 @@ void free_opal_dev(struct opal_dev *dev)
 return;
 
 clean_opal_dev(dev);
+kfree(dev->resp);
+kfree(dev->cmd);
 kfree(dev);
 }
 EXPORT_SYMBOL(free_opal_dev);

@@ -2146,17 +2148,39 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
 if (!dev)
 return NULL;
 
+/*
+ * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
+ * sure the allocated buffer is DMA-safe in that regard.
+ */
+dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+if (!dev->cmd)
+goto err_free_dev;
+
+dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+if (!dev->resp)
+goto err_free_cmd;
+
 INIT_LIST_HEAD(&dev->unlk_lst);
 mutex_init(&dev->dev_lock);
 dev->data = data;
 dev->send_recv = send_recv;
 if (check_opal_support(dev) != 0) {
 pr_debug("Opal is not supported on this device\n");
-kfree(dev);
-return NULL;
+goto err_free_resp;
 }
 
 return dev;
+
+err_free_resp:
+kfree(dev->resp);
+
+err_free_cmd:
+kfree(dev->cmd);
+
+err_free_dev:
+kfree(dev);
+
+return NULL;
 }
 EXPORT_SYMBOL(init_opal_dev);
 
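As a hedged aside on why the sed-opal buffers move off the structure and onto separate kmalloc() allocations: a buffer embedded in the middle of a larger struct can share a cache line with neighbouring members, which is unsafe to hand to code that may DMA into it on non-cache-coherent systems, whereas a standalone kmalloc() block is at least ARCH_KMALLOC_MINALIGN aligned. A minimal layout sketch (illustrative only, hypothetical struct names):

/* Illustrative only; not kernel code. */
struct bad_layout {
	size_t pos;            /* may share a cache line with buf[]      */
	u8 buf[512];           /* DMA target embedded in the struct      */
};

struct good_layout {
	size_t pos;
	u8 *buf;               /* points to its own kmalloc() allocation */
};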
@@ -1780,7 +1780,7 @@ static void speakup_con_update(struct vc_data *vc)
 {
 unsigned long flags;
 
-if (!speakup_console[vc->vc_num] || spk_parked)
+if (!speakup_console[vc->vc_num] || spk_parked || !synth)
 return;
 if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
 /* Speakup output, discard */
@@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent,
 pm_runtime_enable(dev);
 pm_runtime_forbid(dev);
 
-transport_add_device(dev);
+error = transport_add_device(dev);
+if (error)
+goto tport_transport_add_err;
 transport_configure_device(dev);
 
 error = ata_tlink_add(&ap->link);

@@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent,
 
 tport_link_err:
 transport_remove_device(dev);
+tport_transport_add_err:
 device_del(dev);
 
 tport_err:
 transport_destroy_device(dev);
 put_device(dev);
-ata_host_put(ap->host);
 return error;
 }
 

@@ -426,7 +428,9 @@ int ata_tlink_add(struct ata_link *link)
 goto tlink_err;
 }
 
-transport_add_device(dev);
+error = transport_add_device(dev);
+if (error)
+goto tlink_transport_err;
 transport_configure_device(dev);
 
 ata_for_each_dev(ata_dev, link, ALL) {

@@ -441,6 +445,7 @@ int ata_tlink_add(struct ata_link *link)
 ata_tdev_delete(ata_dev);
 }
 transport_remove_device(dev);
+tlink_transport_err:
 device_del(dev);
 tlink_err:
 transport_destroy_device(dev);

@@ -678,7 +683,13 @@ static int ata_tdev_add(struct ata_device *ata_dev)
 return error;
 }
 
-transport_add_device(dev);
+error = transport_add_device(dev);
+if (error) {
+device_del(dev);
+ata_tdev_free(ata_dev);
+return error;
+}
+
 transport_configure_device(dev);
 return 0;
 }
@@ -2720,7 +2720,7 @@ static int init_submitter(struct drbd_device *device)
 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
 {
 struct drbd_resource *resource = adm_ctx->resource;
-struct drbd_connection *connection;
+struct drbd_connection *connection, *n;
 struct drbd_device *device;
 struct drbd_peer_device *peer_device, *tmp_peer_device;
 struct gendisk *disk;

@@ -2839,7 +2839,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 out_idr_remove_vol:
 idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
-for_each_connection(connection, resource) {
+for_each_connection_safe(connection, n, resource) {
 peer_device = idr_remove(&connection->peer_devices, vnr);
 if (peer_device)
 kref_put(&connection->kref, drbd_destroy_connection);
@@ -152,12 +152,8 @@ static int coreboot_table_probe(struct platform_device *pdev)
 if (!ptr)
 return -ENOMEM;
 
-ret = bus_register(&coreboot_bus_type);
-if (!ret) {
-ret = coreboot_table_populate(dev, ptr);
-if (ret)
-bus_unregister(&coreboot_bus_type);
-}
+ret = coreboot_table_populate(dev, ptr);
+
 memunmap(ptr);
 
 return ret;

@@ -172,7 +168,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy)
 static int coreboot_table_remove(struct platform_device *pdev)
 {
 bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
-bus_unregister(&coreboot_bus_type);
 return 0;
 }
 

@@ -202,6 +197,32 @@ static struct platform_driver coreboot_table_driver = {
 .of_match_table = of_match_ptr(coreboot_of_match),
 },
 };
-module_platform_driver(coreboot_table_driver);
+
+static int __init coreboot_table_driver_init(void)
+{
+int ret;
+
+ret = bus_register(&coreboot_bus_type);
+if (ret)
+return ret;
+
+ret = platform_driver_register(&coreboot_table_driver);
+if (ret) {
+bus_unregister(&coreboot_bus_type);
+return ret;
+}
+
+return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+platform_driver_unregister(&coreboot_table_driver);
+bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
@@ -1746,7 +1746,7 @@ void dcn20_post_unlock_program_front_end(
 
 for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
 && hubp->funcs->hubp_is_flip_pending(hubp); j++)
-mdelay(1);
+udelay(1);
 }
 }
 
|
@ -379,16 +379,31 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int arcturus_check_powerplay_table(struct smu_context *smu)
|
static void arcturus_check_bxco_support(struct smu_context *smu)
|
||||||
{
|
{
|
||||||
struct smu_table_context *table_context = &smu->smu_table;
|
struct smu_table_context *table_context = &smu->smu_table;
|
||||||
struct smu_11_0_powerplay_table *powerplay_table =
|
struct smu_11_0_powerplay_table *powerplay_table =
|
||||||
table_context->power_play_table;
|
table_context->power_play_table;
|
||||||
struct smu_baco_context *smu_baco = &smu->smu_baco;
|
struct smu_baco_context *smu_baco = &smu->smu_baco;
|
||||||
|
struct amdgpu_device *adev = smu->adev;
|
||||||
|
uint32_t val;
|
||||||
|
|
||||||
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
|
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
|
||||||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
|
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
|
||||||
smu_baco->platform_support = true;
|
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
|
||||||
|
smu_baco->platform_support =
|
||||||
|
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
|
||||||
|
false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static int arcturus_check_powerplay_table(struct smu_context *smu)
|
||||||
|
{
|
||||||
|
struct smu_table_context *table_context = &smu->smu_table;
|
||||||
|
struct smu_11_0_powerplay_table *powerplay_table =
|
||||||
|
table_context->power_play_table;
|
||||||
|
|
||||||
|
arcturus_check_bxco_support(smu);
|
||||||
|
|
||||||
table_context->thermal_controller_type =
|
table_context->thermal_controller_type =
|
||||||
powerplay_table->thermal_controller_type;
|
powerplay_table->thermal_controller_type;
|
||||||
@ -2131,13 +2146,11 @@ static void arcturus_get_unique_id(struct smu_context *smu)
|
|||||||
static bool arcturus_is_baco_supported(struct smu_context *smu)
|
static bool arcturus_is_baco_supported(struct smu_context *smu)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = smu->adev;
|
struct amdgpu_device *adev = smu->adev;
|
||||||
uint32_t val;
|
|
||||||
|
|
||||||
if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
|
if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
|
return true;
|
||||||
return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int arcturus_set_df_cstate(struct smu_context *smu,
|
static int arcturus_set_df_cstate(struct smu_context *smu,
|
||||||
|
@ -338,19 +338,34 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int navi10_check_powerplay_table(struct smu_context *smu)
|
static void navi10_check_bxco_support(struct smu_context *smu)
|
||||||
{
|
{
|
||||||
struct smu_table_context *table_context = &smu->smu_table;
|
struct smu_table_context *table_context = &smu->smu_table;
|
||||||
struct smu_11_0_powerplay_table *powerplay_table =
|
struct smu_11_0_powerplay_table *powerplay_table =
|
||||||
table_context->power_play_table;
|
table_context->power_play_table;
|
||||||
struct smu_baco_context *smu_baco = &smu->smu_baco;
|
struct smu_baco_context *smu_baco = &smu->smu_baco;
|
||||||
|
struct amdgpu_device *adev = smu->adev;
|
||||||
|
uint32_t val;
|
||||||
|
|
||||||
|
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
|
||||||
|
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
|
||||||
|
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
|
||||||
|
smu_baco->platform_support =
|
||||||
|
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
|
||||||
|
false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static int navi10_check_powerplay_table(struct smu_context *smu)
|
||||||
|
{
|
||||||
|
struct smu_table_context *table_context = &smu->smu_table;
|
||||||
|
struct smu_11_0_powerplay_table *powerplay_table =
|
||||||
|
table_context->power_play_table;
|
||||||
|
|
||||||
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
|
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
|
||||||
smu->dc_controlled_by_gpio = true;
|
smu->dc_controlled_by_gpio = true;
|
||||||
|
|
||||||
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
|
navi10_check_bxco_support(smu);
|
||||||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
|
|
||||||
smu_baco->platform_support = true;
|
|
||||||
|
|
||||||
table_context->thermal_controller_type =
|
table_context->thermal_controller_type =
|
||||||
powerplay_table->thermal_controller_type;
|
powerplay_table->thermal_controller_type;
|
||||||
@ -1948,13 +1963,11 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
|
|||||||
static bool navi10_is_baco_supported(struct smu_context *smu)
|
static bool navi10_is_baco_supported(struct smu_context *smu)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = smu->adev;
|
struct amdgpu_device *adev = smu->adev;
|
||||||
uint32_t val;
|
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
|
if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
|
return true;
|
||||||
return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int navi10_set_default_od_settings(struct smu_context *smu)
|
static int navi10_set_default_od_settings(struct smu_context *smu)
|
||||||
|
@@ -294,16 +294,47 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
 return 0;
 }
 
-static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
+static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
 {
 struct smu_table_context *table_context = &smu->smu_table;
 struct smu_11_0_7_powerplay_table *powerplay_table =
 table_context->power_play_table;
 struct smu_baco_context *smu_baco = &smu->smu_baco;
+struct amdgpu_device *adev = smu->adev;
+uint32_t val;
 
 if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
-smu_baco->platform_support = true;
+powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+smu_baco->platform_support =
+(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
+false;
+
+/*
+ * Disable BACO entry/exit completely on below SKUs to
+ * avoid hardware intermittent failures.
+ */
+if (((adev->pdev->device == 0x73A1) &&
+(adev->pdev->revision == 0x00)) ||
+((adev->pdev->device == 0x73BF) &&
+(adev->pdev->revision == 0xCF)) ||
+((adev->pdev->device == 0x7422) &&
+(adev->pdev->revision == 0x00)))
+smu_baco->platform_support = false;
+
+}
+}
+
+static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
+{
+struct smu_table_context *table_context = &smu->smu_table;
+struct smu_11_0_7_powerplay_table *powerplay_table =
+table_context->power_play_table;
+
+if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+smu->dc_controlled_by_gpio = true;
+
+sienna_cichlid_check_bxco_support(smu);
+
 table_context->thermal_controller_type =
 powerplay_table->thermal_controller_type;

@@ -1736,13 +1767,11 @@ static int sienna_cichlid_run_btc(struct smu_context *smu)
 static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
 {
 struct amdgpu_device *adev = smu->adev;
-uint32_t val;
 
 if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
 return false;
 
-val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
-return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+return true;
 }
 
 static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)

@@ -2806,6 +2835,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 .run_btc = sienna_cichlid_run_btc,
+.set_power_source = smu_v11_0_set_power_source,
 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 .get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
@@ -610,7 +610,7 @@ static int drm_dev_init(struct drm_device *dev,
 mutex_init(&dev->clientlist_mutex);
 mutex_init(&dev->master_mutex);
 
-ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
 if (ret)
 return ret;
 
@@ -116,7 +116,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
 
 static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
 {
-kthread_destroy_worker(vblank->worker);
+if (vblank->worker)
+kthread_destroy_worker(vblank->worker);
 }
 
 int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
|
@ -212,8 +212,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int imx_tve_connector_mode_valid(struct drm_connector *connector,
|
static enum drm_mode_status
|
||||||
struct drm_display_mode *mode)
|
imx_tve_connector_mode_valid(struct drm_connector *connector,
|
||||||
|
struct drm_display_mode *mode)
|
||||||
{
|
{
|
||||||
struct imx_tve *tve = con_to_tve(connector);
|
struct imx_tve *tve = con_to_tve(connector);
|
||||||
unsigned long rate;
|
unsigned long rate;
|
||||||
|
@@ -2655,6 +2655,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = {
 static const struct panel_desc logictechno_lt161010_2nh = {
 .timings = &logictechno_lt161010_2nh_timing,
 .num_timings = 1,
+.bpc = 6,
 .size = {
 .width = 154,
 .height = 86,

@@ -2684,6 +2685,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = {
 static const struct panel_desc logictechno_lt170410_2whc = {
 .timings = &logictechno_lt170410_2whc_timing,
 .num_timings = 1,
+.bpc = 8,
 .size = {
 .width = 217,
 .height = 136,
@@ -1275,6 +1275,7 @@ static const struct {
 */
 { "Latitude 5480", 0x29 },
 { "Vostro V131", 0x1d },
+{ "Vostro 5568", 0x29 },
 };
 
 static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
@@ -283,6 +283,7 @@ struct tegra_i2c_dev {
 struct dma_chan *tx_dma_chan;
 struct dma_chan *rx_dma_chan;
 unsigned int dma_buf_size;
+struct device *dma_dev;
 dma_addr_t dma_phys;
 void *dma_buf;
 

@@ -419,7 +420,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
 static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
 {
 if (i2c_dev->dma_buf) {
-dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
+dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
 i2c_dev->dma_buf, i2c_dev->dma_phys);
 i2c_dev->dma_buf = NULL;
 }

@@ -466,10 +467,13 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
 
 i2c_dev->tx_dma_chan = chan;
 
+WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device);
+i2c_dev->dma_dev = chan->device->dev;
+
 i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
 I2C_PACKET_HEADER_SIZE;
 
-dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
+dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
 &dma_phys, GFP_KERNEL | __GFP_NOWARN);
 if (!dma_buf) {
 dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n");

@@ -1255,7 +1259,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 
 if (i2c_dev->dma_mode) {
 if (i2c_dev->msg_read) {
-dma_sync_single_for_device(i2c_dev->dev,
+dma_sync_single_for_device(i2c_dev->dma_dev,
 i2c_dev->dma_phys,
 xfer_size, DMA_FROM_DEVICE);
 

@@ -1263,7 +1267,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 if (err)
 return err;
 } else {
-dma_sync_single_for_cpu(i2c_dev->dev,
+dma_sync_single_for_cpu(i2c_dev->dma_dev,
 i2c_dev->dma_phys,
 xfer_size, DMA_TO_DEVICE);
 }

@@ -1276,7 +1280,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
 msg->buf, msg->len);
 
-dma_sync_single_for_device(i2c_dev->dev,
+dma_sync_single_for_device(i2c_dev->dma_dev,
 i2c_dev->dma_phys,
 xfer_size, DMA_TO_DEVICE);
 

@@ -1327,7 +1331,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 }
 
 if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
-dma_sync_single_for_cpu(i2c_dev->dev,
+dma_sync_single_for_cpu(i2c_dev->dma_dev,
 i2c_dev->dma_phys,
 xfer_size, DMA_FROM_DEVICE);
 
@@ -616,8 +616,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
 trig->ops = &at91_adc_trigger_ops;
 
 ret = iio_trigger_register(trig);
-if (ret)
+if (ret) {
+iio_trigger_free(trig);
 return NULL;
+}
 
 return trig;
 }
@@ -56,7 +56,8 @@ static struct iio_map mp2629_adc_maps[] = {
 MP2629_MAP(SYSTEM_VOLT, "system-volt"),
 MP2629_MAP(INPUT_VOLT, "input-volt"),
 MP2629_MAP(BATT_CURRENT, "batt-current"),
-MP2629_MAP(INPUT_CURRENT, "input-current")
+MP2629_MAP(INPUT_CURRENT, "input-current"),
+{ }
 };
 
 static int mp2629_read_raw(struct iio_dev *indio_dev,

@@ -73,7 +74,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev,
 if (ret)
 return ret;
 
-if (chan->address == MP2629_INPUT_VOLT)
+if (chan->channel == MP2629_INPUT_VOLT)
 rval &= GENMASK(6, 0);
 *val = rval;
 return IIO_VAL_INT;
@@ -94,7 +94,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
 spi_set_drvdata(spi, indio_dev);
 
 spi->mode = SPI_MODE_0;
-spi->max_speed_hz = 20000000;
+spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
 spi->bits_per_word = 8;
 ret = spi_setup(spi);
 if (ret < 0)
@@ -209,9 +209,13 @@ static int iio_sysfs_trigger_remove(int id)
 
 static int __init iio_sysfs_trig_init(void)
 {
+int ret;
 device_initialize(&iio_sysfs_trig_dev);
 dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
-return device_add(&iio_sysfs_trig_dev);
+ret = device_add(&iio_sysfs_trig_dev);
+if (ret)
+put_device(&iio_sysfs_trig_dev);
+return ret;
 }
 module_init(iio_sysfs_trig_init);
 
|
@ -273,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype,
|
|||||||
* Get device info.
|
* Get device info.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3)
|
if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3)
|
||||||
input_dev->id.vendor = get_unaligned_le16(buf + 1);
|
input_dev->id.vendor = get_unaligned_le16(buf + 1);
|
||||||
else
|
else
|
||||||
dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
|
dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
|
||||||
|
|
||||||
if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3)
|
if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3)
|
||||||
input_dev->id.product = get_unaligned_le16(buf + 1);
|
input_dev->id.product = get_unaligned_le16(buf + 1);
|
||||||
else
|
else
|
||||||
dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
|
dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
|
||||||
|
|
||||||
if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3)
|
if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3)
|
||||||
iforce->device_memory.end = get_unaligned_le16(buf + 1);
|
iforce->device_memory.end = get_unaligned_le16(buf + 1);
|
||||||
else
|
else
|
||||||
dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
|
dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
|
||||||
|
|
||||||
if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2)
|
if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2)
|
||||||
ff_effects = buf[1];
|
ff_effects = buf[1];
|
||||||
else
|
else
|
||||||
dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
|
dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
|
||||||
|
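A quick stand-alone illustration of the inverted iforce condition (illustrative only, not from the patch): iforce_get_id_packet() returns 0 on success, so "!get_id(...)" means "the packet was fetched". The old test also took the success branch whenever len was too short, exactly the case in which the reply cannot be trusted; the new test requires success AND enough data.

/* Illustrative only; hypothetical helper names, any C compiler will do. */
#include <stdio.h>

static int old_uses_reply(int ret, int len) { return !ret || len < 3; }
static int new_uses_reply(int ret, int len) { return !ret && len >= 3; }

int main(void)
{
	/* success but only 2 bytes returned: old code wrongly read buf[1..2] */
	printf("short reply:  old=%d new=%d\n", old_uses_reply(0, 2), new_uses_reply(0, 2));
	/* failed request: old code still took the "use the data" branch */
	printf("failed fetch: old=%d new=%d\n", old_uses_reply(-1, 2), new_uses_reply(-1, 2));
	return 0;
}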
@@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev)
 {
 int error;
 
-i8042_platform_device = dev;
-
 if (i8042_reset == I8042_RESET_ALWAYS) {
 error = i8042_controller_selftest();
 if (error)

@@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev)
 i8042_free_aux_ports(); /* in case KBD failed but AUX not */
 i8042_free_irqs();
 i8042_controller_reset(false);
-i8042_platform_device = NULL;
 
 return error;
 }

@@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev)
 i8042_unregister_ports();
 i8042_free_irqs();
 i8042_controller_reset(false);
-i8042_platform_device = NULL;
 
 return 0;
 }
@@ -669,7 +669,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 * Since it is a second level only translation setup, we should
 * set SRE bit as well (addresses are expected to be GPAs).
 */
-if (pasid != PASID_RID2PASID)
+if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
 pasid_set_sre(pte);
 pasid_set_present(pte);
 pasid_flush_caches(iommu, pte, pasid, did);

@@ -704,7 +704,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 * We should set SRE bit as well since the addresses are expected
 * to be GPAs.
 */
-pasid_set_sre(pte);
+if (ecap_srs(iommu->ecap))
+pasid_set_sre(pte);
 pasid_set_present(pte);
 pasid_flush_caches(iommu, pte, pasid, did);
 
|
@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev,
|
|||||||
|
|
||||||
err = get_free_devid();
|
err = get_free_devid();
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto error1;
|
return err;
|
||||||
dev->id = err;
|
dev->id = err;
|
||||||
|
|
||||||
device_initialize(&dev->dev);
|
device_initialize(&dev->dev);
|
||||||
|
@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
|
|||||||
if (!entry)
|
if (!entry)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&entry->list);
|
||||||
entry->elem = elem;
|
entry->elem = elem;
|
||||||
|
|
||||||
entry->dev.class = elements_class;
|
entry->dev.class = elements_class;
|
||||||
@ -107,7 +108,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
|
|||||||
device_unregister(&entry->dev);
|
device_unregister(&entry->dev);
|
||||||
return ret;
|
return ret;
|
||||||
err1:
|
err1:
|
||||||
kfree(entry);
|
put_device(&entry->dev);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(mISDN_dsp_element_register);
|
EXPORT_SYMBOL(mISDN_dsp_element_register);
|
||||||
|
@@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
 size_t *needed = needed_param;
 
 *needed += sizeof(struct dm_target_versions);
-*needed += strlen(tt->name);
+*needed += strlen(tt->name) + 1;
 *needed += ALIGN_MASK;
 }
 

@@ -638,7 +638,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char
 iter_info.old_vers = NULL;
 iter_info.vers = vers;
 iter_info.flags = 0;
-iter_info.end = (char *)vers+len;
+iter_info.end = (char *)vers + needed;
 
 /*
 * Now loop through filling out the names & versions.
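For the first of the two dm-ioctl hunks above, a tiny stand-alone reminder of the off-by-one being fixed (illustrative only, not kernel code): strlen() does not count the terminating NUL, so a buffer sized with plain strlen() is one byte short for the name that gets copied into it.

/* Illustrative only; hypothetical target name. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "linear";

	size_t too_small = strlen(name);      /* 6: no room for the '\0'  */
	size_t needed    = strlen(name) + 1;  /* 7: what the fix reserves */

	printf("%zu vs %zu\n", too_small, needed);
	return 0;
}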
@@ -63,6 +63,8 @@
 #define SPIBASE_BYT 0x54
 #define SPIBASE_BYT_SZ 512
 #define SPIBASE_BYT_EN BIT(1)
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
 
 #define SPIBASE_LPT 0x3800
 #define SPIBASE_LPT_SZ 512

@@ -1083,12 +1085,57 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
 return ret;
 }
 
+static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
+{
+u32 val;
+
+val = readl(base + BYT_BCR);
+if (!(val & BYT_BCR_WPD)) {
+val |= BYT_BCR_WPD;
+writel(val, base + BYT_BCR);
+val = readl(base + BYT_BCR);
+}
+
+return val & BYT_BCR_WPD;
+}
+
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+{
+struct pci_dev *pdev = data;
+u32 bcr;
+
+pci_read_config_dword(pdev, BCR, &bcr);
+if (!(bcr & BCR_WPD)) {
+bcr |= BCR_WPD;
+pci_write_config_dword(pdev, BCR, bcr);
+pci_read_config_dword(pdev, BCR, &bcr);
+}
+
+return bcr & BCR_WPD;
+}
+
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+unsigned int spi = PCI_DEVFN(13, 2);
+struct pci_bus *bus = data;
+u32 bcr;
+
+pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+if (!(bcr & BCR_WPD)) {
+bcr |= BCR_WPD;
+pci_bus_write_config_dword(bus, spi, BCR, bcr);
+pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+}
+
+return bcr & BCR_WPD;
+}
+
 static int lpc_ich_init_spi(struct pci_dev *dev)
 {
 struct lpc_ich_priv *priv = pci_get_drvdata(dev);
 struct resource *res = &intel_spi_res[0];
 struct intel_spi_boardinfo *info;
-u32 spi_base, rcba, bcr;
+u32 spi_base, rcba;
 
 info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
 if (!info)

@@ -1102,6 +1149,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
 if (spi_base & SPIBASE_BYT_EN) {
 res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
 res->end = res->start + SPIBASE_BYT_SZ - 1;
+
+info->set_writeable = lpc_ich_byt_set_writeable;
 }
 break;
 

@@ -1112,8 +1161,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
 res->start = spi_base + SPIBASE_LPT;
 res->end = res->start + SPIBASE_LPT_SZ - 1;
 
-pci_read_config_dword(dev, BCR, &bcr);
-info->writeable = !!(bcr & BCR_WPD);
+info->set_writeable = lpc_ich_lpt_set_writeable;
+info->data = dev;
 }
 break;
 

@@ -1134,8 +1183,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
 res->start = spi_base & 0xfffffff0;
 res->end = res->start + SPIBASE_APL_SZ - 1;
 
-pci_bus_read_config_dword(bus, spi, BCR, &bcr);
-info->writeable = !!(bcr & BCR_WPD);
+info->set_writeable = lpc_ich_bxt_set_writeable;
+info->data = bus;
 }
 
 pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
@@ -852,6 +852,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
 u32 context_id = vmci_get_context_id();
 struct vmci_event_qp ev;
 
+memset(&ev, 0, sizeof(ev));
 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 VMCI_CONTEXT_RESOURCE_ID);

@@ -1465,6 +1466,7 @@ static int qp_notify_peer(bool attach,
 * kernel.
 */
 
+memset(&ev, 0, sizeof(ev));
 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 VMCI_CONTEXT_RESOURCE_ID);
@@ -1133,7 +1133,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 mmc_power_cycle(host, ocr);
 } else {
 bit = fls(ocr) - 1;
-ocr &= 3 << bit;
+/*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' with (bit - 1).
+ */
+ocr &= 3 << (bit - 1);
 if (bit != host->ios.vdd)
 dev_warn(mmc_dev(host), "exceeding card's volts\n");
 }
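A small stand-alone sketch of the mask arithmetic in the hunk above (illustrative only, with an assumed OCR value): if the highest voltage bit a card advertises is bit 20, the old mask 3 << bit kept bits 20 and 21, i.e. a window reaching above the card's maximum, while the new mask 3 << (bit - 1) keeps bits 19 and 20 and stays within the advertised range.

/* Illustrative only; ocr value and bit position are assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned int ocr = 1U << 20;   /* card advertises only the bit-20 voltage range */
	int bit = 20;                  /* would be fls(ocr) - 1 in the kernel           */

	printf("old mask: 0x%08x\n", 3U << bit);        /* 0x00300000 */
	printf("new mask: 0x%08x\n", 3U << (bit - 1));  /* 0x00180000 */
	return 0;
}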
@@ -1621,14 +1621,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-if (host->caps & MMC_CAP_8_BIT_DATA &&
+if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
 imx_data->socdata->flags & ESDHC_FLAG_HS400)
 host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
 
 if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
 host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
 
-if (host->caps & MMC_CAP_8_BIT_DATA &&
+if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
 imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
 host->mmc->caps2 |= MMC_CAP2_HS400_ES;
 host->mmc_host_ops.hs400_enhanced_strobe =
@ -1799,6 +1799,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pci_dev_put(smbus_dev);
|
||||||
|
|
||||||
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
|
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
|
||||||
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
|
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
|
||||||
|
|
||||||
|
@ -31,6 +31,7 @@
|
|||||||
#define O2_SD_CAPS 0xE0
|
#define O2_SD_CAPS 0xE0
|
||||||
#define O2_SD_ADMA1 0xE2
|
#define O2_SD_ADMA1 0xE2
|
||||||
#define O2_SD_ADMA2 0xE7
|
#define O2_SD_ADMA2 0xE7
|
||||||
|
#define O2_SD_MISC_CTRL2 0xF0
|
||||||
#define O2_SD_INF_MOD 0xF1
|
#define O2_SD_INF_MOD 0xF1
|
||||||
#define O2_SD_MISC_CTRL4 0xFC
|
#define O2_SD_MISC_CTRL4 0xFC
|
||||||
#define O2_SD_MISC_CTRL 0x1C0
|
#define O2_SD_MISC_CTRL 0x1C0
|
||||||
@ -822,6 +823,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
|
|||||||
/* Set Tuning Windows to 5 */
|
/* Set Tuning Windows to 5 */
|
||||||
pci_write_config_byte(chip->pdev,
|
pci_write_config_byte(chip->pdev,
|
||||||
O2_SD_TUNING_CTRL, 0x55);
|
O2_SD_TUNING_CTRL, 0x55);
|
||||||
|
//Adjust 1st and 2nd CD debounce time
|
||||||
|
pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
|
||||||
|
scratch_32 &= 0xFFE7FFFF;
|
||||||
|
scratch_32 |= 0x00180000;
|
||||||
|
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
|
||||||
|
pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
|
||||||
/* Lock WP */
|
/* Lock WP */
|
||||||
ret = pci_read_config_byte(chip->pdev,
|
ret = pci_read_config_byte(chip->pdev,
|
||||||
O2_SD_LOCK_WP, &scratch);
|
O2_SD_LOCK_WP, &scratch);
|
||||||
|
@ -16,12 +16,30 @@
|
|||||||
#define BCR 0xdc
|
#define BCR 0xdc
|
||||||
#define BCR_WPD BIT(0)
|
#define BCR_WPD BIT(0)
|
||||||
|
|
||||||
|
static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
|
||||||
|
{
|
||||||
|
struct pci_dev *pdev = data;
|
||||||
|
u32 bcr;
|
||||||
|
|
||||||
|
/* Try to make the chip read/write */
|
||||||
|
pci_read_config_dword(pdev, BCR, &bcr);
|
||||||
|
if (!(bcr & BCR_WPD)) {
|
||||||
|
bcr |= BCR_WPD;
|
||||||
|
pci_write_config_dword(pdev, BCR, bcr);
|
||||||
|
pci_read_config_dword(pdev, BCR, &bcr);
|
||||||
|
}
|
||||||
|
|
||||||
|
return bcr & BCR_WPD;
|
||||||
|
}
|
||||||
|
|
||||||
static const struct intel_spi_boardinfo bxt_info = {
|
static const struct intel_spi_boardinfo bxt_info = {
|
||||||
.type = INTEL_SPI_BXT,
|
.type = INTEL_SPI_BXT,
|
||||||
|
.set_writeable = intel_spi_pci_set_writeable,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct intel_spi_boardinfo cnl_info = {
|
static const struct intel_spi_boardinfo cnl_info = {
|
||||||
.type = INTEL_SPI_CNL,
|
.type = INTEL_SPI_CNL,
|
||||||
|
.set_writeable = intel_spi_pci_set_writeable,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int intel_spi_pci_probe(struct pci_dev *pdev,
|
static int intel_spi_pci_probe(struct pci_dev *pdev,
|
||||||
@ -29,7 +47,6 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
|
|||||||
{
|
{
|
||||||
struct intel_spi_boardinfo *info;
|
struct intel_spi_boardinfo *info;
|
||||||
struct intel_spi *ispi;
|
struct intel_spi *ispi;
|
||||||
u32 bcr;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = pcim_enable_device(pdev);
|
ret = pcim_enable_device(pdev);
|
||||||
@ -41,15 +58,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
|
|||||||
if (!info)
|
if (!info)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
/* Try to make the chip read/write */
|
info->data = pdev;
|
||||||
pci_read_config_dword(pdev, BCR, &bcr);
|
|
||||||
if (!(bcr & BCR_WPD)) {
|
|
||||||
bcr |= BCR_WPD;
|
|
||||||
pci_write_config_dword(pdev, BCR, bcr);
|
|
||||||
pci_read_config_dword(pdev, BCR, &bcr);
|
|
||||||
}
|
|
||||||
info->writeable = !!(bcr & BCR_WPD);
|
|
||||||
|
|
||||||
ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
|
ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
|
||||||
if (IS_ERR(ispi))
|
if (IS_ERR(ispi))
|
||||||
return PTR_ERR(ispi);
|
return PTR_ERR(ispi);
|
||||||
|
@ -53,17 +53,17 @@
|
|||||||
#define FRACC 0x50
|
#define FRACC 0x50
|
||||||
|
|
||||||
#define FREG(n) (0x54 + ((n) * 4))
|
#define FREG(n) (0x54 + ((n) * 4))
|
||||||
#define FREG_BASE_MASK 0x3fff
|
#define FREG_BASE_MASK GENMASK(14, 0)
|
||||||
#define FREG_LIMIT_SHIFT 16
|
#define FREG_LIMIT_SHIFT 16
|
||||||
#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
|
#define FREG_LIMIT_MASK GENMASK(30, 16)
|
||||||
|
|
||||||
/* Offset is from @ispi->pregs */
|
/* Offset is from @ispi->pregs */
|
||||||
#define PR(n) ((n) * 4)
|
#define PR(n) ((n) * 4)
|
||||||
#define PR_WPE BIT(31)
|
#define PR_WPE BIT(31)
|
||||||
#define PR_LIMIT_SHIFT 16
|
#define PR_LIMIT_SHIFT 16
|
||||||
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
|
#define PR_LIMIT_MASK GENMASK(30, 16)
|
||||||
#define PR_RPE BIT(15)
|
#define PR_RPE BIT(15)
|
||||||
#define PR_BASE_MASK 0x3fff
|
#define PR_BASE_MASK GENMASK(14, 0)
|
||||||
|
|
||||||
/* Offsets are from @ispi->sregs */
|
/* Offsets are from @ispi->sregs */
|
||||||
#define SSFSTS_CTL 0x00
|
#define SSFSTS_CTL 0x00
|
||||||
@ -117,7 +117,7 @@
|
|||||||
#define ERASE_OPCODE_SHIFT 8
|
#define ERASE_OPCODE_SHIFT 8
|
||||||
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
|
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
|
||||||
#define ERASE_64K_OPCODE_SHIFT 16
|
#define ERASE_64K_OPCODE_SHIFT 16
|
||||||
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
|
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
|
||||||
|
|
||||||
#define INTEL_SPI_TIMEOUT 5000 /* ms */
|
#define INTEL_SPI_TIMEOUT 5000 /* ms */
|
||||||
#define INTEL_SPI_FIFO_SZ 64
|
#define INTEL_SPI_FIFO_SZ 64
|
||||||
@ -132,7 +132,6 @@
|
|||||||
* @sregs: Start of software sequencer registers
|
* @sregs: Start of software sequencer registers
|
||||||
* @nregions: Maximum number of regions
|
* @nregions: Maximum number of regions
|
||||||
* @pr_num: Maximum number of protected range registers
|
* @pr_num: Maximum number of protected range registers
|
||||||
* @writeable: Is the chip writeable
|
|
||||||
* @locked: Is SPI setting locked
|
* @locked: Is SPI setting locked
|
||||||
* @swseq_reg: Use SW sequencer in register reads/writes
|
* @swseq_reg: Use SW sequencer in register reads/writes
|
||||||
* @swseq_erase: Use SW sequencer in erase operation
|
* @swseq_erase: Use SW sequencer in erase operation
|
||||||
@ -150,7 +149,6 @@ struct intel_spi {
|
|||||||
void __iomem *sregs;
|
void __iomem *sregs;
|
||||||
size_t nregions;
|
size_t nregions;
|
||||||
size_t pr_num;
|
size_t pr_num;
|
||||||
bool writeable;
|
|
||||||
bool locked;
|
bool locked;
|
||||||
bool swseq_reg;
|
bool swseq_reg;
|
||||||
bool swseq_erase;
|
bool swseq_erase;
|
||||||
@ -305,6 +303,14 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
|
|||||||
INTEL_SPI_TIMEOUT * 1000);
|
INTEL_SPI_TIMEOUT * 1000);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool intel_spi_set_writeable(struct intel_spi *ispi)
|
||||||
|
{
|
||||||
|
if (!ispi->info->set_writeable)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return ispi->info->set_writeable(ispi->base, ispi->info->data);
|
||||||
|
}
|
||||||
|
|
||||||
static int intel_spi_init(struct intel_spi *ispi)
|
static int intel_spi_init(struct intel_spi *ispi)
|
||||||
{
|
{
|
||||||
u32 opmenu0, opmenu1, lvscc, uvscc, val;
|
u32 opmenu0, opmenu1, lvscc, uvscc, val;
|
||||||
@ -317,19 +323,6 @@ static int intel_spi_init(struct intel_spi *ispi)
|
|||||||
ispi->nregions = BYT_FREG_NUM;
|
ispi->nregions = BYT_FREG_NUM;
|
||||||
ispi->pr_num = BYT_PR_NUM;
|
ispi->pr_num = BYT_PR_NUM;
|
||||||
ispi->swseq_reg = true;
|
ispi->swseq_reg = true;
|
||||||
|
|
||||||
if (writeable) {
|
|
||||||
/* Disable write protection */
|
|
||||||
val = readl(ispi->base + BYT_BCR);
|
|
||||||
if (!(val & BYT_BCR_WPD)) {
|
|
||||||
val |= BYT_BCR_WPD;
|
|
||||||
writel(val, ispi->base + BYT_BCR);
|
|
||||||
val = readl(ispi->base + BYT_BCR);
|
|
||||||
}
|
|
||||||
|
|
||||||
ispi->writeable = !!(val & BYT_BCR_WPD);
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case INTEL_SPI_LPT:
|
case INTEL_SPI_LPT:
|
||||||
@ -359,6 +352,12 @@ static int intel_spi_init(struct intel_spi *ispi)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Try to disable write protection if user asked to do so */
|
||||||
|
if (writeable && !intel_spi_set_writeable(ispi)) {
|
||||||
|
dev_warn(ispi->dev, "can't disable chip write protection\n");
|
||||||
|
writeable = false;
|
||||||
|
}
|
||||||
|
|
||||||
/* Disable #SMI generation from HW sequencer */
|
/* Disable #SMI generation from HW sequencer */
|
||||||
val = readl(ispi->base + HSFSTS_CTL);
|
val = readl(ispi->base + HSFSTS_CTL);
|
||||||
val &= ~HSFSTS_CTL_FSMIE;
|
val &= ~HSFSTS_CTL_FSMIE;
|
||||||
@ -885,9 +884,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
|
|||||||
/*
|
/*
|
||||||
* If any of the regions have protection bits set, make the
|
* If any of the regions have protection bits set, make the
|
||||||
* whole partition read-only to be on the safe side.
|
* whole partition read-only to be on the safe side.
|
||||||
|
*
|
||||||
|
* Also if the user did not ask the chip to be writeable
|
||||||
|
* mask the bit too.
|
||||||
*/
|
*/
|
||||||
if (intel_spi_is_protected(ispi, base, limit))
|
if (!writeable || intel_spi_is_protected(ispi, base, limit))
|
||||||
ispi->writeable = false;
|
part->mask_flags |= MTD_WRITEABLE;
|
||||||
|
|
||||||
end = (limit << 12) + 4096;
|
end = (limit << 12) + 4096;
|
||||||
if (end > part->size)
|
if (end > part->size)
|
||||||
@ -928,7 +930,6 @@ struct intel_spi *intel_spi_probe(struct device *dev,
|
|||||||
|
|
||||||
ispi->dev = dev;
|
ispi->dev = dev;
|
||||||
ispi->info = info;
|
ispi->info = info;
|
||||||
ispi->writeable = info->writeable;
|
|
||||||
|
|
||||||
ret = intel_spi_init(ispi);
|
ret = intel_spi_init(ispi);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -946,10 +947,6 @@ struct intel_spi *intel_spi_probe(struct device *dev,
|
|||||||
|
|
||||||
intel_spi_fill_partition(ispi, &part);
|
intel_spi_fill_partition(ispi, &part);
|
||||||
|
|
||||||
/* Prevent writes if not explicitly enabled */
|
|
||||||
if (!ispi->writeable || !writeable)
|
|
||||||
ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
|
|
||||||
|
|
||||||
ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
|
ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
@ -4488,13 +4488,19 @@ static struct pci_driver ena_pci_driver = {
|
|||||||
|
|
||||||
static int __init ena_init(void)
|
static int __init ena_init(void)
|
||||||
{
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
|
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
|
||||||
if (!ena_wq) {
|
if (!ena_wq) {
|
||||||
pr_err("Failed to create workqueue\n");
|
pr_err("Failed to create workqueue\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
return pci_register_driver(&ena_pci_driver);
|
ret = pci_register_driver(&ena_pci_driver);
|
||||||
|
if (ret)
|
||||||
|
destroy_workqueue(ena_wq);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __exit ena_cleanup(void)
|
static void __exit ena_cleanup(void)
|
||||||
|
@ -1468,7 +1468,7 @@ static int ag71xx_open(struct net_device *ndev)
|
|||||||
if (ret) {
|
if (ret) {
|
||||||
netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
|
netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
|
||||||
ret);
|
ret);
|
||||||
goto err;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
|
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
|
||||||
@ -1489,6 +1489,7 @@ static int ag71xx_open(struct net_device *ndev)
|
|||||||
|
|
||||||
err:
|
err:
|
||||||
ag71xx_rings_cleanup(ag);
|
ag71xx_rings_cleanup(ag);
|
||||||
|
phylink_disconnect_phy(ag->phylink);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ config BCMGENET
|
|||||||
select BCM7XXX_PHY
|
select BCM7XXX_PHY
|
||||||
select MDIO_BCM_UNIMAC
|
select MDIO_BCM_UNIMAC
|
||||||
select DIMLIB
|
select DIMLIB
|
||||||
select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
|
select BROADCOM_PHY if ARCH_BCM2835
|
||||||
help
|
help
|
||||||
This driver supports the built-in Ethernet MACs found in the
|
This driver supports the built-in Ethernet MACs found in the
|
||||||
Broadcom BCM7xxx Set Top Box family chipset.
|
Broadcom BCM7xxx Set Top Box family chipset.
|
||||||
|
@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
|
|||||||
phy_disconnect(bgmac->net_dev->phydev);
|
phy_disconnect(bgmac->net_dev->phydev);
|
||||||
netif_napi_del(&bgmac->napi);
|
netif_napi_del(&bgmac->napi);
|
||||||
bgmac_dma_free(bgmac);
|
bgmac_dma_free(bgmac);
|
||||||
free_netdev(bgmac->net_dev);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
|
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
|
||||||
|
|
||||||
|
@ -13111,8 +13111,16 @@ static struct pci_driver bnxt_pci_driver = {
|
|||||||
|
|
||||||
static int __init bnxt_init(void)
|
static int __init bnxt_init(void)
|
||||||
{
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
bnxt_debug_init();
|
bnxt_debug_init();
|
||||||
return pci_register_driver(&bnxt_pci_driver);
|
err = pci_register_driver(&bnxt_pci_driver);
|
||||||
|
if (err) {
|
||||||
|
bnxt_debug_exit();
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __exit bnxt_exit(void)
|
static void __exit bnxt_exit(void)
|
||||||
|
@ -1798,13 +1798,10 @@ static int liquidio_open(struct net_device *netdev)
|
|||||||
|
|
||||||
ifstate_set(lio, LIO_IFSTATE_RUNNING);
|
ifstate_set(lio, LIO_IFSTATE_RUNNING);
|
||||||
|
|
||||||
if (OCTEON_CN23XX_PF(oct)) {
|
if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) {
|
||||||
if (!oct->msix_on)
|
ret = setup_tx_poll_fn(netdev);
|
||||||
if (setup_tx_poll_fn(netdev))
|
if (ret)
|
||||||
return -1;
|
goto err_poll;
|
||||||
} else {
|
|
||||||
if (setup_tx_poll_fn(netdev))
|
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
netif_tx_start_all_queues(netdev);
|
netif_tx_start_all_queues(netdev);
|
||||||
@ -1817,7 +1814,7 @@ static int liquidio_open(struct net_device *netdev)
|
|||||||
/* tell Octeon to start forwarding packets to host */
|
/* tell Octeon to start forwarding packets to host */
|
||||||
ret = send_rx_ctrl_cmd(lio, 1);
|
ret = send_rx_ctrl_cmd(lio, 1);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
goto err_rx_ctrl;
|
||||||
|
|
||||||
/* start periodical statistics fetch */
|
/* start periodical statistics fetch */
|
||||||
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
|
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
|
||||||
@ -1828,6 +1825,27 @@ static int liquidio_open(struct net_device *netdev)
|
|||||||
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
|
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
|
||||||
netdev->name);
|
netdev->name);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
err_rx_ctrl:
|
||||||
|
if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on))
|
||||||
|
cleanup_tx_poll_fn(netdev);
|
||||||
|
err_poll:
|
||||||
|
if (lio->ptp_clock) {
|
||||||
|
ptp_clock_unregister(lio->ptp_clock);
|
||||||
|
lio->ptp_clock = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (oct->props[lio->ifidx].napi_enabled == 1) {
|
||||||
|
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
|
||||||
|
napi_disable(napi);
|
||||||
|
|
||||||
|
oct->props[lio->ifidx].napi_enabled = 0;
|
||||||
|
|
||||||
|
if (OCTEON_CN23XX_PF(oct))
|
||||||
|
oct->droq[0]->ops.poll_mode = 0;
|
||||||
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1502,8 +1502,15 @@ static struct pci_driver hinic_driver = {
|
|||||||
|
|
||||||
static int __init hinic_module_init(void)
|
static int __init hinic_module_init(void)
|
||||||
{
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
hinic_dbg_register_debugfs(HINIC_DRV_NAME);
|
hinic_dbg_register_debugfs(HINIC_DRV_NAME);
|
||||||
return pci_register_driver(&hinic_driver);
|
|
||||||
|
ret = pci_register_driver(&hinic_driver);
|
||||||
|
if (ret)
|
||||||
|
hinic_dbg_unregister_debugfs();
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __exit hinic_module_exit(void)
|
static void __exit hinic_module_exit(void)
|
||||||
|
@ -569,8 +569,14 @@ int ionic_port_reset(struct ionic *ionic)
|
|||||||
|
|
||||||
static int __init ionic_init_module(void)
|
static int __init ionic_init_module(void)
|
||||||
{
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
ionic_debugfs_create();
|
ionic_debugfs_create();
|
||||||
return ionic_bus_register_driver();
|
ret = ionic_bus_register_driver();
|
||||||
|
if (ret)
|
||||||
|
ionic_debugfs_destroy();
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __exit ionic_cleanup_module(void)
|
static void __exit ionic_cleanup_module(void)
|
||||||
|
@ -139,7 +139,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
|
|||||||
u32 idx = macvlan_eth_hash(addr);
|
u32 idx = macvlan_eth_hash(addr);
|
||||||
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
|
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
|
||||||
|
|
||||||
hlist_for_each_entry_rcu(entry, h, hlist) {
|
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
|
||||||
if (ether_addr_equal_64bits(entry->addr, addr) &&
|
if (ether_addr_equal_64bits(entry->addr, addr) &&
|
||||||
entry->vlan == vlan)
|
entry->vlan == vlan)
|
||||||
return entry;
|
return entry;
|
||||||
@ -1176,7 +1176,7 @@ void macvlan_common_setup(struct net_device *dev)
|
|||||||
{
|
{
|
||||||
ether_setup(dev);
|
ether_setup(dev);
|
||||||
|
|
||||||
dev->min_mtu = 0;
|
/* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
|
||||||
dev->max_mtu = ETH_MAX_MTU;
|
dev->max_mtu = ETH_MAX_MTU;
|
||||||
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
||||||
netif_keep_dst(dev);
|
netif_keep_dst(dev);
|
||||||
@ -1614,7 +1614,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
|
|||||||
struct hlist_head *h = &vlan->port->vlan_source_hash[i];
|
struct hlist_head *h = &vlan->port->vlan_source_hash[i];
|
||||||
struct macvlan_source_entry *entry;
|
struct macvlan_source_entry *entry;
|
||||||
|
|
||||||
hlist_for_each_entry_rcu(entry, h, hlist) {
|
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
|
||||||
if (entry->vlan != vlan)
|
if (entry->vlan != vlan)
|
||||||
continue;
|
continue;
|
||||||
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
|
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
|
||||||
|
@ -1343,12 +1343,21 @@ static int __init tbnet_init(void)
|
|||||||
TBNET_MATCH_FRAGS_ID);
|
TBNET_MATCH_FRAGS_ID);
|
||||||
|
|
||||||
ret = tb_register_property_dir("network", tbnet_dir);
|
ret = tb_register_property_dir("network", tbnet_dir);
|
||||||
if (ret) {
|
if (ret)
|
||||||
tb_property_free_dir(tbnet_dir);
|
goto err_free_dir;
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
return tb_register_service_driver(&tbnet_driver);
|
ret = tb_register_service_driver(&tbnet_driver);
|
||||||
|
if (ret)
|
||||||
|
goto err_unregister;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
err_unregister:
|
||||||
|
tb_unregister_property_dir("network", tbnet_dir);
|
||||||
|
err_free_dir:
|
||||||
|
tb_property_free_dir(tbnet_dir);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
module_init(tbnet_init);
|
module_init(tbnet_init);
|
||||||
|
|
||||||
|
@ -61,6 +61,7 @@ struct smsc95xx_priv {
|
|||||||
u8 suspend_flags;
|
u8 suspend_flags;
|
||||||
struct mii_bus *mdiobus;
|
struct mii_bus *mdiobus;
|
||||||
struct phy_device *phydev;
|
struct phy_device *phydev;
|
||||||
|
struct task_struct *pm_task;
|
||||||
};
|
};
|
||||||
|
|
||||||
static bool turbo_mode = true;
|
static bool turbo_mode = true;
|
||||||
@ -70,13 +71,14 @@ MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
|
|||||||
static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
|
static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
|
||||||
u32 *data, int in_pm)
|
u32 *data, int in_pm)
|
||||||
{
|
{
|
||||||
|
struct smsc95xx_priv *pdata = dev->driver_priv;
|
||||||
u32 buf;
|
u32 buf;
|
||||||
int ret;
|
int ret;
|
||||||
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
|
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
|
||||||
|
|
||||||
BUG_ON(!dev);
|
BUG_ON(!dev);
|
||||||
|
|
||||||
if (!in_pm)
|
if (current != pdata->pm_task)
|
||||||
fn = usbnet_read_cmd;
|
fn = usbnet_read_cmd;
|
||||||
else
|
else
|
||||||
fn = usbnet_read_cmd_nopm;
|
fn = usbnet_read_cmd_nopm;
|
||||||
@ -100,13 +102,14 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
|
|||||||
static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
|
static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
|
||||||
u32 data, int in_pm)
|
u32 data, int in_pm)
|
||||||
{
|
{
|
||||||
|
struct smsc95xx_priv *pdata = dev->driver_priv;
|
||||||
u32 buf;
|
u32 buf;
|
||||||
int ret;
|
int ret;
|
||||||
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
|
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
|
||||||
|
|
||||||
BUG_ON(!dev);
|
BUG_ON(!dev);
|
||||||
|
|
||||||
if (!in_pm)
|
if (current != pdata->pm_task)
|
||||||
fn = usbnet_write_cmd;
|
fn = usbnet_write_cmd;
|
||||||
else
|
else
|
||||||
fn = usbnet_write_cmd_nopm;
|
fn = usbnet_write_cmd_nopm;
|
||||||
@ -1468,9 +1471,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
|
|||||||
u32 val, link_up;
|
u32 val, link_up;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
pdata->pm_task = current;
|
||||||
|
|
||||||
ret = usbnet_suspend(intf, message);
|
ret = usbnet_suspend(intf, message);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
netdev_warn(dev->net, "usbnet_suspend error\n");
|
netdev_warn(dev->net, "usbnet_suspend error\n");
|
||||||
|
pdata->pm_task = NULL;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1717,6 +1723,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
|
|||||||
if (ret && PMSG_IS_AUTO(message))
|
if (ret && PMSG_IS_AUTO(message))
|
||||||
usbnet_resume(intf);
|
usbnet_resume(intf);
|
||||||
|
|
||||||
|
pdata->pm_task = NULL;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1737,29 +1744,31 @@ static int smsc95xx_resume(struct usb_interface *intf)
|
|||||||
/* do this first to ensure it's cleared even in error case */
|
/* do this first to ensure it's cleared even in error case */
|
||||||
pdata->suspend_flags = 0;
|
pdata->suspend_flags = 0;
|
||||||
|
|
||||||
|
pdata->pm_task = current;
|
||||||
|
|
||||||
if (suspend_flags & SUSPEND_ALLMODES) {
|
if (suspend_flags & SUSPEND_ALLMODES) {
|
||||||
/* clear wake-up sources */
|
/* clear wake-up sources */
|
||||||
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
|
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
goto done;
|
||||||
|
|
||||||
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
|
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
|
||||||
|
|
||||||
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
|
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
goto done;
|
||||||
|
|
||||||
/* clear wake-up status */
|
/* clear wake-up status */
|
||||||
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
|
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
goto done;
|
||||||
|
|
||||||
val &= ~PM_CTL_WOL_EN_;
|
val &= ~PM_CTL_WOL_EN_;
|
||||||
val |= PM_CTL_WUPS_;
|
val |= PM_CTL_WUPS_;
|
||||||
|
|
||||||
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
|
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = usbnet_resume(intf);
|
ret = usbnet_resume(intf);
|
||||||
@ -1767,15 +1776,21 @@ static int smsc95xx_resume(struct usb_interface *intf)
|
|||||||
netdev_warn(dev->net, "usbnet_resume error\n");
|
netdev_warn(dev->net, "usbnet_resume error\n");
|
||||||
|
|
||||||
phy_init_hw(pdata->phydev);
|
phy_init_hw(pdata->phydev);
|
||||||
|
|
||||||
|
done:
|
||||||
|
pdata->pm_task = NULL;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int smsc95xx_reset_resume(struct usb_interface *intf)
|
static int smsc95xx_reset_resume(struct usb_interface *intf)
|
||||||
{
|
{
|
||||||
struct usbnet *dev = usb_get_intfdata(intf);
|
struct usbnet *dev = usb_get_intfdata(intf);
|
||||||
|
struct smsc95xx_priv *pdata = dev->driver_priv;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
pdata->pm_task = current;
|
||||||
ret = smsc95xx_reset(dev);
|
ret = smsc95xx_reset(dev);
|
||||||
|
pdata->pm_task = NULL;
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -3330,11 +3330,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
|
|||||||
case NVME_IOCTL_IO_CMD:
|
case NVME_IOCTL_IO_CMD:
|
||||||
return nvme_dev_user_cmd(ctrl, argp);
|
return nvme_dev_user_cmd(ctrl, argp);
|
||||||
case NVME_IOCTL_RESET:
|
case NVME_IOCTL_RESET:
|
||||||
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
|
return -EACCES;
|
||||||
dev_warn(ctrl->device, "resetting controller\n");
|
dev_warn(ctrl->device, "resetting controller\n");
|
||||||
return nvme_reset_ctrl_sync(ctrl);
|
return nvme_reset_ctrl_sync(ctrl);
|
||||||
case NVME_IOCTL_SUBSYS_RESET:
|
case NVME_IOCTL_SUBSYS_RESET:
|
||||||
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
|
return -EACCES;
|
||||||
return nvme_reset_subsystem(ctrl);
|
return nvme_reset_subsystem(ctrl);
|
||||||
case NVME_IOCTL_RESCAN:
|
case NVME_IOCTL_RESCAN:
|
||||||
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
|
return -EACCES;
|
||||||
nvme_queue_scan(ctrl);
|
nvme_queue_scan(ctrl);
|
||||||
return 0;
|
return 0;
|
||||||
default:
|
default:
|
||||||
|
@ -544,11 +544,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
|
|||||||
static inline void nvme_should_fail(struct request *req) {}
|
static inline void nvme_should_fail(struct request *req) {}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
|
||||||
|
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
|
||||||
|
|
||||||
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
|
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
|
||||||
{
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (!ctrl->subsystem)
|
if (!ctrl->subsystem)
|
||||||
return -ENOTTY;
|
return -ENOTTY;
|
||||||
return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
|
if (!nvme_wait_reset(ctrl))
|
||||||
|
return -EBUSY;
|
||||||
|
|
||||||
|
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
return nvme_try_sched_reset(ctrl);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -635,7 +647,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
|
|||||||
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
|
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
|
||||||
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
|
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
|
||||||
enum nvme_ctrl_state new_state);
|
enum nvme_ctrl_state new_state);
|
||||||
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
|
|
||||||
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
|
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
|
||||||
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
|
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
|
||||||
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
|
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
|
||||||
@ -688,7 +699,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
|
|||||||
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
|
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
|
||||||
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
|
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
|
||||||
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
|
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
|
||||||
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
|
|
||||||
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
|
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
|
||||||
|
|
||||||
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
|
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
|
||||||
|
@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port,
|
|||||||
const unsigned char *bufp = buf;
|
const unsigned char *bufp = buf;
|
||||||
size_t left = length;
|
size_t left = length;
|
||||||
unsigned long expire = jiffies + port->physport->cad->timeout;
|
unsigned long expire = jiffies + port->physport->cad->timeout;
|
||||||
const int fifo = FIFO(port);
|
const unsigned long fifo = FIFO(port);
|
||||||
int poll_for = 8; /* 80 usecs */
|
int poll_for = 8; /* 80 usecs */
|
||||||
const struct parport_pc_private *priv = port->physport->private_data;
|
const struct parport_pc_private *priv = port->physport->private_data;
|
||||||
const int fifo_depth = priv->fifo_depth;
|
const int fifo_depth = priv->fifo_depth;
|
||||||
|
@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
|
|||||||
for (state = 0; ; state++) {
|
for (state = 0; ; state++) {
|
||||||
/* Retrieve the pinctrl-* property */
|
/* Retrieve the pinctrl-* property */
|
||||||
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
|
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
|
||||||
|
if (!propname)
|
||||||
|
return -ENOMEM;
|
||||||
prop = of_find_property(np, propname, &size);
|
prop = of_find_property(np, propname, &size);
|
||||||
kfree(propname);
|
kfree(propname);
|
||||||
if (!prop) {
|
if (!prop) {
|
||||||
|
@ -18,6 +18,8 @@
|
|||||||
#include <asm/cpu_device_id.h>
|
#include <asm/cpu_device_id.h>
|
||||||
#include <asm/intel-family.h>
|
#include <asm/intel-family.h>
|
||||||
|
|
||||||
|
#include <xen/xen.h>
|
||||||
|
|
||||||
static void intel_pmc_core_release(struct device *dev)
|
static void intel_pmc_core_release(struct device *dev)
|
||||||
{
|
{
|
||||||
kfree(dev);
|
kfree(dev);
|
||||||
@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void)
|
|||||||
if (acpi_dev_present("INT33A1", NULL, -1))
|
if (acpi_dev_present("INT33A1", NULL, -1))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Skip forcefully attaching the device for VMs. Make an exception for
|
||||||
|
* Xen dom0, which does have full hardware access.
|
||||||
|
*/
|
||||||
|
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
if (!x86_match_cpu(intel_pmc_core_platform_ids))
|
if (!x86_match_cpu(intel_pmc_core_platform_ids))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
|
@ -866,7 +866,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
|
|||||||
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
|
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
|
||||||
struct zfcp_adapter *adapter = req->adapter;
|
struct zfcp_adapter *adapter = req->adapter;
|
||||||
struct zfcp_qdio *qdio = adapter->qdio;
|
struct zfcp_qdio *qdio = adapter->qdio;
|
||||||
int req_id = req->req_id;
|
unsigned long req_id = req->req_id;
|
||||||
|
|
||||||
zfcp_reqlist_add(adapter->req_list, req);
|
zfcp_reqlist_add(adapter->req_list, req);
|
||||||
|
|
||||||
|
@ -7079,8 +7079,12 @@ static int sdebug_add_host_helper(int per_host_idx)
|
|||||||
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
|
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
|
||||||
|
|
||||||
error = device_register(&sdbg_host->dev);
|
error = device_register(&sdbg_host->dev);
|
||||||
if (error)
|
if (error) {
|
||||||
|
spin_lock(&sdebug_host_list_lock);
|
||||||
|
list_del(&sdbg_host->host_list);
|
||||||
|
spin_unlock(&sdebug_host_list_lock);
|
||||||
goto clean;
|
goto clean;
|
||||||
|
}
|
||||||
|
|
||||||
++sdebug_num_hosts;
|
++sdebug_num_hosts;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -835,6 +835,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster,
|
|||||||
|
|
||||||
err_device_register:
|
err_device_register:
|
||||||
/* don't care to make the buffer smaller again */
|
/* don't care to make the buffer smaller again */
|
||||||
|
put_device(&sdevice->dev);
|
||||||
|
sdevice = NULL;
|
||||||
|
|
||||||
err_buf_alloc:
|
err_buf_alloc:
|
||||||
siox_master_unlock(smaster);
|
siox_master_unlock(smaster);
|
||||||
|
@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = {
|
|||||||
384000,
|
384000,
|
||||||
768000,
|
768000,
|
||||||
0, /* Reserved */
|
0, /* Reserved */
|
||||||
110250,
|
11025,
|
||||||
220500,
|
22050,
|
||||||
441000,
|
44100,
|
||||||
882000,
|
88200,
|
||||||
176400,
|
176400,
|
||||||
352800,
|
352800,
|
||||||
705600,
|
705600,
|
||||||
|
@ -941,6 +941,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
|
|||||||
static DEFINE_RATELIMIT_STATE(rs,
|
static DEFINE_RATELIMIT_STATE(rs,
|
||||||
DEFAULT_RATELIMIT_INTERVAL * 10,
|
DEFAULT_RATELIMIT_INTERVAL * 10,
|
||||||
1);
|
1);
|
||||||
|
ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
|
||||||
if (__ratelimit(&rs))
|
if (__ratelimit(&rs))
|
||||||
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
|
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
|
||||||
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
|
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
|
||||||
|
@ -394,6 +394,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
|
|||||||
ret = device_register(&tl_hba->dev);
|
ret = device_register(&tl_hba->dev);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
|
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
|
||||||
|
put_device(&tl_hba->dev);
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1072,7 +1073,7 @@ static struct se_wwn *tcm_loop_make_scsi_hba(
|
|||||||
*/
|
*/
|
||||||
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
|
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
return ERR_PTR(ret);
|
||||||
|
|
||||||
sh = tl_hba->sh;
|
sh = tl_hba->sh;
|
||||||
tcm_loop_hba_no_cnt++;
|
tcm_loop_hba_no_cnt++;
|
||||||
|
@ -1416,7 +1416,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
|
|||||||
unsigned int command, u8 *data, int clen)
|
unsigned int command, u8 *data, int clen)
|
||||||
{
|
{
|
||||||
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
|
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
|
||||||
GFP_KERNEL);
|
GFP_ATOMIC);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
if (ctrl == NULL)
|
if (ctrl == NULL)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -268,8 +268,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
|
|||||||
struct dw_dma_slave *rx_param, *tx_param;
|
struct dw_dma_slave *rx_param, *tx_param;
|
||||||
struct device *dev = port->port.dev;
|
struct device *dev = port->port.dev;
|
||||||
|
|
||||||
if (!lpss->dma_param.dma_dev)
|
if (!lpss->dma_param.dma_dev) {
|
||||||
|
dma = port->dma;
|
||||||
|
if (dma)
|
||||||
|
goto out_configuration_only;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
|
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
|
||||||
if (!rx_param)
|
if (!rx_param)
|
||||||
@ -280,16 +285,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
*rx_param = lpss->dma_param;
|
*rx_param = lpss->dma_param;
|
||||||
dma->rxconf.src_maxburst = lpss->dma_maxburst;
|
|
||||||
|
|
||||||
*tx_param = lpss->dma_param;
|
*tx_param = lpss->dma_param;
|
||||||
dma->txconf.dst_maxburst = lpss->dma_maxburst;
|
|
||||||
|
|
||||||
dma->fn = lpss8250_dma_filter;
|
dma->fn = lpss8250_dma_filter;
|
||||||
dma->rx_param = rx_param;
|
dma->rx_param = rx_param;
|
||||||
dma->tx_param = tx_param;
|
dma->tx_param = tx_param;
|
||||||
|
|
||||||
port->dma = dma;
|
port->dma = dma;
|
||||||
|
|
||||||
|
out_configuration_only:
|
||||||
|
dma->rxconf.src_maxburst = lpss->dma_maxburst;
|
||||||
|
dma->txconf.dst_maxburst = lpss->dma_maxburst;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg)
|
|||||||
return readl(up->port.membase + (reg << up->port.regshift));
|
return readl(up->port.membase + (reg << up->port.regshift));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
/*
|
||||||
|
* Called on runtime PM resume path from omap8250_restore_regs(), and
|
||||||
|
* omap8250_set_mctrl().
|
||||||
|
*/
|
||||||
|
static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
||||||
{
|
{
|
||||||
struct uart_8250_port *up = up_to_u8250p(port);
|
struct uart_8250_port *up = up_to_u8250p(port);
|
||||||
struct omap8250_priv *priv = up->port.private_data;
|
struct omap8250_priv *priv = up->port.private_data;
|
||||||
@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = pm_runtime_resume_and_get(port->dev);
|
||||||
|
if (err)
|
||||||
|
return;
|
||||||
|
|
||||||
|
__omap8250_set_mctrl(port, mctrl);
|
||||||
|
|
||||||
|
pm_runtime_mark_last_busy(port->dev);
|
||||||
|
pm_runtime_put_autosuspend(port->dev);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
|
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
|
||||||
* The access to uart register after MDR1 Access
|
* The access to uart register after MDR1 Access
|
||||||
@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
|||||||
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
|
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
|
||||||
struct omap8250_priv *priv)
|
struct omap8250_priv *priv)
|
||||||
{
|
{
|
||||||
u8 timeout = 255;
|
|
||||||
|
|
||||||
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
|
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
|
||||||
udelay(2);
|
udelay(2);
|
||||||
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
|
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
|
||||||
UART_FCR_CLEAR_RCVR);
|
UART_FCR_CLEAR_RCVR);
|
||||||
/*
|
|
||||||
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
|
|
||||||
* TX_FIFO_E bit is 1.
|
|
||||||
*/
|
|
||||||
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
|
|
||||||
(UART_LSR_THRE | UART_LSR_DR))) {
|
|
||||||
timeout--;
|
|
||||||
if (!timeout) {
|
|
||||||
/* Should *never* happen. we warn and carry on */
|
|
||||||
dev_crit(up->port.dev, "Errata i202: timedout %x\n",
|
|
||||||
serial_in(up, UART_LSR));
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
udelay(1);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
|
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
|
||||||
@ -341,7 +342,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
|
|||||||
|
|
||||||
omap8250_update_mdr1(up, priv);
|
omap8250_update_mdr1(up, priv);
|
||||||
|
|
||||||
up->port.ops->set_mctrl(&up->port, up->port.mctrl);
|
__omap8250_set_mctrl(&up->port, up->port.mctrl);
|
||||||
|
|
||||||
if (up->port.rs485.flags & SER_RS485_ENABLED)
|
if (up->port.rs485.flags & SER_RS485_ENABLED)
|
||||||
serial8250_em485_stop_tx(up);
|
serial8250_em485_stop_tx(up);
|
||||||
@ -1474,9 +1475,15 @@ static int omap8250_probe(struct platform_device *pdev)
|
|||||||
static int omap8250_remove(struct platform_device *pdev)
|
static int omap8250_remove(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct omap8250_priv *priv = platform_get_drvdata(pdev);
|
struct omap8250_priv *priv = platform_get_drvdata(pdev);
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = pm_runtime_resume_and_get(&pdev->dev);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
pm_runtime_dont_use_autosuspend(&pdev->dev);
|
pm_runtime_dont_use_autosuspend(&pdev->dev);
|
||||||
pm_runtime_put_sync(&pdev->dev);
|
pm_runtime_put_sync(&pdev->dev);
|
||||||
|
flush_work(&priv->qos_work);
|
||||||
pm_runtime_disable(&pdev->dev);
|
pm_runtime_disable(&pdev->dev);
|
||||||
serial8250_unregister_port(priv->line);
|
serial8250_unregister_port(priv->line);
|
||||||
cpu_latency_qos_remove_request(&priv->pm_qos_request);
|
cpu_latency_qos_remove_request(&priv->pm_qos_request);
|
||||||
|
@ -661,13 +661,6 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
|
|||||||
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
|
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* clamp the delays to [0, 100ms] */
|
|
||||||
rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
|
|
||||||
rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
|
|
||||||
|
|
||||||
memset(rs485->padding, 0, sizeof(rs485->padding));
|
|
||||||
port->rs485 = *rs485;
|
|
||||||
|
|
||||||
gpiod_set_value(port->rs485_term_gpio,
|
gpiod_set_value(port->rs485_term_gpio,
|
||||||
rs485->flags & SER_RS485_TERMINATE_BUS);
|
rs485->flags & SER_RS485_TERMINATE_BUS);
|
||||||
|
|
||||||
@ -675,15 +668,8 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
|
|||||||
* Both serial8250_em485_init() and serial8250_em485_destroy()
|
* Both serial8250_em485_init() and serial8250_em485_destroy()
|
||||||
* are idempotent.
|
* are idempotent.
|
||||||
*/
|
*/
|
||||||
if (rs485->flags & SER_RS485_ENABLED) {
|
if (rs485->flags & SER_RS485_ENABLED)
|
||||||
int ret = serial8250_em485_init(up);
|
return serial8250_em485_init(up);
|
||||||
|
|
||||||
if (ret) {
|
|
||||||
rs485->flags &= ~SER_RS485_ENABLED;
|
|
||||||
port->rs485.flags &= ~SER_RS485_ENABLED;
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
serial8250_em485_destroy(up);
|
serial8250_em485_destroy(up);
|
||||||
return 0;
|
return 0;
|
||||||
@ -1882,10 +1868,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
|
|||||||
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
|
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
|
||||||
{
|
{
|
||||||
switch (iir & 0x3f) {
|
switch (iir & 0x3f) {
|
||||||
case UART_IIR_RX_TIMEOUT:
|
case UART_IIR_RDI:
|
||||||
serial8250_rx_dma_flush(up);
|
if (!up->dma->rx_running)
|
||||||
|
break;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case UART_IIR_RLSI:
|
case UART_IIR_RLSI:
|
||||||
|
case UART_IIR_RX_TIMEOUT:
|
||||||
|
serial8250_rx_dma_flush(up);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
return up->dma->rx_dma(up);
|
return up->dma->rx_dma(up);
|
||||||
|
@ -2626,6 +2626,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
|
|||||||
.suspend_noirq = imx_uart_suspend_noirq,
|
.suspend_noirq = imx_uart_suspend_noirq,
|
||||||
.resume_noirq = imx_uart_resume_noirq,
|
.resume_noirq = imx_uart_resume_noirq,
|
||||||
.freeze_noirq = imx_uart_suspend_noirq,
|
.freeze_noirq = imx_uart_suspend_noirq,
|
||||||
|
.thaw_noirq = imx_uart_resume_noirq,
|
||||||
.restore_noirq = imx_uart_resume_noirq,
|
.restore_noirq = imx_uart_resume_noirq,
|
||||||
.suspend = imx_uart_suspend,
|
.suspend = imx_uart_suspend,
|
||||||
.resume = imx_uart_resume,
|
.resume = imx_uart_resume,
|
||||||
|
@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
|
|||||||
ci->enabled_otg_timer_bits &= ~(1 << t);
|
ci->enabled_otg_timer_bits &= ~(1 << t);
|
||||||
if (ci->next_otg_timer == t) {
|
if (ci->next_otg_timer == t) {
|
||||||
if (ci->enabled_otg_timer_bits == 0) {
|
if (ci->enabled_otg_timer_bits == 0) {
|
||||||
|
spin_unlock_irqrestore(&ci->lock, flags);
|
||||||
/* No enabled timers after delete it */
|
/* No enabled timers after delete it */
|
||||||
hrtimer_cancel(&ci->otg_fsm_hrtimer);
|
hrtimer_cancel(&ci->otg_fsm_hrtimer);
|
||||||
|
spin_lock_irqsave(&ci->lock, flags);
|
||||||
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
|
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
|
||||||
} else {
|
} else {
|
||||||
/* Find the next timer */
|
/* Find the next timer */
|
||||||
|
@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|||||||
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
|
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
|
||||||
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
|
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
|
||||||
|
|
||||||
|
/* Realforce 87U Keyboard */
|
||||||
|
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
|
||||||
|
|
||||||
/* M-Systems Flash Disk Pioneers */
|
/* M-Systems Flash Disk Pioneers */
|
||||||
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
|
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
|
||||||
|
|
||||||
|
@ -10,13 +10,8 @@
|
|||||||
#include <linux/acpi.h>
|
#include <linux/acpi.h>
|
||||||
#include <linux/platform_device.h>
|
#include <linux/platform_device.h>
|
||||||
|
|
||||||
#include "../host/xhci-plat.h"
|
|
||||||
#include "core.h"
|
#include "core.h"
|
||||||
|
|
||||||
static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
|
|
||||||
.quirks = XHCI_SKIP_PHY_INIT,
|
|
||||||
};
|
|
||||||
|
|
||||||
static int dwc3_host_get_irq(struct dwc3 *dwc)
|
static int dwc3_host_get_irq(struct dwc3 *dwc)
|
||||||
{
|
{
|
||||||
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
|
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
|
||||||
@ -92,11 +87,6 @@ int dwc3_host_init(struct dwc3 *dwc)
|
|||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
|
|
||||||
sizeof(dwc3_xhci_plat_priv));
|
|
||||||
if (ret)
|
|
||||||
goto err;
|
|
||||||
|
|
||||||
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
|
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
|
||||||
|
|
||||||
if (dwc->usb3_lpm_capable)
|
if (dwc->usb3_lpm_capable)
|
||||||
|
@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
|
|||||||
{
|
{
|
||||||
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
|
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
|
||||||
|
|
||||||
if (IS_ERR_OR_NULL(usb_dev->gpio_desc))
|
if (!usb_dev->gpio_desc)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
gpiod_set_value(usb_dev->gpio_desc, val);
|
gpiod_set_value(usb_dev->gpio_desc, val);
|
||||||
@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core)
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
usb_dev->core = core;
|
usb_dev->core = core;
|
||||||
|
|
||||||
if (core->dev.of_node)
|
usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc",
|
||||||
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
|
GPIOD_OUT_HIGH);
|
||||||
GPIOD_OUT_HIGH);
|
if (IS_ERR(usb_dev->gpio_desc))
|
||||||
|
return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc),
|
||||||
|
"error obtaining VCC GPIO");
|
||||||
|
|
||||||
switch (core->id.id) {
|
switch (core->id.id) {
|
||||||
case BCMA_CORE_USB20_HOST:
|
case BCMA_CORE_USB20_HOST:
|
||||||
|
@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb);
|
|||||||
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
|
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
|
||||||
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
|
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
|
||||||
|
|
||||||
|
#define UBLOX_VENDOR_ID 0x1546
|
||||||
|
|
||||||
/* AMOI PRODUCTS */
|
/* AMOI PRODUCTS */
|
||||||
#define AMOI_VENDOR_ID 0x1614
|
#define AMOI_VENDOR_ID 0x1614
|
||||||
#define AMOI_PRODUCT_H01 0x0800
|
#define AMOI_PRODUCT_H01 0x0800
|
||||||
@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb);
|
|||||||
#define QUECTEL_PRODUCT_UC15 0x9090
|
#define QUECTEL_PRODUCT_UC15 0x9090
|
||||||
/* These u-blox products use Qualcomm's vendor ID */
|
/* These u-blox products use Qualcomm's vendor ID */
|
||||||
#define UBLOX_PRODUCT_R410M 0x90b2
|
#define UBLOX_PRODUCT_R410M 0x90b2
|
||||||
#define UBLOX_PRODUCT_R6XX 0x90fa
|
|
||||||
/* These Yuga products use Qualcomm's vendor ID */
|
/* These Yuga products use Qualcomm's vendor ID */
|
||||||
#define YUGA_PRODUCT_CLM920_NC5 0x9625
|
#define YUGA_PRODUCT_CLM920_NC5 0x9625
|
||||||
|
|
||||||
@ -581,6 +582,9 @@ static void option_instat_callback(struct urb *urb);
|
|||||||
#define OPPO_VENDOR_ID 0x22d9
|
#define OPPO_VENDOR_ID 0x22d9
|
||||||
#define OPPO_PRODUCT_R11 0x276c
|
#define OPPO_PRODUCT_R11 0x276c
|
||||||
|
|
||||||
|
/* Sierra Wireless products */
|
||||||
|
#define SIERRA_VENDOR_ID 0x1199
|
||||||
|
#define SIERRA_PRODUCT_EM9191 0x90d3
|
||||||
|
|
||||||
/* Device flags */
|
/* Device flags */
|
||||||
|
|
||||||
@ -1124,8 +1128,16 @@ static const struct usb_device_id option_ids[] = {
|
|||||||
/* u-blox products using Qualcomm vendor ID */
|
/* u-blox products using Qualcomm vendor ID */
|
||||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
|
||||||
.driver_info = RSVD(1) | RSVD(3) },
|
.driver_info = RSVD(1) | RSVD(3) },
|
||||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
|
||||||
|
.driver_info = RSVD(4) },
|
||||||
|
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
|
||||||
.driver_info = RSVD(3) },
|
.driver_info = RSVD(3) },
|
||||||
|
/* u-blox products */
|
||||||
|
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
|
||||||
|
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
|
||||||
|
.driver_info = RSVD(4) },
|
||||||
|
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
|
||||||
|
.driver_info = RSVD(4) },
|
||||||
/* Quectel products using Quectel vendor ID */
|
/* Quectel products using Quectel vendor ID */
|
||||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
|
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
|
||||||
.driver_info = NUMEP2 },
|
.driver_info = NUMEP2 },
|
||||||
@ -2167,6 +2179,7 @@ static const struct usb_device_id option_ids[] = {
|
|||||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
|
||||||
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
|
||||||
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
|
||||||
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
|
||||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
|
||||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
|
||||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
|
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
|
||||||
@ -2176,6 +2189,8 @@ static const struct usb_device_id option_ids[] = {
|
|||||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
|
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
|
||||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
|
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
|
||||||
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
|
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
|
||||||
|
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
|
||||||
|
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
|
||||||
{ } /* Terminating entry */
|
{ } /* Terminating entry */
|
||||||
};
|
};
|
||||||
MODULE_DEVICE_TABLE(usb, option_ids);
|
MODULE_DEVICE_TABLE(usb, option_ids);
|
||||||
|
@ -352,13 +352,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
|
|||||||
return pmc_usb_command(port, (void *)&req, sizeof(req));
|
return pmc_usb_command(port, (void *)&req, sizeof(req));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
|
static int pmc_usb_mux_safe_state(struct pmc_usb_port *port,
|
||||||
|
struct typec_mux_state *state)
|
||||||
{
|
{
|
||||||
u8 msg;
|
u8 msg;
|
||||||
|
|
||||||
if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
|
if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
|
||||||
|
IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) &&
|
||||||
|
state->alt && state->alt->svid == USB_TYPEC_DP_SID)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
|
||||||
|
IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) &&
|
||||||
|
state->alt && state->alt->svid == USB_TYPEC_TBT_SID)
|
||||||
|
return 0;
|
||||||
|
|
||||||
msg = PMC_USB_SAFE_MODE;
|
msg = PMC_USB_SAFE_MODE;
|
||||||
msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
|
msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
|
||||||
|
|
||||||
@ -426,7 +437,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (state->mode == TYPEC_STATE_SAFE)
|
if (state->mode == TYPEC_STATE_SAFE)
|
||||||
return pmc_usb_mux_safe_state(port);
|
return pmc_usb_mux_safe_state(port, state);
|
||||||
if (state->mode == TYPEC_STATE_USB)
|
if (state->mode == TYPEC_STATE_USB)
|
||||||
return pmc_usb_connect(port, port->role);
|
return pmc_usb_connect(port, port->role);
|
||||||
|
|
||||||
|
@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
|
|||||||
|
|
||||||
err = device_register(dev);
|
err = device_register(dev);
|
||||||
if (err) {
|
if (err) {
|
||||||
pcpu_release(dev);
|
put_device(dev);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -230,7 +230,6 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
 			false);
 	if (ret) {
-		ulist_free(old_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -246,7 +245,6 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 			false);
 	if (ret) {
 		ulist_free(old_roots);
-		ulist_free(new_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -258,18 +256,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 		return ret;
 	}
 
+	/* btrfs_qgroup_account_extent() always frees the ulists passed to it. */
+	old_roots = NULL;
+	new_roots = NULL;
+
 	if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
 				nodesize, nodesize)) {
 		test_err("qgroup counts didn't match expected values");
 		return -EINVAL;
 	}
-	old_roots = NULL;
-	new_roots = NULL;
 
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
 			false);
 	if (ret) {
-		ulist_free(old_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -284,7 +283,6 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
 			false);
 	if (ret) {
 		ulist_free(old_roots);
-		ulist_free(new_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -335,7 +333,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
 			false);
 	if (ret) {
-		ulist_free(old_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -351,7 +348,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 			false);
 	if (ret) {
 		ulist_free(old_roots);
-		ulist_free(new_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -372,7 +368,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
 			false);
 	if (ret) {
-		ulist_free(old_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -388,7 +383,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 			false);
 	if (ret) {
 		ulist_free(old_roots);
-		ulist_free(new_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -415,7 +409,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 	ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
 			false);
 	if (ret) {
-		ulist_free(old_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
@@ -431,7 +424,6 @@ static int test_multiple_refs(struct btrfs_root *root,
 			false);
 	if (ret) {
 		ulist_free(old_roots);
-		ulist_free(new_roots);
 		test_err("couldn't find old roots: %d", ret);
 		return ret;
 	}
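The qgroup self-test hunks above all apply one rule: btrfs_qgroup_account_extent() always frees the ulists passed to it, so callers must stop freeing them in their own error paths and must drop the stale pointers before reusing them. A generic C sketch of that ownership hand-off (ulist_like and account_extent() are invented stand-ins, not btrfs code):

#include <stdlib.h>

struct ulist_like { int dummy; };

/* Consumes both lists unconditionally, like btrfs_qgroup_account_extent(). */
static int account_extent(struct ulist_like *old_roots, struct ulist_like *new_roots)
{
	free(old_roots);
	free(new_roots);
	return 0;
}

int main(void)
{
	struct ulist_like *old_roots = calloc(1, sizeof(*old_roots));
	struct ulist_like *new_roots = calloc(1, sizeof(*new_roots));
	int ret = account_extent(old_roots, new_roots);

	/* Ownership moved to the callee: drop our references instead of freeing. */
	old_roots = NULL;
	new_roots = NULL;
	return ret;
}

Freeing either list again after the call, as the old error paths effectively did, would be a double free.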
@@ -2378,7 +2378,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	void *fsdata;
+	void *fsdata = NULL;
 	int err;
 
 	err = inode_newsize_ok(inode, size);
@@ -2404,7 +2404,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	unsigned int blocksize = i_blocksize(inode);
 	struct page *page;
-	void *fsdata;
+	void *fsdata = NULL;
 	pgoff_t index, curidx;
 	loff_t curpos;
 	unsigned zerofrom, offset, len;
@@ -193,7 +193,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 			rc = put_user(ExtAttrBits &
 					FS_FL_USER_VISIBLE,
 					(int __user *)arg);
-			if (rc != EOPNOTSUPP)
+			if (rc != -EOPNOTSUPP)
 				break;
 		}
 #endif /* CONFIG_CIFS_POSIX */
@@ -222,7 +222,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 			 *			pSMBFile->fid.netfid,
 			 *			extAttrBits,
 			 *			&ExtAttrMask);
-			 * if (rc != EOPNOTSUPP)
+			 * if (rc != -EOPNOTSUPP)
 			 *	break;
 			 */
 
@@ -1256,6 +1256,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 			     COMPOUND_FID, current->tgid,
 			     FILE_FULL_EA_INFORMATION,
 			     SMB2_O_INFO_FILE, 0, data, size);
+	if (rc)
+		goto sea_exit;
 	smb2_set_next_command(tcon, &rqst[1]);
 	smb2_set_related(&rqst[1]);
 
@@ -1266,6 +1268,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 	rqst[2].rq_nvec = 1;
 	rc = SMB2_close_init(tcon, server,
 			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+	if (rc)
+		goto sea_exit;
 	smb2_set_related(&rqst[2]);
 
 	rc = compound_send_recv(xid, ses, server,
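The two cifs_ioctl() hunks above fix a sign bug: kernel helpers report failure as a negative errno, so comparing the return value against the positive EOPNOTSUPP constant can never match. A tiny userspace illustration (fake_op() is a stand-in, not the CIFS code):

#include <errno.h>
#include <stdio.h>

static int fake_op(void)
{
	return -EOPNOTSUPP;	/* kernel convention: negative errno on failure */
}

int main(void)
{
	int rc = fake_op();

	if (rc != EOPNOTSUPP)
		printf("old check: %d looks like 'some other error'\n", rc);
	if (rc == -EOPNOTSUPP)
		printf("fixed check: -EOPNOTSUPP is recognised\n");
	return 0;
}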
@@ -182,7 +182,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 		pr_warn("Invalid superblock size\n");
 		return -EINVAL;
 	}
-
+	if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) {
+		pr_warn("Invalid block size shift\n");
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -381,8 +384,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
 	if (!table[0])
 		table = sdp->sd_vfs->s_id;
 
-	strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
-	strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
+	BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN);
+
+	strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN);
+	strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
 
 	table = sdp->sd_table_name;
 	while ((table = strchr(table, '/')))
@@ -1414,13 +1419,13 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
 
 	switch (o) {
 	case Opt_lockproto:
-		strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
+		strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
 		break;
 	case Opt_locktable:
-		strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
+		strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
 		break;
 	case Opt_hostdata:
-		strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
+		strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
 		break;
 	case Opt_spectator:
 		args->ar_spectator = 1;
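The gfs2 hunks above move the lock-name fields from strlcpy() to strscpy() and bound every copy by GFS2_LOCKNAME_LEN. The point of strscpy() is that it always NUL-terminates and reports truncation as -E2BIG instead of returning the would-be source length. A small userspace approximation of that contract (strscpy_like() and the 8-byte field are made-up stand-ins, not the kernel implementation):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's strscpy() contract: copy at most
 * size - 1 bytes, always NUL-terminate, return -E2BIG when the source
 * had to be truncated, otherwise return the number of bytes copied. */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -E2BIG;

	len = strnlen(src, size);
	if (len == size) {			/* source does not fit */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* fits, including the NUL */
	return (long)len;
}

int main(void)
{
	char table[8];	/* stand-in for a GFS2_LOCKNAME_LEN-sized field */

	printf("%ld -> '%s'\n", strscpy_like(table, "short", sizeof(table)), table);
	printf("%ld -> '%s'\n", strscpy_like(table, "cluster:verylongname", sizeof(table)), table);
	return 0;
}

With strlcpy() the second call would have returned the full source length, so the truncation could go unnoticed.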
@@ -4717,7 +4717,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	void *fsdata;
+	void *fsdata = NULL;
 	int err;
 	unsigned int flags = 0;
 	if (nofs)
@@ -7014,6 +7014,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_lockdata *data = calldata;
 	struct nfs4_lock_state *lsp = data->lsp;
+	struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
 
 	dprintk("%s: begin!\n", __func__);
 
@@ -7023,8 +7024,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 	data->rpc_status = task->tk_status;
 	switch (task->tk_status) {
 	case 0:
-		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
-				data->timestamp);
+		renew_lease(server, data->timestamp);
 		if (data->arg.new_lock && !data->cancelled) {
 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
@@ -7045,6 +7045,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 			if (!nfs4_stateid_match(&data->arg.open_stateid,
 						&lsp->ls_state->open_stateid))
 				goto out_restart;
+			else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
+				goto out_restart;
 		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
 						&lsp->ls_stateid))
 			goto out_restart;
@@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
 	for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
 		u8 *mrec_end = (u8 *)ctx->mrec +
 		               le32_to_cpu(ctx->mrec->bytes_allocated);
-		u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
-			       a->name_length * sizeof(ntfschar);
-		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
-		    name_end > mrec_end)
+		u8 *name_end;
+
+		/* check whether ATTR_RECORD wrap */
+		if ((u8 *)a < (u8 *)ctx->mrec)
+			break;
+
+		/* check whether Attribute Record Header is within bounds */
+		if ((u8 *)a > mrec_end ||
+		    (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
+			break;
+
+		/* check whether ATTR_RECORD's name is within bounds */
+		name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+			   a->name_length * sizeof(ntfschar);
+		if (name_end > mrec_end)
 			break;
+
 		ctx->attr = a;
 		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
 				a->type == AT_END))
 			return -ENOENT;
 		if (unlikely(!a->length))
 			break;
+
+		/* check whether ATTR_RECORD's length wrap */
+		if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
+			break;
+		/* check whether ATTR_RECORD's length is within bounds */
+		if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
+			break;
+
 		if (a->type != type)
 			continue;
 		/*
@@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi)
 		goto err_out;
 	}
 
+	/* Sanity check offset to the first attribute */
+	if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
+		ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
+			       le16_to_cpu(m->attrs_offset));
+		goto err_out;
+	}
+
 	/* Need this to sanity check attribute list references to $MFT. */
 	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
 
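The ntfs_attr_find() hunk above splits one overloaded test into separate checks so that a crafted MFT record cannot steer the walk outside the record buffer or wrap the length arithmetic. A standalone sketch of the same record-walking pattern (the rec layout, field names and walk() are invented for illustration, not the NTFS format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented record layout; the bounds checks, not the format, are the point. */
struct rec {
	uint32_t length;	/* total record size, including this header */
	uint16_t name_offset;
	uint8_t  name_length;
	uint8_t  type;
};

/* Walk records inside buf[0..size), refusing to step outside the buffer
 * or to let a bogus length wrap the pointer arithmetic. */
static int walk(const uint8_t *buf, size_t size)
{
	const uint8_t *end = buf + size;
	const uint8_t *p = buf;

	while (p >= buf && p + sizeof(struct rec) <= end) {
		const struct rec *r = (const struct rec *)p;
		const uint8_t *name_end = p + r->name_offset + r->name_length;

		if (name_end > end)
			break;			/* name runs past the buffer */
		if (!r->length || p + r->length < p || p + r->length > end)
			break;			/* zero, wrapping or oversized length */
		printf("record type %u, %u bytes\n", r->type, r->length);
		p += r->length;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct rec r = { .length = 16, .name_offset = 8, .name_length = 4, .type = 1 };

	memcpy(buf, &r, sizeof(r));	/* one valid record, then zeros stop the walk */
	return walk(buf, sizeof(buf));
}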
@@ -19,11 +19,13 @@ enum intel_spi_type {
 /**
  * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
  * @type: Type which this controller is compatible with
- * @writeable: The chip is writeable
+ * @set_writeable: Try to make the chip writeable (optional)
+ * @data: Data to be passed to @set_writeable can be %NULL
  */
 struct intel_spi_boardinfo {
 	enum intel_spi_type type;
-	bool writeable;
+	bool (*set_writeable)(void __iomem *base, void *data);
+	void *data;
 };
 
 #endif /* INTEL_SPI_PDATA_H */
@@ -99,7 +99,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 
 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-			  struct file *filp, poll_table *poll_table);
+			  struct file *filp, poll_table *poll_table, int full);
 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
 
 #define RING_BUFFER_ALL_CPUS -1
@@ -36,4 +36,52 @@ enum {
 #define offsetofend(TYPE, MEMBER) \
 	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
 
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...)	\
+	__struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+	__struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+	__struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
 #endif
@@ -558,7 +558,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
 	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
 		     offsetof(typeof(flow->addrs), v4addrs.src) +
 		     sizeof(flow->addrs.v4addrs.src));
-	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
 	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
@@ -839,7 +839,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
 	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
 		     offsetof(typeof(flow->addrs), v6addrs.src) +
 		     sizeof(flow->addrs.v6addrs.src));
-	memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+	memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
 	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 }
 
@@ -100,8 +100,10 @@ struct iphdr {
 	__u8	ttl;
 	__u8	protocol;
 	__sum16	check;
-	__be32	saddr;
-	__be32	daddr;
+	__struct_group(/* no tag */, addrs, /* no attrs */,
+		__be32	saddr;
+		__be32	daddr;
+	);
 	/*The options start here. */
 };
 
@@ -130,8 +130,10 @@ struct ipv6hdr {
 	__u8			nexthdr;
 	__u8			hop_limit;
 
-	struct	in6_addr	saddr;
-	struct	in6_addr	daddr;
+	__struct_group(/* no tag */, addrs, /* no attrs */,
+		struct	in6_addr	saddr;
+		struct	in6_addr	daddr;
+	);
 };
 
@@ -1,6 +1,31 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
 #include <linux/compiler_types.h>
 
 #ifndef __always_inline
 #define __always_inline inline
 #endif
+
+/**
+ * __struct_group() - Create a mirrored named and anonyomous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for layer reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+	union { \
+		struct { MEMBERS } ATTRS; \
+		struct TAG { MEMBERS } ATTRS NAME; \
+	}
+#endif
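The __struct_group()/struct_group() additions above, together with the iphdr and ipv6hdr hunks before them, let the pair of address fields be copied as one named, correctly sized object instead of memcpy()ing past &iph->saddr. A compile-and-run sketch of the idea in plain C (the STRUCT_GROUP macro only mirrors __struct_group(), and pkt_hdr with its fields is invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Anonymous-union trick: the unnamed struct keeps the old field names
 * usable directly, the named copy gives memcpy() a destination that
 * spans exactly the grouped members. */
#define STRUCT_GROUP(NAME, MEMBERS...)		\
	union {					\
		struct { MEMBERS };		\
		struct { MEMBERS } NAME;	\
	}

struct pkt_hdr {
	uint8_t ttl;
	STRUCT_GROUP(addrs,
		uint32_t saddr;
		uint32_t daddr;
	);
};

int main(void)
{
	struct pkt_hdr hdr = { .ttl = 64, .saddr = 0x0a000001, .daddr = 0x0a000002 };
	struct pkt_hdr copy = { 0 };

	/* One bounded copy of the whole address pair, as in iph_to_flow_copy_v4addrs(). */
	memcpy(&copy.addrs, &hdr.addrs, sizeof(copy.addrs));
	printf("saddr=%#x daddr=%#x size=%zu\n",
	       (unsigned)copy.saddr, (unsigned)copy.daddr, sizeof(copy.addrs));
	return 0;
}

sizeof(copy.addrs) covers saddr plus daddr and nothing else, which is what the flow-dissector memcpy() bounds rely on.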
@@ -102,22 +102,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems)
 {
 	struct pcpu_freelist_head *head;
-	int i, cpu, pcpu_entries;
+	unsigned int cpu, cpu_idx, i, j, n, m;
 
-	pcpu_entries = nr_elems / num_possible_cpus() + 1;
-	i = 0;
+	n = nr_elems / num_possible_cpus();
+	m = nr_elems % num_possible_cpus();
 
+	cpu_idx = 0;
 	for_each_possible_cpu(cpu) {
-again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		/* No locking required as this is not visible yet. */
-		pcpu_freelist_push_node(head, buf);
-		i++;
-		buf += elem_size;
-		if (i == nr_elems)
-			break;
-		if (i % pcpu_entries)
-			goto again;
+		j = n + (cpu_idx < m ? 1 : 0);
+		for (i = 0; i < j; i++) {
+			/* No locking required as this is not visible yet. */
+			pcpu_freelist_push_node(head, buf);
+			buf += elem_size;
+		}
+		cpu_idx++;
 	}
 }
 
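The pcpu_freelist_populate() rework above drops the "one extra element per CPU, stop when the pool runs dry" loop and instead gives every possible CPU a base share of nr_elems, with the remainder spread one element at a time over the first CPUs. A standalone sketch of just that distribution arithmetic (elems_for_cpu() and the 4-CPU example are illustrative, not kernel code):

#include <stdio.h>

static unsigned int elems_for_cpu(unsigned int nr_elems, unsigned int ncpus,
				  unsigned int cpu_idx)
{
	unsigned int n = nr_elems / ncpus;	/* base share per CPU */
	unsigned int m = nr_elems % ncpus;	/* CPUs that get one extra */

	return n + (cpu_idx < m ? 1 : 0);
}

int main(void)
{
	unsigned int cpu, total = 0;

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned int j = elems_for_cpu(10, 4, cpu);

		printf("cpu%u: %u nodes\n", cpu, j);	/* 3, 3, 2, 2 */
		total += j;
	}
	printf("total: %u\n", total);			/* all 10 accounted for */
	return 0;
}

Every CPU ends up with either n or n+1 nodes and the whole pool is consumed, instead of the last CPUs possibly receiving nothing.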
@@ -1841,7 +1841,13 @@ static int __unregister_kprobe_top(struct kprobe *p)
 				if ((list_p != p) && (list_p->post_handler))
 					goto noclean;
 			}
-			ap->post_handler = NULL;
+			/*
+			 * For the kprobe-on-ftrace case, we keep the
+			 * post_handler setting to identify this aggrprobe
+			 * armed with kprobe_ipmodify_ops.
+			 */
+			if (!kprobe_ftrace(ap))
+				ap->post_handler = NULL;
 		}
 noclean:
 	/*
@@ -1295,6 +1295,7 @@ static int ftrace_add_mod(struct trace_array *tr,
 	if (!ftrace_mod)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ftrace_mod->list);
 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
 	ftrace_mod->enable = enable;
@@ -3178,7 +3179,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 		/* if we can't allocate this size, try something smaller */
 		if (!order)
 			return -ENOMEM;
-		order >>= 1;
+		order--;
 		goto again;
 	}
 
@@ -6877,7 +6878,7 @@ void __init ftrace_init(void)
 	}
 
 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
-		count, count / ENTRIES_PER_PAGE + 1);
+		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -73,6 +73,10 @@ static struct trace_event_file *gen_kretprobe_test;
 #define KPROBE_GEN_TEST_ARG3	NULL
 #endif
 
+static bool trace_event_file_is_valid(struct trace_event_file *input)
+{
+	return input && !IS_ERR(input);
+}
+
 /*
  * Test to make sure we can create a kprobe event, then add more
@@ -139,6 +143,8 @@ static int __init test_gen_kprobe_cmd(void)
 	kfree(buf);
 	return ret;
  delete:
+	if (trace_event_file_is_valid(gen_kprobe_test))
+		gen_kprobe_test = NULL;
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kprobe_test");
 	goto out;
@@ -202,6 +208,8 @@ static int __init test_gen_kretprobe_cmd(void)
 	kfree(buf);
 	return ret;
  delete:
+	if (trace_event_file_is_valid(gen_kretprobe_test))
+		gen_kretprobe_test = NULL;
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kretprobe_test");
 	goto out;
@@ -217,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void)
 
 	ret = test_gen_kretprobe_cmd();
 	if (ret) {
-		WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-						  "kprobes",
-						  "gen_kretprobe_test", false));
-		trace_put_event_file(gen_kretprobe_test);
+		if (trace_event_file_is_valid(gen_kretprobe_test)) {
+			WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+							  "kprobes",
+							  "gen_kretprobe_test", false));
+			trace_put_event_file(gen_kretprobe_test);
+		}
 		WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
 	}
 
@@ -229,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void)
 
 static void __exit kprobe_event_gen_test_exit(void)
 {
-	/* Disable the event or you can't remove it */
-	WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
-					  "kprobes",
-					  "gen_kprobe_test", false));
+	if (trace_event_file_is_valid(gen_kprobe_test)) {
+		/* Disable the event or you can't remove it */
+		WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+						  "kprobes",
+						  "gen_kprobe_test", false));
+
+		/* Now give the file and instance back */
+		trace_put_event_file(gen_kprobe_test);
+	}
 
-	/* Now give the file and instance back */
-	trace_put_event_file(gen_kprobe_test);
 
 	/* Now unregister and free the event */
 	WARN_ON(kprobe_event_delete("gen_kprobe_test"));
 
-	/* Disable the event or you can't remove it */
-	WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-					  "kprobes",
-					  "gen_kretprobe_test", false));
+	if (trace_event_file_is_valid(gen_kretprobe_test)) {
+		/* Disable the event or you can't remove it */
+		WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+						  "kprobes",
+						  "gen_kretprobe_test", false));
+
+		/* Now give the file and instance back */
+		trace_put_event_file(gen_kretprobe_test);
+	}
 
-	/* Now give the file and instance back */
-	trace_put_event_file(gen_kretprobe_test);
 
 	/* Now unregister and free the event */
 	WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
@@ -517,6 +517,7 @@ struct ring_buffer_per_cpu {
 	local_t				committing;
 	local_t				commits;
 	local_t				pages_touched;
+	local_t				pages_lost;
 	local_t				pages_read;
 	long				last_pages_touch;
 	size_t				shortest_full;
@@ -771,10 +772,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
 	size_t read;
+	size_t lost;
 	size_t cnt;
 
 	read = local_read(&buffer->buffers[cpu]->pages_read);
+	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 
+	if (WARN_ON_ONCE(cnt < lost))
+		return 0;
+
+	cnt -= lost;
+
 	/* The reader can read an empty page, but not more than that */
 	if (cnt < read) {
 		WARN_ON_ONCE(read > cnt + 1);
@@ -784,6 +793,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 	return cnt - read;
 }
 
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	size_t nr_pages;
+	size_t dirty;
+
+	nr_pages = cpu_buffer->nr_pages;
+	if (!nr_pages || !full)
+		return true;
+
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+
+	return (dirty * 100) > (full * nr_pages);
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
@@ -912,22 +936,20 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 		    !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
-			size_t nr_pages;
-			size_t dirty;
+			bool done;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-			nr_pages = cpu_buffer->nr_pages;
-			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+			done = !pagebusy && full_hit(buffer, cpu, full);
+
 			if (!cpu_buffer->shortest_full ||
 			    cpu_buffer->shortest_full > full)
 				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-			if (!pagebusy &&
-			    (!nr_pages || (dirty * 100) > full * nr_pages))
+			if (done)
 				break;
 		}
 
@@ -953,6 +975,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
  * @cpu: the cpu buffer to wait on
  * @filp: the file descriptor
  * @poll_table: The poll descriptor
+ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -962,14 +985,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-			  struct file *filp, poll_table *poll_table)
+			  struct file *filp, poll_table *poll_table, int full)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct rb_irq_work *work;
 
-	if (cpu == RING_BUFFER_ALL_CPUS)
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
-	else {
+		full = 0;
+	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -EINVAL;
 
@@ -977,8 +1001,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 		work = &cpu_buffer->irq_work;
 	}
 
-	poll_wait(filp, &work->waiters, poll_table);
-	work->waiters_pending = true;
+	if (full) {
+		poll_wait(filp, &work->full_waiters, poll_table);
+		work->full_waiters_pending = true;
+	} else {
+		poll_wait(filp, &work->waiters, poll_table);
+		work->waiters_pending = true;
+	}
+
 	/*
 	 * There's a tight race between setting the waiters_pending and
 	 * checking if the ring buffer is empty. Once the waiters_pending bit
@@ -994,6 +1024,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 	 */
 	smp_mb();
 
+	if (full)
+		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 		return EPOLLIN | EPOLLRDNORM;
@@ -1635,9 +1668,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 
 	free_buffer_page(cpu_buffer->reader_page);
 
-	rb_head_page_deactivate(cpu_buffer);
-
 	if (head) {
+		rb_head_page_deactivate(cpu_buffer);
+
 		list_for_each_entry_safe(bpage, tmp, head, list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
@@ -1873,6 +1906,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 		 */
 		local_add(page_entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 	}
 
 	/*
@@ -2363,6 +2397,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		local_add(entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 
 		/*
 		 * The entries will be zeroed out when we move the
@@ -3033,10 +3068,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-	size_t nr_pages;
-	size_t dirty;
-	size_t full;
-
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
@@ -3060,10 +3091,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 
 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-	full = cpu_buffer->shortest_full;
-	nr_pages = cpu_buffer->nr_pages;
-	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
-	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
 		return;
 
 	cpu_buffer->irq_work.wakeup_full = true;
@@ -4964,6 +4992,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_lost, 0);
 	local_set(&cpu_buffer->pages_read, 0);
 	cpu_buffer->last_pages_touch = 0;
 	cpu_buffer->shortest_full = 0;
@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
 
 	/* Now generate a gen_synth_test event */
 	ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("gen_synth_test");
- free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 /*
@@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void)
 
 	/* Now trace an empty_synth_test event */
 	ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("empty_synth_test");
- free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 static struct synth_field_desc create_synth_test_fields[] = {
@@ -6264,7 +6264,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
 		return EPOLLIN | EPOLLRDNORM;
 	else
 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
-					     filp, poll_table);
+					     filp, poll_table, iter->tr->buffer_percent);
 }
 
 static __poll_t
@@ -791,10 +791,9 @@ static int register_synth_event(struct synth_event *event)
 	}
 
 	ret = set_synth_event_print_fmt(call);
-	if (ret < 0) {
+	/* unregister_trace_event() will be called inside */
+	if (ret < 0)
 		trace_remove_event_call(call);
-		goto err;
-	}
 out:
 	return ret;
 err:
@@ -3438,7 +3438,7 @@ ssize_t generic_perform_write(struct file *file,
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
-		void *fsdata;
+		void *fsdata = NULL;
 
 		offset = (pos & (PAGE_SIZE - 1));
 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
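The ring-buffer hunks above concentrate the "is the buffer full enough to wake a waiter" decision in one helper, full_hit(), and stop counting pages lost to overruns as dirty. A standalone sketch of that percentage test (full_hit_like() and the page counts are illustrative, and the dirty-page math is simplified compared to ring_buffer_nr_dirty_pages()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* A waiter asking for `full` percent is woken only once dirty pages exceed
 * that share of the buffer; pages lost to overruns no longer count. */
static bool full_hit_like(size_t nr_pages, size_t touched, size_t lost,
			  size_t read, int full)
{
	size_t dirty;

	if (!nr_pages || !full)
		return true;

	if (touched < lost)
		return false;			/* the kernel version WARNs here */
	dirty = touched - lost;
	dirty = dirty > read ? dirty - read : 0;

	return dirty * 100 > (size_t)full * nr_pages;
}

int main(void)
{
	/* 64-page buffer, waiter wants 50% full: 30 dirty pages is not enough, 40 is. */
	printf("%d\n", full_hit_like(64, 30, 0, 0, 50));	/* 0 */
	printf("%d\n", full_hit_like(64, 40, 0, 0, 50));	/* 1 */
	/* Pages lost to overruns no longer inflate the count. */
	printf("%d\n", full_hit_like(64, 40, 8, 0, 50));	/* 0 */
	return 0;
}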