This is the 5.10.198 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUlq8kACgkQONu9yGCS
 aT5GiA//fiURwpUcawIhvgYewMVp+ovJ+mpX5IT+bMbW9Ur0sBhtiiU+WDNYxMru
 34xbSQ/+o2a6N2tmK1JF7o76e2sHw/aRgaoDHkN5oEG+lbRH7TdCv6O0QRFAthcd
 sJL+SX/GclcKW0ZHDjJX9Wt5Lq3gqVYlqJlCsw6gI/1JrQTxStrSQh7yRbrYSqpY
 wGWEq19IrE/ToZFTBuPEEvlBswszGrI88lVtjvRzIdczQVyFLAoEQ2GNPWl3XNBh
 ygGnwiHjk3a+QhZ30evIv2LX+tlGmpLy7gdLDsdZF7RfEkNHQ92IgaHvFDs8JqDg
 QnRE8KCrC2V45OIQRRnA5NVtD3LBYM0bUhbqqLiNvTMiSIBWge4efJwxyYcTTfkX
 MTmbo9z/bIVFdpgCQtneRw3eUyfbRKQ1cUvtmkuXIVLzvZUQaVMpXVZ6pz864E54
 3nJrl2HJtIdJsRX5M4unL+AXNLRoJUbfb4hbzAD0Tg8Wbdgrn7vL/z6JmIzA2ssQ
 +R/52ghimOThGTUbCi2pJx/cpKhegkJEJ7+JwUhS9L9ybA93g/bD0n9zy6JXpd/H
 Cct0JWbiukbDp1CTLQ6Qm9TK5HANW2fXMHoR3H5ltPojNZwN7/pgYqN6ppjtBKVe
 gA3k8KYkZoXjbF6VS1B5Y83wJ+H+39Luk/DSmm1ZNvYPHxmz+q0=
 =2MQy
 -----END PGP SIGNATURE-----

Merge 5.10.198 into android12-5.10-lts

Changes in 5.10.198
	NFS: Use the correct commit info in nfs_join_page_group()
	NFS/pNFS: Report EINVAL errors from connect() to the server
	SUNRPC: Mark the cred for revalidation if the server rejects it
	tracing: Increase trace array ref count on enable and filter files
	ata: ahci: Drop pointless VPRINTK() calls and convert the remaining ones
	ata: libahci: clear pending interrupt status
	ext4: remove the 'group' parameter of ext4_trim_extent
	ext4: add new helper interface ext4_try_to_trim_range()
	ext4: scope ret locally in ext4_try_to_trim_range()
	ext4: change s_last_trim_minblks type to unsigned long
	ext4: mark group as trimmed only if it was fully scanned
	ext4: replace the traditional ternary conditional operator with with max()/min()
	ext4: move setting of trimmed bit into ext4_try_to_trim_range()
	ext4: do not let fstrim block system suspend
	tracing: Have event inject files inc the trace array ref count
	netfilter: nf_tables: integrate pipapo into commit protocol
	netfilter: nf_tables: don't skip expired elements during walk
	netfilter: nf_tables: GC transaction API to avoid race with control plane
	netfilter: nf_tables: adapt set backend to use GC transaction API
	netfilter: nft_set_hash: mark set element as dead when deleting from packet path
	netfilter: nf_tables: remove busy mark and gc batch API
	netfilter: nf_tables: don't fail inserts if duplicate has expired
	netfilter: nf_tables: fix GC transaction races with netns and netlink event exit path
	netfilter: nf_tables: GC transaction race with netns dismantle
	netfilter: nf_tables: GC transaction race with abort path
	netfilter: nf_tables: use correct lock to protect gc_list
	netfilter: nf_tables: defer gc run if previous batch is still pending
	netfilter: nft_set_rbtree: skip sync GC for new elements in this transaction
	netfilter: nft_set_rbtree: use read spinlock to avoid datapath contention
	netfilter: nft_set_pipapo: stop GC iteration if GC transaction allocation fails
	netfilter: nft_set_hash: try later when GC hits EAGAIN on iteration
	netfilter: nf_tables: fix memleak when more than 255 elements expired
	ASoC: meson: spdifin: start hw on dai probe
	netfilter: nf_tables: disallow element removal on anonymous sets
	bpf: Avoid deadlock when using queue and stack maps from NMI
	selftests/tls: Add {} to avoid static checker warning
	selftests: tls: swap the TX and RX sockets in some tests
	ASoC: imx-audmix: Fix return error with devm_clk_get()
	i40e: Fix VF VLAN offloading when port VLAN is configured
	ipv4: fix null-deref in ipv4_link_failure
	powerpc/perf/hv-24x7: Update domain value check
	dccp: fix dccp_v4_err()/dccp_v6_err() again
	platform/x86: intel_scu_ipc: Check status after timeout in busy_loop()
	platform/x86: intel_scu_ipc: Check status upon timeout in ipc_wait_for_interrupt()
	platform/x86: intel_scu_ipc: Don't override scu in intel_scu_ipc_dev_simple_command()
	platform/x86: intel_scu_ipc: Fail IPC send if still busy
	x86/srso: Fix srso_show_state() side effect
	x86/srso: Fix SBPB enablement for spec_rstack_overflow=off
	net: hns3: only enable unicast promisc when mac table full
	net: hns3: add 5ms delay before clear firmware reset irq source
	net: bridge: use DEV_STATS_INC()
	team: fix null-ptr-deref when team device type is changed
	netfilter: ipset: Fix race between IPSET_CMD_CREATE and IPSET_CMD_SWAP
	seqlock: avoid -Wshadow warnings
	seqlock: Rename __seqprop() users
	seqlock: Prefix internal seqcount_t-only macros with a "do_"
	locking/seqlock: Do the lockdep annotation before locking in do_write_seqcount_begin_nested()
	bnxt_en: Flush XDP for bnxt_poll_nitroa0()'s NAPI
	net: rds: Fix possible NULL-pointer dereference
	gpio: tb10x: Fix an error handling path in tb10x_gpio_probe()
	i2c: mux: demux-pinctrl: check the return value of devm_kstrdup()
	netfilter: nf_tables: unregister flowtable hooks on netns exit
	netfilter: nf_tables: double hook unregistration in netns path
	Input: i8042 - rename i8042-x86ia64io.h to i8042-acpipnpio.h
	Input: i8042 - add quirk for TUXEDO Gemini 17 Gen1/Clevo PD70PN
	mmc: renesas_sdhi: probe into TMIO after SCC parameters have been setup
	mmc: renesas_sdhi: populate SCC pointer at the proper place
	mmc: tmio: support custom irq masks
	mmc: renesas_sdhi: register irqs before registering controller
	media: venus: core: Add io base variables for each block
	media: venus: hfi,pm,firmware: Convert to block relative addressing
	media: venus: hfi: Define additional 6xx registers
	media: venus: core: Add differentiator IS_V6(core)
	media: venus: hfi: Add a 6xx boot logic
	media: venus: hfi_venus: Write to VIDC_CTRL_INIT after unmasking interrupts
	netfilter: use actual socket sk for REJECT action
	netfilter: nft_exthdr: Support SCTP chunks
	netfilter: nf_tables: add and use nft_sk helper
	netfilter: nf_tables: add and use nft_thoff helper
	netfilter: nft_exthdr: break evaluation if setting TCP option fails
	netfilter: exthdr: add support for tcp option removal
	netfilter: nft_exthdr: Fix non-linear header modification
	ata: libata: Rename link flag ATA_LFLAG_NO_DB_DELAY
	ata: ahci: Add support for AMD A85 FCH (Hudson D4)
	ata: ahci: Rename board_ahci_mobile
	ata: ahci: Add Elkhart Lake AHCI controller
	btrfs: reset destination buffer when read_extent_buffer() gets invalid range
	MIPS: Alchemy: only build mmc support helpers if au1xmmc is enabled
	bus: ti-sysc: Use fsleep() instead of usleep_range() in sysc_reset()
	bus: ti-sysc: Fix missing AM35xx SoC matching
	clk: tegra: fix error return case for recalc_rate
	ARM: dts: omap: correct indentation
	ARM: dts: ti: omap: Fix bandgap thermal cells addressing for omap3/4
	ARM: dts: motorola-mapphone: Configure lower temperature passive cooling
	ARM: dts: motorola-mapphone: Add 1.2GHz OPP
	ARM: dts: motorola-mapphone: Drop second ti,wlcore compatible value
	ARM: dts: am335x: Guardian: Update beeper label
	ARM: dts: Unify pwm-omap-dmtimer node names
	ARM: dts: ti: omap: motorola-mapphone: Fix abe_clkctrl warning on boot
	bus: ti-sysc: Fix SYSC_QUIRK_SWSUP_SIDLE_ACT handling for uart wake-up
	power: supply: ucs1002: fix error code in ucs1002_get_property()
	xtensa: add default definition for XCHAL_HAVE_DIV32
	xtensa: iss/network: make functions static
	xtensa: boot: don't add include-dirs
	xtensa: boot/lib: fix function prototypes
	gpio: pmic-eic-sprd: Add can_sleep flag for PMIC EIC chip
	i2c: npcm7xx: Fix callback completion ordering
	dma-debug: don't call __dma_entry_alloc_check_leak() under free_entries_lock
	parisc: sba: Fix compile warning wrt list of SBA devices
	parisc: iosapic.c: Fix sparse warnings
	parisc: drivers: Fix sparse warning
	parisc: irq: Make irq_stack_union static to avoid sparse warning
	scsi: qedf: Add synchronization between I/O completions and abort
	selftests/ftrace: Correctly enable event in instance-event.tc
	ring-buffer: Avoid softlockup in ring_buffer_resize()
	selftests: fix dependency checker script
	ring-buffer: Do not attempt to read past "commit"
	platform/mellanox: mlxbf-bootctl: add NET dependency into Kconfig
	scsi: pm80xx: Use phy-specific SAS address when sending PHY_START command
	scsi: pm80xx: Avoid leaking tags when processing OPC_INB_SET_CONTROLLER_CONFIG command
	ata: libata-eh: do not clear ATA_PFLAG_EH_PENDING in ata_eh_reset()
	spi: nxp-fspi: reset the FLSHxCR1 registers
	bpf: Clarify error expectations from bpf_clone_redirect
	media: vb2: frame_vector.c: replace WARN_ONCE with a comment
	powerpc/watchpoints: Disable preemption in thread_change_pc()
	ncsi: Propagate carrier gain/loss events to the NCSI controller
	fbdev/sh7760fb: Depend on FB=y
	perf build: Define YYNOMEM as YYNOABORT for bison < 3.81
	sched/cpuacct: Fix user/system in shown cpuacct.usage*
	sched/cpuacct: Fix charge percpu cpuusage
	sched/cpuacct: Optimize away RCU read lock
	cgroup: Fix suspicious rcu_dereference_check() usage warning
	ACPI: Check StorageD3Enable _DSD property in ACPI code
	nvme-pci: factor the iod mempool creation into a helper
	nvme-pci: factor out a nvme_pci_alloc_dev helper
	nvme-pci: do not set the NUMA node of device if it has none
	watchdog: iTCO_wdt: No need to stop the timer in probe
	watchdog: iTCO_wdt: Set NO_REBOOT if the watchdog is not already running
	netfilter: nft_exthdr: Search chunks in SCTP packets only
	netfilter: nft_exthdr: Fix for unsafe packet data read
	nvme-pci: always return an ERR_PTR from nvme_pci_alloc_dev
	smack: Record transmuting in smk_transmuted
	smack: Retrieve transmuting information in smack_inode_getsecurity()
	Smack:- Use overlay inode label in smack_inode_copy_up()
	Revert "tty: n_gsm: fix UAF in gsm_cleanup_mux"
	serial: 8250_port: Check IRQ data before use
	nilfs2: fix potential use after free in nilfs_gccache_submit_read_data()
	netfilter: nf_tables: disallow rule removal from chain binding
	ALSA: hda: Disable power save for solving pop issue on Lenovo ThinkCentre M70q
	ata: libata-scsi: ignore reserved bits for REPORT SUPPORTED OPERATION CODES
	i2c: i801: unregister tco_pdev in i801_probe() error path
	Revert "SUNRPC dont update timeout value on connection reset"
	proc: nommu: /proc/<pid>/maps: release mmap read lock
	ring-buffer: Update "shortest_full" in polling
	btrfs: properly report 0 avail for very full file systems
	bpf: Fix BTF_ID symbol generation collision
	bpf: Fix BTF_ID symbol generation collision in tools/
	net: thunderbolt: Fix TCPv6 GSO checksum calculation
	ata: libata-core: Fix ata_port_request_pm() locking
	ata: libata-core: Fix port and device removal
	ata: libata-core: Do not register PM operations for SAS ports
	ata: libata-sata: increase PMP SRST timeout to 10s
	fs: binfmt_elf_efpic: fix personality for ELF-FDPIC
	spi: spi-zynqmp-gqspi: Fix runtime PM imbalance in zynqmp_qspi_probe
	spi: zynqmp-gqspi: fix clock imbalance on probe failure
	NFS: Cleanup unused rpc_clnt variable
	NFS: rename nfs_client_kset to nfs_kset
	NFSv4: Fix a state manager thread deadlock regression
	ring-buffer: remove obsolete comment for free_buffer_page()
	ring-buffer: Fix bytes info in per_cpu buffer stats
	drm/mediatek: Fix backport issue in mtk_drm_gem_prime_vmap()
	rbd: move rbd_dev_refresh() definition
	rbd: decouple header read-in from updating rbd_dev->header
	rbd: decouple parent info read-in from updating rbd_dev
	rbd: take header_rwsem in rbd_dev_refresh() only when updating
	block: fix use-after-free of q->q_usage_counter
	Revert "clk: imx: pll14xx: dynamically configure PLL for 393216000/361267200Hz"
	Revert "PCI: qcom: Disable write access to read only registers for IP v2.3.3"
	scsi: zfcp: Fix a double put in zfcp_port_enqueue()
	qed/red_ll2: Fix undefined behavior bug in struct qed_ll2_info
	wifi: mwifiex: Fix tlv_buf_left calculation
	net: replace calls to sock->ops->connect() with kernel_connect()
	net: prevent rewrite of msg_name in sock_sendmsg()
	arm64: Add Cortex-A520 CPU part definition
	ubi: Refuse attaching if mtd's erasesize is 0
	wifi: iwlwifi: dbg_ini: fix structure packing
	wifi: mwifiex: Fix oob check condition in mwifiex_process_rx_packet
	bpf: Fix tr dereferencing
	drivers/net: process the result of hdlc_open() and add call of hdlc_close() in uhdlc_close()
	wifi: mt76: mt76x02: fix MT76x0 external LNA gain handling
	regmap: rbtree: Fix wrong register marked as in-cache when creating new node
	ima: Finish deprecation of IMA_TRUSTED_KEYRING Kconfig
	scsi: target: core: Fix deadlock due to recursive locking
	ima: rework CONFIG_IMA dependency block
	NFSv4: Fix a nfs4_state_manager() race
	modpost: add missing else to the "of" check
	net: fix possible store tearing in neigh_periodic_work()
	ipv4, ipv6: Fix handling of transhdrlen in __ip{,6}_append_data()
	net: dsa: mv88e6xxx: Avoid EEPROM timeout when EEPROM is absent
	net: usb: smsc75xx: Fix uninit-value access in __smsc75xx_read_reg
	net: nfc: llcp: Add lock when modifying device list
	net: ethernet: ti: am65-cpsw: Fix error code in am65_cpsw_nuss_init_tx_chns()
	netfilter: handle the connecting collision properly in nf_conntrack_proto_sctp
	netfilter: nf_tables: nft_set_rbtree: fix spurious insertion failure
	net: stmmac: dwmac-stm32: fix resume on STM32 MCU
	tipc: fix a potential deadlock on &tx->lock
	tcp: fix quick-ack counting to count actual ACKs of new data
	tcp: fix delayed ACKs for MSS boundary condition
	sctp: update transport state when processing a dupcook packet
	sctp: update hb timer immediately after users change hb_interval
	cpupower: add Makefile dependencies for install targets
	dm zoned: free dmz->ddev array in dmz_put_zoned_devices
	RDMA/core: Require admin capabilities to set system parameters
	of: dynamic: Fix potential memory leak in of_changeset_action()
	IB/mlx4: Fix the size of a buffer in add_port_entries()
	gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
	gpio: pxa: disable pinctrl calls for MMP_GPIO
	RDMA/cma: Initialize ib_sa_multicast structure to 0 when join
	RDMA/cma: Fix truncation compilation warning in make_cma_ports
	RDMA/uverbs: Fix typo of sizeof argument
	RDMA/siw: Fix connection failure handling
	RDMA/mlx5: Fix NULL string error
	parisc: Restore __ldcw_align for PA-RISC 2.0 processors
	netfilter: nf_tables: fix kdoc warnings after gc rework
	netfilter: nftables: exthdr: fix 4-byte stack OOB write
	mmc: renesas_sdhi: only reset SCC when its pointer is populated
	xen/events: replace evtchn_rwlock with RCU
	Linux 5.10.198

Change-Id: Iabfdf919ae63e41a565e523087d800ebc20e5448
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

commit 3e7bedcc0e by Greg Kroah-Hartman, 2023-10-26 17:54:23 +00:00
211 changed files with 2761 additions and 1597 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 197
SUBLEVEL = 198
EXTRAVERSION =
NAME = Dare mighty things


@ -100,11 +100,12 @@ panel-info {
};
pwm7: dmtimer-pwm {
guardian_beeper: pwm-7 {
compatible = "ti,omap-dmtimer-pwm";
#pwm-cells = <3>;
ti,timers = <&timer7>;
pinctrl-names = "default";
pinctrl-0 = <&dmtimer7_pins>;
pinctrl-0 = <&guardian_beeper_pins>;
ti,clock-source = <0x01>;
};
@ -343,9 +344,9 @@ AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
>;
};
dmtimer7_pins: pinmux_dmtimer7_pins {
guardian_beeper_pins: pinmux_dmtimer7_pins {
pinctrl-single,pins = <
AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5)
AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5) /* (E18) timer7 */
>;
};


@ -150,7 +150,7 @@ bl: backlight {
enable-gpios = <&gpio6 22 GPIO_ACTIVE_HIGH>; /* gpio_182 */
};
pwm11: dmtimer-pwm@11 {
pwm11: pwm-11 {
compatible = "ti,omap-dmtimer-pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm_pins>;


@ -59,7 +59,7 @@ led2 {
};
};
pwm10: dmtimer-pwm {
pwm10: pwm-10 {
compatible = "ti,omap-dmtimer-pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm_pins>;


@ -156,7 +156,7 @@ soundcard {
dais = <&mcbsp2_port>, <&mcbsp3_port>;
};
pwm8: dmtimer-pwm-8 {
pwm8: pwm-8 {
pinctrl-names = "default";
pinctrl-0 = <&vibrator_direction_pin>;
@ -166,7 +166,7 @@ pwm8: dmtimer-pwm-8 {
ti,clock-source = <0x01>;
};
pwm9: dmtimer-pwm-9 {
pwm9: pwm-9 {
pinctrl-names = "default";
pinctrl-0 = <&vibrator_enable_pin>;
@ -192,6 +192,29 @@ backlight: backlight {
};
};
&cpu_thermal {
polling-delay = <10000>; /* milliseconds */
};
&cpu_alert0 {
temperature = <80000>; /* millicelsius */
};
&cpu0 {
/*
* Note that the 1.2GiHz mode is enabled for all SoC variants for
* the Motorola Android Linux v3.0.8 based kernel.
*/
operating-points = <
/* kHz uV */
300000 1025000
600000 1200000
800000 1313000
1008000 1375000
1200000 1375000
>;
};
&dss {
status = "okay";
};
@ -384,7 +407,7 @@ &mmc3 {
#address-cells = <1>;
#size-cells = <0>;
wlcore: wlcore@2 {
compatible = "ti,wl1285", "ti,wl1283";
compatible = "ti,wl1285";
reg = <2>;
/* gpio_100 with gpmc_wait2 pad as wakeirq */
interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
@ -716,12 +739,12 @@ &rng_target {
/* Configure pwm clock source for timers 8 & 9 */
&timer8 {
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
assigned-clock-parents = <&sys_clkin_ck>;
assigned-clock-parents = <&sys_32k_ck>;
};
&timer9 {
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
assigned-clock-parents = <&sys_clkin_ck>;
assigned-clock-parents = <&sys_32k_ck>;
};
/*


@ -8,9 +8,9 @@
/ {
vddvario: regulator-vddvario {
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
};
vdd33a: regulator-vdd33a {


@ -12,9 +12,9 @@
/ {
vddvario: regulator-vddvario {
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
};
vdd33a: regulator-vdd33a {


@ -11,12 +11,12 @@ / {
model = "CompuLab CM-T3517";
compatible = "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
wl12xx_vmmc2: wl12xx_vmmc2 {
compatible = "regulator-fixed";


@ -15,8 +15,7 @@ cpu_thermal: cpu_thermal {
polling-delay = <1000>; /* milliseconds */
coefficients = <0 20000>;
/* sensor ID */
thermal-sensors = <&bandgap 0>;
thermal-sensors = <&bandgap>;
cpu_trips: trips {
cpu_alert0: cpu_alert {


@ -147,7 +147,7 @@ backlight: backlight {
pinctrl-0 = <&backlight_pins>;
};
pwm11: dmtimer-pwm {
pwm11: pwm-11 {
compatible = "ti,omap-dmtimer-pwm";
ti,timers = <&timer11>;
#pwm-cells = <3>;
@ -332,7 +332,7 @@ OMAP3_CORE1_IOPAD(0x2106, PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */
OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */
OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */
>;
};
};
gps_pins: pinmux_gps_pins {
pinctrl-single,pins = <
@ -866,8 +866,8 @@ &mcbsp4 { /* GSM voice PCM */
};
&hdqw1w {
pinctrl-names = "default";
pinctrl-0 = <&hdq_pins>;
pinctrl-names = "default";
pinctrl-0 = <&hdq_pins>;
};
/* image signal processor within OMAP3 SoC */


@ -301,5 +301,5 @@ &usb_otg_hs {
&vaux1 {
/* Needed for ads7846 */
regulator-name = "vcc";
regulator-name = "vcc";
};


@ -156,7 +156,7 @@ battery: n900-battery {
io-channel-names = "temp", "bsi", "vbat";
};
pwm9: dmtimer-pwm {
pwm9: pwm-9 {
compatible = "ti,omap-dmtimer-pwm";
#pwm-cells = <3>;
ti,timers = <&timer9>;
@ -236,27 +236,27 @@ gpmc_pins: pinmux_gpmc_pins {
pinctrl-single,pins = <
/* address lines */
OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0) /* gpmc_a1.gpmc_a1 */
OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0) /* gpmc_a2.gpmc_a2 */
OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0) /* gpmc_a3.gpmc_a3 */
OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0) /* gpmc_a1.gpmc_a1 */
OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0) /* gpmc_a2.gpmc_a2 */
OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0) /* gpmc_a3.gpmc_a3 */
/* data lines, gpmc_d0..d7 not muxable according to TRM */
OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0) /* gpmc_d8.gpmc_d8 */
OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0) /* gpmc_d9.gpmc_d9 */
OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0) /* gpmc_d10.gpmc_d10 */
OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0) /* gpmc_d11.gpmc_d11 */
OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0) /* gpmc_d12.gpmc_d12 */
OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0) /* gpmc_d13.gpmc_d13 */
OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0) /* gpmc_d14.gpmc_d14 */
OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0) /* gpmc_d15.gpmc_d15 */
OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0) /* gpmc_d8.gpmc_d8 */
OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0) /* gpmc_d9.gpmc_d9 */
OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0) /* gpmc_d10.gpmc_d10 */
OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0) /* gpmc_d11.gpmc_d11 */
OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0) /* gpmc_d12.gpmc_d12 */
OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0) /* gpmc_d13.gpmc_d13 */
OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0) /* gpmc_d14.gpmc_d14 */
OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0) /* gpmc_d15.gpmc_d15 */
/*
* gpmc_ncs0, gpmc_nadv_ale, gpmc_noe, gpmc_nwe, gpmc_wait0 not muxable
* according to TRM. OneNAND seems to require PIN_INPUT on clock.
*/
OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ncs1.gpmc_ncs1 */
OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0) /* gpmc_clk.gpmc_clk */
>;
OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ncs1.gpmc_ncs1 */
OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0) /* gpmc_clk.gpmc_clk */
>;
};
i2c1_pins: pinmux_i2c1_pins {
@ -738,12 +738,12 @@ tpa6130a2: tpa6130a2@60 {
si4713: si4713@63 {
compatible = "silabs,si4713";
reg = <0x63>;
reg = <0x63>;
interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */
reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */
vio-supply = <&vio>;
vdd-supply = <&vaux1>;
interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */
reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */
vio-supply = <&vio>;
vdd-supply = <&vaux1>;
};
bq24150a: bq24150a@6b {


@ -23,9 +23,9 @@ memory@80000000 {
};
vddvario: regulator-vddvario {
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
compatible = "regulator-fixed";
regulator-name = "vddvario";
regulator-always-on;
};
vdd33a: regulator-vdd33a {
@ -84,28 +84,28 @@ OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0) /* uart1_cts.uart1_cts */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0) /* uart1_rts.uart1_rts */
OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0) /* uart1_cts.uart1_cts */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0) /* uart1_rts.uart1_rts */
OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */
>;
};
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
>;
};
uart3_pins: pinmux_uart3_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
>;
};
@ -205,22 +205,22 @@ wlcore: wlcore@2 {
};
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
};
&uart4 {
status = "disabled";
status = "disabled";
};
&usb_otg_hs {


@ -15,21 +15,24 @@ cpu_thermal: cpu_thermal {
polling-delay-passive = <250>; /* milliseconds */
polling-delay = <1000>; /* milliseconds */
/* sensor ID */
thermal-sensors = <&bandgap 0>;
/*
* See 44xx files for single sensor addressing, omap5 and dra7 need
* also sensor ID for addressing.
*/
thermal-sensors = <&bandgap 0>;
cpu_trips: trips {
cpu_alert0: cpu_alert {
temperature = <100000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
type = "passive";
};
cpu_crit: cpu_crit {
temperature = <125000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
type = "critical";
};
};
cpu_alert0: cpu_alert {
temperature = <100000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
type = "passive";
};
cpu_crit: cpu_crit {
temperature = <125000>; /* millicelsius */
hysteresis = <2000>; /* millicelsius */
type = "critical";
};
};
cpu_cooling_maps: cooling-maps {
map0 {


@ -72,6 +72,7 @@ abb_iva: regulator-abb-iva {
};
&cpu_thermal {
thermal-sensors = <&bandgap>;
coefficients = <0 20000>;
};


@ -89,6 +89,7 @@ abb_iva: regulator-abb-iva {
};
&cpu_thermal {
thermal-sensors = <&bandgap>;
coefficients = <348 (-9301)>;
};


@ -84,36 +84,36 @@ led1 {
};
lcd0: display {
compatible = "startek,startek-kd050c", "panel-dpi";
label = "lcd";
compatible = "startek,startek-kd050c", "panel-dpi";
label = "lcd";
pinctrl-names = "default";
pinctrl-0 = <&lcd_pins>;
pinctrl-names = "default";
pinctrl-0 = <&lcd_pins>;
enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>;
enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>;
panel-timing {
clock-frequency = <33000000>;
hactive = <800>;
vactive = <480>;
hfront-porch = <40>;
hback-porch = <40>;
hsync-len = <43>;
vback-porch = <29>;
vfront-porch = <13>;
vsync-len = <3>;
hsync-active = <0>;
vsync-active = <0>;
de-active = <1>;
pixelclk-active = <1>;
};
panel-timing {
clock-frequency = <33000000>;
hactive = <800>;
vactive = <480>;
hfront-porch = <40>;
hback-porch = <40>;
hsync-len = <43>;
vback-porch = <29>;
vfront-porch = <13>;
vsync-len = <3>;
hsync-active = <0>;
vsync-active = <0>;
de-active = <1>;
pixelclk-active = <1>;
};
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_lcd_out>;
};
};
};
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_lcd_out>;
};
};
};
hdmi0: connector0 {
compatible = "hdmi-connector";
@ -644,8 +644,8 @@ &usbhsehci {
};
&usb3 {
extcon = <&extcon_usb3>;
vbus-supply = <&smps10_out1_reg>;
extcon = <&extcon_usb3>;
vbus-supply = <&smps10_out1_reg>;
};
&cpu0 {


@ -79,6 +79,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
@ -130,6 +131,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
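
These MIDR definitions are consumed by code that compares them against the running CPU's MIDR_EL1 value. As a rough illustration only (the helper name below is hypothetical; read_cpuid_id() and MIDR_CPU_MODEL_MASK are the existing cputype.h helpers), a check for the new part could look like:

#include <asm/cputype.h>

/* Hypothetical helper: does this CPU identify itself as Cortex-A520? */
static inline bool cpu_is_cortex_a520(void)
{
	u32 midr = read_cpuid_id();

	/* Match implementer/architecture/part number, ignore variant and revision. */
	return (midr & MIDR_CPU_MODEL_MASK) == MIDR_CORTEX_A520;
}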


@ -164,6 +164,7 @@ static struct platform_device db1x00_audio_dev = {
/******************************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
@ -369,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
@ -432,8 +434,10 @@ static struct platform_device *db1x00_devs[] = {
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
#endif
};
int __init db1000_dev_setup(void)


@ -326,6 +326,7 @@ static struct platform_device db1200_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
@ -584,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@ -751,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = {
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
@ -762,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = {
};
static struct platform_device *pb1200_devs[] __initdata = {
#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
#endif
};
/* Some peripheral base addresses differ on the PB1200 */


@ -450,6 +450,7 @@ static struct platform_device db1300_ide_dev = {
/**********************************************************************/
#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
@ -632,6 +633,7 @@ static struct platform_device db1300_sd0_dev = {
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@ -776,8 +778,10 @@ static struct platform_device *db1300_dev[] __initdata = {
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,


@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
for the semaphore. */
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is implemented, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd).
Although the cache control hint is accepted by all PA 2.0 processors,
it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
require 16-byte alignment. If the address is unaligned, the operation
of the instruction is undefined. The ldcw instruction does not generate
unaligned data reference traps so misaligned accesses are not detected.
This hid the problem for years. So, restore the 16-byte alignment dropped
by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is specified, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
#endif /*!CONFIG_PA20*/
#else
#define __LDCW "ldcw"
#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
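
The restored 16-byte path boils down to rounding the lock-word address up to the next 16-byte boundary inside the four-int array, which is why lock[4] plus the guaranteed 8-byte minimum alignment is always sufficient. A standalone sketch of just that arithmetic (not the kernel macro itself):

#define LDCW_ALIGNMENT	16UL	/* stand-in for __PA_LDCW_ALIGNMENT */

static unsigned long ldcw_pick_word(unsigned long lock_array)
{
	/* e.g. lock[4] starting at 0x1008 -> aligned word at 0x1010, which
	 * still lies inside the 16 bytes spanned by the array (0x1008..0x1017). */
	return (lock_array + LDCW_ALIGNMENT - 1) & ~(LDCW_ALIGNMENT - 1);
}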


@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
extern struct sba_device *sba_list;
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804


@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
} arch_spinlock_t;


@ -925,9 +925,9 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p


@ -388,7 +388,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif


@ -479,11 +479,13 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
struct arch_hw_breakpoint *info;
int i;
preempt_disable();
for (i = 0; i < nr_wp_slots(); i++) {
if (unlikely(tsk->thread.last_hit_ubp[i]))
goto reset;
}
return;
goto out;
reset:
regs->msr &= ~MSR_SE;
@ -492,6 +494,9 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
__set_breakpoint(i, info);
tsk->thread.last_hit_ubp[i] = NULL;
}
out:
preempt_enable();
}
static bool is_larx_stcx_instr(int type)


@ -1410,7 +1410,7 @@ static int h_24x7_event_init(struct perf_event *event)
}
domain = event_get_domain(event);
if (domain >= HV_PERF_DOMAIN_MAX) {
if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
pr_devel("invalid domain %d\n", domain);
return -EINVAL;
}


@ -2344,7 +2344,7 @@ static void __init srso_select_mitigation(void)
switch (srso_cmd) {
case SRSO_CMD_OFF:
return;
goto pred_cmd;
case SRSO_CMD_MICROCODE:
if (has_microcode) {
@ -2622,7 +2622,7 @@ static ssize_t srso_show_state(char *buf)
return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
(cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,


@ -9,8 +9,7 @@
# KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
HOSTFLAGS += -Iarch/$(ARCH)/boot/include
KBUILD_CFLAGS += -fno-builtin
BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")


@ -4,13 +4,14 @@
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
void exit (void)
static void exit(void)
{
for (;;);
}
void *zalloc(unsigned size)
static void *zalloc(unsigned int size)
{
void *p = avail_ram;


@ -6,6 +6,10 @@
#include <variant/core.h>
#ifndef XCHAL_HAVE_DIV32
#define XCHAL_HAVE_DIV32 0
#endif
#ifndef XCHAL_HAVE_EXCLUSIVE
#define XCHAL_HAVE_EXCLUSIVE 0
#endif


@ -204,7 +204,7 @@ static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
unsigned short tuntap_protocol(struct sk_buff *skb)
static unsigned short tuntap_protocol(struct sk_buff *skb)
{
return eth_type_trans(skb, skb->dev);
}
@ -477,7 +477,7 @@ static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
void iss_net_user_timer_expire(struct timer_list *unused)
static void iss_net_user_timer_expire(struct timer_list *unused)
{
}


@ -420,8 +420,6 @@ void blk_cleanup_queue(struct request_queue *q)
blk_mq_sched_free_requests(q);
mutex_unlock(&q->sysfs_lock);
percpu_ref_exit(&q->q_usage_counter);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}


@ -726,6 +726,8 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
struct request_queue *q = container_of(rcu_head, struct request_queue,
rcu_head);
percpu_ref_exit(&q->q_usage_counter);
kmem_cache_free(blk_requestq_cachep, q);
}
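
Moving percpu_ref_exit() into the RCU callback is the generic cure for this class of use-after-free: whatever an RCU reader may still dereference must stay initialized until the grace period ends. A minimal sketch of the pattern with made-up types (not the block layer's actual structures):

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref usage;	/* analogous to q->q_usage_counter */
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	percpu_ref_exit(&f->usage);	/* no RCU reader can reach f anymore */
	kfree(f);
}

static void foo_release(struct foo *f)
{
	/*
	 * Do not call percpu_ref_exit() here: a reader that looked f up
	 * under rcu_read_lock() may still touch f->usage until the grace
	 * period has elapsed.
	 */
	call_rcu(&f->rcu, foo_free_rcu);
}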


@ -1326,4 +1326,33 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
/**
* acpi_storage_d3 - Check if D3 should be used in the suspend path
* @dev: Device to check
*
* Return %true if the platform firmware wants @dev to be programmed
* into D3hot or D3cold (if supported) in the suspend path, or %false
* when there is no specific preference. On some platforms, if this
* hint is ignored, @dev may remain unresponsive after suspending the
* platform as a whole.
*
* Although the property has storage in the name it actually is
* applied to the PCIe slot and plugging in a non-storage device the
* same platform restrictions will likely apply.
*/
bool acpi_storage_d3(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u8 val;
if (!adev)
return false;
if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
&val))
return false;
return val == 1;
}
EXPORT_SYMBOL_GPL(acpi_storage_d3);
#endif /* CONFIG_PM */
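
Callers treat the return value as a firmware preference to honour in their suspend policy. A minimal sketch of how a PCI driver might consume it at probe time (the quirk flag and function below are hypothetical; upstream nvme-pci applies the hint in a similar way):

#include <linux/acpi.h>
#include <linux/pci.h>

/* Hypothetical driver flag; real drivers keep their own quirk bits. */
#define MYDRV_QUIRK_SIMPLE_SUSPEND	BIT(0)

static unsigned long mydrv_probe_quirks(struct pci_dev *pdev)
{
	unsigned long quirks = 0;

	/*
	 * Firmware asked for D3 on suspend for this slot, so skip the
	 * driver's own power-saving path and let PCI put the device in D3.
	 */
	if (acpi_storage_d3(&pdev->dev))
		quirks |= MYDRV_QUIRK_SIMPLE_SUSPEND;

	return quirks;
}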


@ -50,7 +50,8 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
board_ahci_mobile,
board_ahci_low_power,
board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@ -135,13 +136,20 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_mobile] = {
[board_ahci_low_power] = {
AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_no_debounce_delay] = {
.flags = AHCI_FLAG_COMMON,
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@ -268,13 +276,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@ -284,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
@ -309,9 +317,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@ -320,29 +328,29 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
{ PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@ -374,26 +382,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
{ PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@ -410,13 +418,15 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
{ PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@ -442,8 +452,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@ -703,7 +714,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
@ -1495,7 +1506,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
u32 irq_stat, irq_masked;
unsigned int handled = 1;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
@ -1512,7 +1522,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}


@ -332,7 +332,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = {
static const struct ata_port_info ahci_brcm_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
.link_flags = ATA_LFLAG_NO_DB_DELAY,
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_brcm_platform_ops,


@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}


@ -1199,6 +1199,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
return sprintf(buf, "%d\n", emp->blink_policy);
}
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
@ -1213,18 +1233,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << port_no, mmio + HOST_IRQ_STAT);
ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
@ -1251,10 +1260,10 @@ void ahci_init_controller(struct ata_host *host)
}
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
@ -1554,6 +1563,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
ahci_port_clear_pending_irq(ap);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
@ -1905,8 +1916,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
VPRINTK("ENTER\n");
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
@ -1914,8 +1923,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
ahci_handle_port_interrupt(ap, port_mmio, status);
spin_unlock(ap->lock);
VPRINTK("EXIT\n");
return IRQ_HANDLED;
}
@ -1932,9 +1939,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_warn(host->dev,
"interrupt on disabled port %u\n", i);
@ -1955,8 +1960,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
@ -1984,8 +1987,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}


@ -4974,17 +4974,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
/* request PM ops to EH */
spin_lock_irqsave(ap->lock, flags);
/*
* A previous PM operation might still be in progress. Wait for
* ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
spin_lock_irqsave(ap->lock, flags);
}
/* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@ -4996,10 +4998,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
if (!async) {
if (!async)
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
}
/*
@ -5167,7 +5167,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
.name = "ata_port",
.name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@ -5915,11 +5915,30 @@ static void ata_port_detach(struct ata_port *ap)
if (!ap->ops->error_handler)
goto skip_eh;
/* tell EH we're leaving & flush EH */
/* Wait for any ongoing EH */
ata_port_wait_eh(ap);
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* Remove scsi devices */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
if (dev->sdev) {
spin_unlock_irqrestore(ap->lock, flags);
scsi_remove_device(dev->sdev);
spin_lock_irqsave(ap->lock, flags);
dev->sdev = NULL;
}
}
}
/* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);


@ -2703,18 +2703,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
postreset(slave, classes);
}
/*
* Some controllers can't be frozen very well and may set spurious
* error conditions during reset. Clear accumulated error
* information and re-thaw the port if frozen. As reset is the
* final recovery action and we cross check link onlineness against
* device classification later, no hotplug event is lost by this.
*/
/* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
memset(&link->eh_info, 0, sizeof(link->eh_info));
link->eh_info.serror = 0;
if (slave)
memset(&slave->eh_info, 0, sizeof(link->eh_info));
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
if (ap->pflags & ATA_PFLAG_FROZEN)


@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
* immediately after resuming. Delay 200ms before
* debouncing.
*/
if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
ata_msleep(link->ap, 200);
/* is SControl restored correctly? */


@ -4259,7 +4259,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);


@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
static const struct device_type ata_port_sas_type = {
.name = ATA_PORT_TYPE_NAME,
};
/**
 * ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
dev->type = &ata_port_type;
if (ap->flags & ATA_FLAG_SAS_HOST)
dev->type = &ata_port_sas_type;
else
dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);


@ -30,6 +30,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
#define ATA_PORT_TYPE_NAME "ata_port"
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;


@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
(reg - rbnode->base_reg) / map->reg_stride,
value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}


@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@ -1047,15 +1046,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
static void rbd_image_header_cleanup(struct rbd_image_header *header)
{
kfree(header->object_prefix);
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
memset(header, 0, sizeof(*header));
}
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
bool first_time)
{
struct rbd_image_header *header = &rbd_dev->header;
bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@ -1122,11 +1130,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
rbd_init_layout(rbd_dev);
} else {
ceph_put_snap_context(header->snapc);
kfree(header->snap_names);
kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@ -4916,7 +4919,9 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@ -4964,7 +4969,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
ret = rbd_header_from_disk(rbd_dev, ondisk);
ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@ -4989,39 +4994,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
int ret;
down_write(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
ret = rbd_dev_header_info(rbd_dev);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto out;
}
rbd_assert(!rbd_is_snap(rbd_dev));
rbd_dev->mapping.size = rbd_dev->header.image_size;
out:
up_write(&rbd_dev->header_rwsem);
if (!ret && mapping_size != rbd_dev->mapping.size)
rbd_dev_update_size(rbd_dev);
return ret;
}
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
@ -5576,17 +5548,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.obj_order,
&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
char **pobject_prefix)
{
size_t size;
void *reply_buf;
char *object_prefix;
int ret;
void *p;
@ -5604,16 +5571,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
p + ret, NULL, GFP_NOIO);
object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
GFP_NOIO);
if (IS_ERR(object_prefix)) {
ret = PTR_ERR(object_prefix);
goto out;
}
ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
*pobject_prefix = object_prefix;
dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@ -5664,13 +5631,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
rbd_is_ro(rbd_dev),
&rbd_dev->header.features);
}
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@ -5707,6 +5667,14 @@ struct parent_image_info {
u64 overlap;
};
static void rbd_parent_info_cleanup(struct parent_image_info *pii)
{
kfree(pii->pool_ns);
kfree(pii->image_id);
memset(pii, 0, sizeof(*pii));
}
/*
* The caller is responsible for @pii.
*/
@ -5776,6 +5744,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
@ -5814,14 +5785,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
static int get_parent_info(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@ -5849,7 +5823,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@ -5859,37 +5833,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
ret = get_parent_info(rbd_dev, &pii);
ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
pii.has_overlap, pii.overlap);
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
}
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
}
/* The ceph file layout needs to fit pool id in 32 bits */
@ -5901,58 +5850,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
* The parent won't change (except when the clone is
* flattened, already handled that). So we only need to
* record the parent spec we have not already done so.
* The parent won't change except when the clone is flattened,
* so we only need to record the parent image spec once.
*/
if (!rbd_dev->parent_spec) {
parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
parent_spec->image_id = pii.image_id;
pii.image_id = NULL;
parent_spec->snap_id = pii.snap_id;
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
parent_spec->image_id = pii.image_id;
pii.image_id = NULL;
parent_spec->snap_id = pii.snap_id;
rbd_assert(!rbd_dev->parent_spec);
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
/*
* We always update the parent overlap. If it's zero we issue
* a warning, as we will proceed as if there was no parent.
* Record the parent overlap. If it's zero, issue a warning as
* we will proceed as if there is no parent.
*/
if (!pii.overlap) {
if (parent_spec) {
/* refresh, careful to warn just once */
if (rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone now standalone (overlap became 0)");
} else {
/* initial probe */
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
}
}
if (!pii.overlap)
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
kfree(pii.pool_ns);
kfree(pii.image_id);
rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@ -5964,27 +5901,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
p = &striping_info_buf;
rbd_dev->header.stripe_unit = ceph_decode_64(&p);
rbd_dev->header.stripe_count = ceph_decode_64(&p);
*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
*stripe_count);
return 0;
}
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
__le64 data_pool_id;
__le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
NULL, 0, &data_pool_id, sizeof(data_pool_id));
NULL, 0, &data_pool_buf,
sizeof(data_pool_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof(data_pool_id))
if (ret < sizeof(data_pool_buf))
return -EBADMSG;
rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
*data_pool_id = le64_to_cpu(data_pool_buf);
dout(" data_pool_id = %lld\n", *data_pool_id);
WARN_ON(*data_pool_id == CEPH_NOPOOL);
return 0;
}
@ -6176,7 +6119,8 @@ static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@ -6237,9 +6181,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = snapc;
*psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@ -6288,38 +6230,42 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
ret = rbd_dev_v2_image_size(rbd_dev);
ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
first_time ? &header->obj_order : NULL,
&header->image_size);
if (ret)
return ret;
if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
ret = rbd_dev_v2_snap_context(rbd_dev);
if (ret && first_time) {
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
}
ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
if (ret)
return ret;
return ret;
return 0;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
static int rbd_dev_header_info(struct rbd_device *rbd_dev,
struct rbd_image_header *header,
bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
return rbd_dev_v1_header_info(rbd_dev);
return rbd_dev_v1_header_info(rbd_dev, header, first_time);
return rbd_dev_v2_header_info(rbd_dev);
return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@ -6806,60 +6752,49 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
header = &rbd_dev->header;
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
kfree(header->object_prefix);
memset(header, 0, sizeof (*header));
rbd_image_header_cleanup(&rbd_dev->header);
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
int ret;
ret = rbd_dev_v2_object_prefix(rbd_dev);
ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
goto out_err;
return ret;
/*
* Get and check the features for the image. Currently the
* features are assumed to never change.
*/
ret = rbd_dev_v2_features(rbd_dev);
ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
rbd_is_ro(rbd_dev), &header->features);
if (ret)
goto out_err;
return ret;
/* If the image supports fancy striping, get its parameters */
if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
ret = rbd_dev_v2_data_pool(rbd_dev);
if (header->features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
&header->stripe_count);
if (ret)
goto out_err;
return ret;
}
rbd_init_layout(rbd_dev);
return 0;
if (header->features & RBD_FEATURE_DATA_POOL) {
ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
return ret;
}
out_err:
rbd_dev->header.features = 0;
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
return ret;
return 0;
}
/*
@ -7054,13 +6989,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
ret = rbd_dev_header_info(rbd_dev);
ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
rbd_init_layout(rbd_dev);
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@ -7089,7 +7026,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@ -7115,6 +7052,107 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
return ret;
}
static void rbd_dev_update_header(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
if (rbd_dev->header.image_size != header->image_size) {
rbd_dev->header.image_size = header->image_size;
if (!rbd_is_snap(rbd_dev)) {
rbd_dev->mapping.size = header->image_size;
rbd_dev_update_size(rbd_dev);
}
}
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = header->snapc;
header->snapc = NULL;
if (rbd_dev->image_format == 1) {
kfree(rbd_dev->header.snap_names);
rbd_dev->header.snap_names = header->snap_names;
header->snap_names = NULL;
kfree(rbd_dev->header.snap_sizes);
rbd_dev->header.snap_sizes = header->snap_sizes;
header->snap_sizes = NULL;
}
}
static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone has been flattened\n",
rbd_dev->disk->disk_name);
}
} else {
rbd_assert(rbd_dev->parent_spec);
/*
* Update the parent overlap. If it became zero, issue
* a warning as we will proceed as if there is no parent.
*/
if (!pii->overlap && rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone has become standalone (overlap 0)");
rbd_dev->parent_overlap = pii->overlap;
}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
struct rbd_image_header header = { 0 };
struct parent_image_info pii = { 0 };
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = rbd_dev_header_info(rbd_dev, &header, false);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out;
}
down_write(&rbd_dev->header_rwsem);
rbd_dev_update_header(rbd_dev, &header);
if (rbd_dev->parent)
rbd_dev_update_parent(rbd_dev, &pii);
up_write(&rbd_dev->header_rwsem);
out:
rbd_parent_info_cleanup(&pii);
rbd_image_header_cleanup(&header);
return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
const char *buf,
size_t count)


@ -38,6 +38,7 @@ enum sysc_soc {
SOC_2420,
SOC_2430,
SOC_3430,
SOC_AM35,
SOC_3630,
SOC_4430,
SOC_4460,
@ -1113,6 +1114,11 @@ static int sysc_enable_module(struct device *dev)
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
/* Clear WAKEUP */
if (regbits->enwkup_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
reg &= ~BIT(regbits->enwkup_shift);
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
@ -1233,6 +1239,13 @@ static int sysc_disable_module(struct device *dev)
}
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
/* Set WAKEUP */
if (regbits->enwkup_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
reg |= BIT(regbits->enwkup_shift);
}
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
@ -1496,16 +1509,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@ -1818,7 +1831,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
__func__, val, irq_mask);
if (sysc_soc->soc == SOC_3430) {
if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
/* Clear DSS_SDI_CONTROL */
sysc_write(ddata, 0x44, 0);
@ -2085,8 +2098,7 @@ static int sysc_reset(struct sysc *ddata)
}
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
@ -2960,6 +2972,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
SOC_FLAG("OMAP443*", SOC_4430),
@ -3147,7 +3160,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
if (sysc_soc->soc == SOC_3430)
if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
error = -ENXIO;
else
error = -EBUSY;


@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;


@ -60,6 +60,8 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
struct imx_pll14xx_clk imx_1443x_pll = {


@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
return err;
return 0;
return response.rate;
}


@ -966,7 +966,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
return pinctrl_gpio_set_config(offset, config);
return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */


@ -338,6 +338,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
pmic_eic->chip.can_sleep = true;
pmic_eic->intc.name = dev_name(&pdev->dev);
pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;


@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:


@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
return ret;
goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
err_remove_domain:
irq_domain_remove(tb10x_gpio->domain);
return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)


@ -1907,6 +1907,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}


@ -675,6 +675,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
{
struct i2c_msg *msgs;
int msgs_num;
bool do_complete = false;
msgs = bus->msgs;
msgs_num = bus->msgs_num;
@ -701,23 +702,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
msgs[1].flags & I2C_M_RD)
msgs[1].len = info;
}
if (completion_done(&bus->cmd_complete) == false)
complete(&bus->cmd_complete);
break;
do_complete = true;
break;
case I2C_NACK_IND:
/* MASTER transmit got a NACK before tx all bytes */
bus->cmd_err = -ENXIO;
if (bus->master_or_slave == I2C_MASTER)
complete(&bus->cmd_complete);
do_complete = true;
break;
case I2C_BUS_ERR_IND:
/* Bus error */
bus->cmd_err = -EAGAIN;
if (bus->master_or_slave == I2C_MASTER)
complete(&bus->cmd_complete);
do_complete = true;
break;
case I2C_WAKE_UP_IND:
/* I2C wake up */
@ -731,6 +726,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
if (bus->slave)
bus->master_or_slave = I2C_SLAVE;
#endif
if (do_complete)
complete(&bus->cmd_complete);
}
static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)


@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
if (!props[i].name || !props[i].value) {
err = -ENOMEM;
goto err_rollback;
}
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);


@ -4723,7 +4723,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
struct ib_sa_multicast ib;
struct ib_sa_multicast ib = {};
enum ib_gid_type gid_type;
bool send_only;


@ -221,7 +221,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
for (i = 0; i < ports_num; i++) {
char port_str[10];
char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);


@ -2148,6 +2148,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,


@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
if (count < method_elm->req_size + sizeof(hdr)) {
if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't


@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[11];
char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;


@ -2069,7 +2069,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
return NULL;
return "Unknown";
}
}


@ -973,6 +973,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@ -1097,9 +1098,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
siw_dbg_cep(cep, "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
if (cep->listen_cep) {
siw_dbg_cep(cep,
"no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
}
}
release_cep = 1;
@ -1222,7 +1226,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state);
siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _I8042_X86IA64IO_H
#define _I8042_X86IA64IO_H
#ifndef _I8042_ACPIPNPIO_H
#define _I8042_ACPIPNPIO_H
#ifdef CONFIG_X86
@ -1184,6 +1184,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
},
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
@ -1587,4 +1594,4 @@ static inline void i8042_platform_exit(void)
i8042_pnp_exit();
}
#endif /* _I8042_X86IA64IO_H */
#endif /* _I8042_ACPIPNPIO_H */


@ -20,7 +20,7 @@
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
#include "i8042-x86ia64io.h"
#include "i8042-acpipnpio.h"
#else
#include "i8042-io.h"
#endif


@ -750,17 +750,16 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
/*
* Cleanup zoned device information.
*/
static void dmz_put_zoned_device(struct dm_target *ti)
static void dmz_put_zoned_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
int i;
for (i = 0; i < dmz->nr_ddevs; i++) {
if (dmz->ddev[i]) {
for (i = 0; i < dmz->nr_ddevs; i++)
if (dmz->ddev[i])
dm_put_device(ti, dmz->ddev[i]);
dmz->ddev[i] = NULL;
}
}
kfree(dmz->ddev);
}
static int dmz_fixup_devices(struct dm_target *ti)
@ -951,7 +950,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
err_meta:
dmz_dtr_metadata(dmz->metadata);
err_dev:
dmz_put_zoned_device(ti);
dmz_put_zoned_devices(ti);
err:
kfree(dmz->dev);
kfree(dmz);
@ -982,7 +981,7 @@ static void dmz_dtr(struct dm_target *ti)
bioset_exit(&dmz->bio_set);
dmz_put_zoned_device(ti);
dmz_put_zoned_devices(ti);
mutex_destroy(&dmz->chunk_lock);


@ -21,6 +21,7 @@
#include "core.h"
#include "firmware.h"
#include "pm_helpers.h"
#include "hfi_venus_io.h"
static void venus_event_notify(struct venus_core *core, u32 event)
{
@ -210,6 +211,15 @@ static int venus_enumerate_codecs(struct venus_core *core, u32 type)
return ret;
}
static void venus_assign_register_offsets(struct venus_core *core)
{
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE;
core->cpu_cs_base = core->base + CPU_CS_BASE;
core->cpu_ic_base = core->base + CPU_IC_BASE;
core->wrapper_base = core->base + WRAPPER_BASE;
}
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -276,6 +286,8 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_core_put;
venus_assign_register_offsets(core);
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
goto err_core_deinit;


@ -119,6 +119,11 @@ struct venus_caps {
* struct venus_core - holds core parameters valid for all instances
*
* @base: IO memory base address
* @vbif_base IO memory vbif base address
* @cpu_base IO memory cpu base address
* @cpu_cs_base IO memory cpu_cs base address
* @cpu_ic_base IO memory cpu_ic base address
* @wrapper_base IO memory wrapper base address
* @irq: Venus irq
* @clks: an array of struct clk pointers
* @vcodec0_clks: an array of vcodec0 struct clk pointers
@ -152,6 +157,11 @@ struct venus_caps {
*/
struct venus_core {
void __iomem *base;
void __iomem *vbif_base;
void __iomem *cpu_base;
void __iomem *cpu_cs_base;
void __iomem *cpu_ic_base;
void __iomem *wrapper_base;
int irq;
struct clk *clks[VIDC_CLKS_NUM_MAX];
struct clk *vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX];
@ -416,6 +426,7 @@ struct venus_inst {
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
#define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX)
#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX)
#define IS_V6(core) ((core)->res->hfi_version == HFI_VERSION_6XX)
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)


@ -27,19 +27,19 @@
static void venus_reset_cpu(struct venus_core *core)
{
u32 fw_size = core->fw.mapped_mem_size;
void __iomem *base = core->base;
void __iomem *wrapper_base = core->wrapper_base;
writel(0, base + WRAPPER_FW_START_ADDR);
writel(fw_size, base + WRAPPER_FW_END_ADDR);
writel(0, base + WRAPPER_CPA_START_ADDR);
writel(fw_size, base + WRAPPER_CPA_END_ADDR);
writel(fw_size, base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, base + WRAPPER_NONPIX_END_ADDR);
writel(0x0, base + WRAPPER_CPU_CGC_DIS);
writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG);
writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
/* Bring ARM9 out of reset */
writel(0, base + WRAPPER_A9SS_SW_RESET);
writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
}
int venus_set_hw_state(struct venus_core *core, bool resume)
@ -56,7 +56,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume)
venus_reset_cpu(core);
else
writel(1, core->base + WRAPPER_A9SS_SW_RESET);
writel(1, core->wrapper_base + WRAPPER_A9SS_SW_RESET);
return 0;
}
@ -159,12 +159,12 @@ static int venus_shutdown_no_tz(struct venus_core *core)
size_t unmapped;
u32 reg;
struct device *dev = core->fw.dev;
void __iomem *base = core->base;
void __iomem *wrapper_base = core->wrapper_base;
/* Assert the reset to ARM9 */
reg = readl_relaxed(base + WRAPPER_A9SS_SW_RESET);
reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
reg |= WRAPPER_A9SS_SW_RESET_BIT;
writel_relaxed(reg, base + WRAPPER_A9SS_SW_RESET);
writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
/* Make sure reset is asserted before the mapping is removed */
mb();


@ -345,16 +345,6 @@ static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
}
static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value)
{
writel(value, hdev->core->base + reg);
}
static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg)
{
return readl(hdev->core->base + reg);
}
static void venus_set_registers(struct venus_hfi_device *hdev)
{
const struct venus_resources *res = hdev->core->res;
@ -363,12 +353,14 @@ static void venus_set_registers(struct venus_hfi_device *hdev)
unsigned int i;
for (i = 0; i < count; i++)
venus_writel(hdev, tbl[i].reg, tbl[i].value);
writel(tbl[i].value, hdev->core->base + tbl[i].reg);
}
static void venus_soft_int(struct venus_hfi_device *hdev)
{
venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT));
void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
writel(BIT(CPU_IC_SOFTINT_H2A_SHIFT), cpu_ic_base + CPU_IC_SOFTINT);
}
static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
@ -439,16 +431,25 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
static const unsigned int max_tries = 100;
u32 ctrl_status = 0;
u32 ctrl_status = 0, mask_val;
unsigned int count = 0;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
void __iomem *wrapper_base = hdev->core->wrapper_base;
int ret = 0;
venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT));
venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK);
venus_writel(hdev, CPU_CS_SCIACMDARG3, 1);
if (IS_V6(hdev->core)) {
mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
WRAPPER_INTR_MASK_A2HCPU_MASK);
} else {
mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
}
writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
while (!ctrl_status && count < max_tries) {
ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
dev_err(dev, "invalid setting for UC_REGION\n");
ret = -EINVAL;
@ -462,15 +463,20 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
if (count >= max_tries)
ret = -ETIMEDOUT;
if (IS_V6(hdev->core))
writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
return ret;
}
static u32 venus_hwversion(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION);
void __iomem *wrapper_base = hdev->core->wrapper_base;
u32 ver;
u32 major, minor, step;
ver = readl(wrapper_base + WRAPPER_HW_VERSION);
major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
@ -485,6 +491,7 @@ static u32 venus_hwversion(struct venus_hfi_device *hdev)
static int venus_run(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
int ret;
/*
@ -493,12 +500,12 @@ static int venus_run(struct venus_hfi_device *hdev)
*/
venus_set_registers(hdev);
venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da);
venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE);
venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da);
venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01);
writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
if (hdev->sfr.da)
venus_writel(hdev, SFR_ADDR, hdev->sfr.da);
writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
ret = venus_boot_core(hdev);
if (ret) {
@ -513,17 +520,18 @@ static int venus_run(struct venus_hfi_device *hdev)
static int venus_halt_axi(struct venus_hfi_device *hdev)
{
void __iomem *base = hdev->core->base;
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *vbif_base = hdev->core->vbif_base;
struct device *dev = hdev->core->dev;
u32 val;
int ret;
if (IS_V4(hdev->core)) {
val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT);
val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
val |= WRAPPER_CPU_AXI_HALT_HALT;
venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val);
writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS,
ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
val,
val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
POLL_INTERVAL_US,
@ -537,12 +545,12 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
}
/* Halt AXI and AXI IMEM VBIF Access */
val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val);
writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
/* Request for AXI bus port halt */
ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val,
ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
POLL_INTERVAL_US,
VBIF_AXI_HALT_ACK_TIMEOUT_US);
@ -1035,19 +1043,21 @@ static irqreturn_t venus_isr(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
u32 status;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
void __iomem *wrapper_base = hdev->core->wrapper_base;
if (!hdev)
return IRQ_NONE;
status = venus_readl(hdev, WRAPPER_INTR_STATUS);
status = readl(wrapper_base + WRAPPER_INTR_STATUS);
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
hdev->irq_status = status;
venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1);
venus_writel(hdev, WRAPPER_INTR_CLEAR, status);
writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
return IRQ_WAKE_THREAD;
}
@ -1380,6 +1390,7 @@ static int venus_suspend_1xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status;
int ret;
@ -1414,7 +1425,7 @@ static int venus_suspend_1xx(struct venus_core *core)
return -EINVAL;
}
ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
mutex_unlock(&hdev->lock);
return -EINVAL;
@ -1435,10 +1446,12 @@ static int venus_suspend_1xx(struct venus_core *core)
static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
{
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
@ -1449,10 +1462,12 @@ static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
{
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
@ -1465,6 +1480,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status;
bool val;
int ret;
@ -1481,7 +1497,7 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
goto power_off;


@ -8,27 +8,28 @@
#define VBIF_BASE 0x80000
#define VBIF_AXI_HALT_CTRL0 (VBIF_BASE + 0x208)
#define VBIF_AXI_HALT_CTRL1 (VBIF_BASE + 0x20c)
#define VBIF_AXI_HALT_CTRL0 0x208
#define VBIF_AXI_HALT_CTRL1 0x20c
#define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0)
#define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0)
#define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000
#define CPU_BASE 0xc0000
#define CPU_CS_BASE (CPU_BASE + 0x12000)
#define CPU_IC_BASE (CPU_BASE + 0x1f000)
#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE + 0x1c)
#define CPU_CS_A2HSOFTINTCLR 0x1c
#define VIDC_CTRL_INIT (CPU_CS_BASE + 0x48)
#define VIDC_CTRL_INIT 0x48
#define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe
#define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1
#define VIDC_CTRL_INIT_CTRL_MASK 0x1
#define VIDC_CTRL_INIT_CTRL_SHIFT 0
/* HFI control status */
#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE + 0x4c)
#define CPU_CS_SCIACMDARG0 0x4c
#define CPU_CS_SCIACMDARG0_MASK 0xff
#define CPU_CS_SCIACMDARG0_SHIFT 0x0
#define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe
@ -39,42 +40,55 @@
#define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30)
/* HFI queue table info */
#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE + 0x50)
#define CPU_CS_SCIACMDARG1 0x50
/* HFI queue table address */
#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE + 0x54)
#define CPU_CS_SCIACMDARG2 0x54
/* Venus cpu */
#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE + 0x58)
#define CPU_CS_SCIACMDARG3 0x58
#define SFR_ADDR (CPU_CS_BASE + 0x5c)
#define MMAP_ADDR (CPU_CS_BASE + 0x60)
#define UC_REGION_ADDR (CPU_CS_BASE + 0x64)
#define UC_REGION_SIZE (CPU_CS_BASE + 0x68)
#define SFR_ADDR 0x5c
#define MMAP_ADDR 0x60
#define UC_REGION_ADDR 0x64
#define UC_REGION_SIZE 0x68
#define CPU_IC_SOFTINT (CPU_IC_BASE + 0x18)
#define CPU_CS_H2XSOFTINTEN_V6 0x148
#define CPU_CS_X2RPMH_V6 0x168
#define CPU_CS_X2RPMH_MASK0_BMSK_V6 0x1
#define CPU_CS_X2RPMH_MASK0_SHFT_V6 0x0
#define CPU_CS_X2RPMH_MASK1_BMSK_V6 0x2
#define CPU_CS_X2RPMH_MASK1_SHFT_V6 0x1
#define CPU_CS_X2RPMH_SWOVERRIDE_BMSK_V6 0x4
#define CPU_CS_X2RPMH_SWOVERRIDE_SHFT_V6 0x3
/* Relative to CPU_IC_BASE */
#define CPU_IC_SOFTINT 0x18
#define CPU_IC_SOFTINT_V6 0x150
#define CPU_IC_SOFTINT_H2A_MASK 0x8000
#define CPU_IC_SOFTINT_H2A_SHIFT 0xf
#define CPU_IC_SOFTINT_H2A_SHIFT_V6 0x0
/* Venus wrapper */
#define WRAPPER_BASE 0x000e0000
#define WRAPPER_HW_VERSION (WRAPPER_BASE + 0x00)
#define WRAPPER_HW_VERSION 0x00
#define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000
#define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
#define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000
#define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
#define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff
#define WRAPPER_CLOCK_CONFIG (WRAPPER_BASE + 0x04)
#define WRAPPER_CLOCK_CONFIG 0x04
#define WRAPPER_INTR_STATUS (WRAPPER_BASE + 0x0c)
#define WRAPPER_INTR_STATUS 0x0c
#define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10
#define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4
#define WRAPPER_INTR_STATUS_A2H_MASK 0x4
#define WRAPPER_INTR_STATUS_A2H_SHIFT 0x2
#define WRAPPER_INTR_MASK (WRAPPER_BASE + 0x10)
#define WRAPPER_INTR_MASK 0x10
#define WRAPPER_INTR_MASK_A2HWD_BASK 0x10
#define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4
#define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8
@ -82,41 +96,59 @@
#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4
#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2
#define WRAPPER_INTR_CLEAR (WRAPPER_BASE + 0x14)
#define WRAPPER_INTR_STATUS_A2HWD_MASK_V6 0x8
#define WRAPPER_INTR_MASK_A2HWD_BASK_V6 0x8
#define WRAPPER_INTR_CLEAR 0x14
#define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10
#define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4
#define WRAPPER_INTR_CLEAR_A2H_MASK 0x4
#define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2
#define WRAPPER_POWER_STATUS (WRAPPER_BASE + 0x44)
#define WRAPPER_VDEC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x48)
#define WRAPPER_VENC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x4c)
#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET (WRAPPER_BASE + 0x64)
#define WRAPPER_POWER_STATUS 0x44
#define WRAPPER_VDEC_VCODEC_POWER_CONTROL 0x48
#define WRAPPER_VENC_VCODEC_POWER_CONTROL 0x4c
#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6 0x54
#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6 0x58
#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET 0x64
#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000)
#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008)
#define WRAPPER_CPU_CLOCK_CONFIG 0x2000
#define WRAPPER_CPU_AXI_HALT 0x2008
#define WRAPPER_CPU_AXI_HALT_HALT BIT(16)
#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c)
#define WRAPPER_CPU_AXI_HALT_STATUS 0x200c
#define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24)
#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010)
#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
#define WRAPPER_CPU_CGC_DIS 0x2010
#define WRAPPER_CPU_STATUS 0x2014
#define WRAPPER_CPU_STATUS_WFI BIT(0)
#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
#define WRAPPER_CPA_START_ADDR (WRAPPER_BASE + 0x1020)
#define WRAPPER_CPA_END_ADDR (WRAPPER_BASE + 0x1024)
#define WRAPPER_FW_START_ADDR (WRAPPER_BASE + 0x1028)
#define WRAPPER_FW_END_ADDR (WRAPPER_BASE + 0x102C)
#define WRAPPER_NONPIX_START_ADDR (WRAPPER_BASE + 0x1030)
#define WRAPPER_NONPIX_END_ADDR (WRAPPER_BASE + 0x1034)
#define WRAPPER_A9SS_SW_RESET (WRAPPER_BASE + 0x3000)
#define WRAPPER_SW_RESET 0x3000
#define WRAPPER_CPA_START_ADDR 0x1020
#define WRAPPER_CPA_END_ADDR 0x1024
#define WRAPPER_FW_START_ADDR 0x1028
#define WRAPPER_FW_END_ADDR 0x102C
#define WRAPPER_NONPIX_START_ADDR 0x1030
#define WRAPPER_NONPIX_END_ADDR 0x1034
#define WRAPPER_A9SS_SW_RESET 0x3000
#define WRAPPER_A9SS_SW_RESET_BIT BIT(4)
/* Venus 4xx */
#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90)
#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x94)
#define WRAPPER_VCODEC0_MMCC_POWER_STATUS 0x90
#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL 0x94
#define WRAPPER_VCODEC1_MMCC_POWER_STATUS (WRAPPER_BASE + 0x110)
#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x114)
#define WRAPPER_VCODEC1_MMCC_POWER_STATUS 0x110
#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL 0x114
/* Venus 6xx */
#define WRAPPER_CORE_POWER_STATUS_V6 0x80
#define WRAPPER_CORE_POWER_CONTROL_V6 0x84
/* Wrapper TZ 6xx */
#define WRAPPER_TZ_BASE_V6 0x000c0000
#define WRAPPER_TZ_CPU_STATUS_V6 0x10
/* Venus AON */
#define AON_BASE_V6 0x000e0000
#define AON_WRAPPER_MVP_NOC_LPI_CONTROL 0x00
#define AON_WRAPPER_MVP_NOC_LPI_STATUS 0x04
#endif


@ -304,9 +304,9 @@ vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable)
void __iomem *ctrl;
if (session_type == VIDC_SESSION_TYPE_DEC)
ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
ctrl = core->wrapper_base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
else
ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
ctrl = core->wrapper_base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
if (enable)
writel(0, ctrl);
@ -381,11 +381,11 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
int ret;
if (coreid == VIDC_CORE_ID_1) {
ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
} else {
ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
ctrl = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
}
if (enable) {


@ -556,16 +556,18 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
renesas_sdhi_reset_scc(host, priv);
renesas_sdhi_reset_hs400_mode(host, priv);
priv->needs_adjust_hs400 = false;
if (priv->scc_ctl) {
renesas_sdhi_reset_scc(host, priv);
renesas_sdhi_reset_hs400_mode(host, priv);
priv->needs_adjust_hs400 = false;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
}
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
@ -1010,11 +1012,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
if (of_data && of_data->scc_offset) {
priv->scc_ctl = host->ctl + of_data->scc_offset;
host->reset = renesas_sdhi_reset;
}
host->reset = renesas_sdhi_reset;
} else {
host->sdcard_irq_mask_all = TMIO_MASK_ALL;
}
/* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */
@ -1070,10 +1070,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
quirks->hs400_calib_table + 1);
}
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
/* Enable tuning iff we have an SCC and a supported mode */
if (of_data && of_data->scc_offset &&
(host->mmc->caps & MMC_CAP_UHS_SDR104 ||
@ -1098,6 +1094,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
priv->scc_ctl = host->ctl + of_data->scc_offset;
host->check_retune = renesas_sdhi_check_scc_error;
host->ops.execute_tuning = renesas_sdhi_execute_tuning;
host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
@ -1105,6 +1102,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
}
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
@ -1130,6 +1129,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
goto eirq;
}
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);


@ -161,6 +161,7 @@ struct tmio_mmc_host {
u32 sdio_irq_mask;
unsigned int clk_cache;
u32 sdcard_irq_setbit_mask;
u32 sdcard_irq_mask_all;
spinlock_t lock; /* protect host private data */
unsigned long last_req_ts;


@ -1158,7 +1158,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
tmio_mmc_reset(_host);
_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
if (!_host->sdcard_irq_mask_all)
_host->sdcard_irq_mask_all = TMIO_MASK_ALL;
tmio_mmc_disable_mmc_irqs(_host, _host->sdcard_irq_mask_all);
if (_host->native_hotplug)
tmio_mmc_enable_mmc_irqs(_host,
@ -1212,7 +1214,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all);
if (host->native_hotplug)
pm_runtime_put_noidle(&pdev->dev);
@ -1242,7 +1244,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all);
if (host->clk_cache)
host->set_clock(host, 0);


@@ -889,6 +889,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
/* UBI cannot work on flashes with zero erasesize. */
if (!mtd->erasesize) {
pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)


@@ -2316,14 +2316,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
mv88e6xxx_g1_wait_eeprom_done(chip);
if (chip->info->ops->get_eeprom)
mv88e6xxx_g2_eeprom_wait(chip);
}
}


@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
{
const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err) {
dev_err(chip->dev, "Error reading status");
return;
}
/* If the switch is still resetting, it may not
* respond on the bus, and so MDIO read returns
* 0xffff. Differentiate between that, and waiting for
* the EEPROM to be done by bit 0 being set.
*/
if (val != 0xffff &&
val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
return;
usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5


@@ -278,7 +278,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);


@@ -323,7 +323,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;


@@ -349,6 +349,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;


@@ -2404,6 +2404,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
struct rx_cmp_ext *rxcmp1;
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
bool flush_xdp = false;
u32 rx_pkts = 0;
u8 event = 0;
@@ -2438,6 +2439,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
if (event & BNXT_REDIRECT_EVENT)
flush_xdp = true;
} else if (unlikely(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE)) {
bnxt_hwrm_handler(bp, txcmp);
@@ -2457,6 +2460,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (event & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
if (flush_xdp)
xdp_do_flush();
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);


@@ -3125,8 +3125,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
#define HCLGE_IMP_RESET_DELAY 5
switch (event_type) {
case HCLGE_VECTOR0_EVENT_RST:
if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
mdelay(HCLGE_IMP_RESET_DELAY);
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
@@ -7850,7 +7855,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
if (mac_type == HCLGE_MAC_ADDR_UC) {
if (is_all_added)
vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
else
else if (hclge_is_umv_space_full(vport, true))
vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
} else {
if (is_all_added)


@@ -4332,9 +4332,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -4420,6 +4417,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");


@@ -111,9 +111,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
};
extern const struct qed_ll2_ops qed_ll2_ops_pass;


@@ -105,6 +105,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
if (!dwmac->dev->power.is_suspended) {
if (!dwmac->ops->clk_rx_enable_in_suspend ||
!dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -515,7 +517,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {


@@ -1496,6 +1496,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
if (tx_chn->irq <= 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
ret = tx_chn->irq ?: -ENXIO;
goto err;
}


@@ -2122,7 +2122,12 @@ static const struct ethtool_ops team_ethtool_ops = {
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
dev->header_ops = port_dev->header_ops;
struct team *team = netdev_priv(dev);
if (port_dev->type == ARPHRD_ETHER)
dev->header_ops = team->header_ops_cache;
else
dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
@@ -2169,8 +2174,11 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;


@@ -958,12 +958,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
} else if (skb_is_gso_v6(skb)) {
} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,


@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (unlikely(ret < 0)) {
if (unlikely(ret < 4)) {
ret = ret < 0 ? ret : -ENODATA;
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;
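The smsc75xx hunk above treats a short USB control-transfer read as a failure: a transfer that "succeeds" with fewer than the expected 4 bytes now maps to an error instead of leaving the caller with a partially filled register value. Below is a minimal, self-contained userspace sketch of that pattern; the callback type and names are hypothetical and only illustrate the check, they are not the driver's API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical transfer callback: returns bytes copied or a negative errno. */
typedef int (*xfer_fn)(void *buf, size_t len);

static int read_reg32(xfer_fn xfer, uint32_t *out)
{
	uint32_t buf = 0;
	int ret = xfer(&buf, sizeof(buf));

	/* Error or short read: report -ENODATA rather than success. */
	if (ret < (int)sizeof(buf))
		return ret < 0 ? ret : -ENODATA;

	*out = buf;	/* only use the buffer when all 4 bytes arrived */
	return 0;
}

/* Stub transfer that delivers only 2 of the 4 requested bytes. */
static int short_xfer(void *buf, size_t len)
{
	(void)len;
	memset(buf, 0xff, 2);
	return 2;
}

int main(void)
{
	uint32_t val;
	int ret = read_reg32(short_xfer, &val);

	printf("read_reg32 returned %d (expected %d, i.e. -ENODATA)\n", ret, -ENODATA);
	return 0;
}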


@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static int uhdlc_close(struct net_device *dev);
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
rc = hdlc_open(dev);
if (rc)
uhdlc_close(dev);
}
return 0;
return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
hdlc_close(dev);
return 0;
}


@@ -334,9 +334,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
__le32 internal_base_addr;
__le64 dram_base_addr;
__le32 page_num;
__le32 internal_base_addr __packed;
__le64 dram_base_addr __packed;
__le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};


@@ -977,8 +977,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
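The mwifiex_11n_rxba_sync_event hunk above fixes the stride of the TLV walk: each iteration must advance by the TLV header size plus the reported payload length, not by the size of the whole wrapper structure plus that length, otherwise the parser drifts past valid elements. A minimal userspace sketch of the corrected arithmetic follows; the header layout and the test buffer are hypothetical (and assume a little-endian host), not the driver's actual structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical TLV header: 2-byte type, 2-byte payload length (header excluded). */
struct tlv_hdr {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (buf_len - off >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (buf_len - off - sizeof(hdr) < hdr.len)
			break;	/* truncated payload: stop instead of overreading */

		printf("type=%u len=%u at offset %zu\n",
		       (unsigned)hdr.type, (unsigned)hdr.len, off);

		/* Advance by header + payload only; adding the size of a larger
		 * wrapper struct here would skip real bytes and misparse the rest. */
		off += sizeof(hdr) + hdr.len;
	}
}

int main(void)
{
	/* Two TLVs: (type 1, 2-byte payload 0xaa 0xbb) and (type 2, empty payload). */
	const uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb, 2, 0, 0, 0 };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}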


@@ -98,7 +98,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -107,12 +108,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type


@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
u16 val;
u8 lna;
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
*lna_2g = 0;
if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
memset(lna_5g, 0, sizeof(s8) * 3);
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)


@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
bool use_lna;
u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (chan->band == NL80211_BAND_2GHZ)
use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
else
use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
if (use_lna)
lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);


@@ -387,14 +387,6 @@ static int nvme_pci_npages_sgl(void)
NVME_CTRL_PAGE_SIZE);
}
static size_t nvme_pci_iod_alloc_size(void)
{
size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
return sizeof(__le64 *) * npages +
sizeof(struct scatterlist) * NVME_MAX_SEGS;
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -2557,6 +2549,22 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_small_pool);
}
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
size_t alloc_size = sizeof(__le64 *) * npages +
sizeof(struct scatterlist) * NVME_MAX_SEGS;
WARN_ON_ONCE(alloc_size > PAGE_SIZE);
dev->iod_mempool = mempool_create_node(1,
mempool_kmalloc, mempool_kfree,
(void *)alloc_size, GFP_KERNEL,
dev_to_node(dev->dev));
if (!dev->iod_mempool)
return -ENOMEM;
return 0;
}
static void nvme_free_tagset(struct nvme_dev *dev)
{
if (dev->tagset.tags)
@@ -2564,6 +2572,7 @@ static void nvme_free_tagset(struct nvme_dev *dev)
dev->ctrl.tagset = NULL;
}
/* pairs with nvme_pci_alloc_dev */
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
@@ -2840,32 +2849,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
#ifdef CONFIG_ACPI
static bool nvme_acpi_storage_d3(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
u8 val;
/*
* Look for _DSD property specifying that the storage device on the port
* must use D3 to support deep platform power savings during
* suspend-to-idle.
*/
if (!adev)
return false;
if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
&val))
return false;
return val == 1;
}
#else
static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
{
return false;
}
#endif /* CONFIG_ACPI */
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2875,20 +2858,20 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
nvme_put_ctrl(&dev->ctrl);
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int node, result = -ENOMEM;
struct nvme_dev *dev;
unsigned long quirks = id->driver_data;
size_t alloc_size;
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
set_dev_node(&pdev->dev, first_memory_node);
int node = dev_to_node(&pdev->dev);
struct nvme_dev *dev;
int ret = -ENOMEM;
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
mutex_init(&dev->shutdown_lock);
dev->nr_write_queues = write_queues;
dev->nr_poll_queues = poll_queues;
@@ -2896,26 +2879,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->queues = kcalloc_node(dev->nr_allocated_queues,
sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
goto out_free_dev;
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
result = nvme_dev_map(dev);
if (result)
goto put_pci;
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
mutex_init(&dev->shutdown_lock);
result = nvme_setup_prp_pools(dev);
if (result)
goto unmap;
quirks |= check_vendor_combination_bug(pdev);
if (!noacpi && nvme_acpi_storage_d3(pdev)) {
if (!noacpi && acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a bios work around to ask for D3 on
* platforms that support kernel managed suspend.
@@ -2924,46 +2893,54 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
"platform quirk: setting simple suspend\n");
quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
}
ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
quirks);
if (ret)
goto out_put_device;
return dev;
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
*/
alloc_size = nvme_pci_iod_alloc_size();
WARN_ON_ONCE(alloc_size > PAGE_SIZE);
out_put_device:
put_device(dev->dev);
kfree(dev->queues);
out_free_dev:
kfree(dev);
return ERR_PTR(ret);
}
dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
mempool_kfree,
(void *) alloc_size,
GFP_KERNEL, node);
if (!dev->iod_mempool) {
result = -ENOMEM;
goto release_pools;
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct nvme_dev *dev;
int result = -ENOMEM;
result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
quirks);
dev = nvme_pci_alloc_dev(pdev, id);
if (IS_ERR(dev))
return PTR_ERR(dev);
result = nvme_dev_map(dev);
if (result)
goto release_mempool;
goto out_uninit_ctrl;
result = nvme_setup_prp_pools(dev);
if (result)
goto out_dev_unmap;
result = nvme_pci_alloc_iod_mempool(dev);
if (result)
goto out_release_prp_pools;
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
pci_set_drvdata(pdev, dev);
nvme_reset_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
return 0;
release_mempool:
mempool_destroy(dev->iod_mempool);
release_pools:
out_release_prp_pools:
nvme_release_prp_pools(dev);
unmap:
out_dev_unmap:
nvme_dev_unmap(dev);
put_pci:
put_device(dev->dev);
free:
kfree(dev->queues);
kfree(dev);
out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
return result;
}

Some files were not shown because too many files have changed in this diff.