This is the 5.10.219 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmZuzl4ACgkQONu9yGCS
 aT7+ohAAyRkTis6xeME1FWIJthCJl8FzUq9nfN+OccK3TwCbXyZKXlZK8lVz0T5U
 DvG1Tg+rR76+hIJghMIy2FEPRBA19WMq9a+Ms2ZyyggPrlgksFivO8N8YgnIxabb
 EJPN7pAzO+GA+vd8YeGeK1ldq8QUISlN35s+hkur1jeBctWRcpHeOTjIej/Qytmj
 ny1o7hCp179+SPygSEYEYtguACaQflhfBjOgAQ9DwWjP6vO2W9Vb16X4tiT0udHm
 ExPjOwxbEMN/7m9gKrnl6WcIROSOy55FnfcYZP+NRY4jBlANUgXF1ca9hAhcIKSv
 oOyeRN5S3FZAdxIHG9SFU9b6MPwJSeO5ETQyfiRGNFRkXOa2tKknNSsuACu8kSwx
 SKJIpcuW1DkortwsYFbilXdl6TrK6oCcEczV5qtludcRoDznfUGejb5e81v3yYkO
 no6ORvBJSBnEObY+gpexvxQp2Ae1+YkSLJaDzYWMC+JHBIgWTz2F2qJJkP6bRAUV
 QduFTdTenDnL7zW3DseZGJKotU95cUoKNAwa7wfboZeygHc2+KaUOchKcqI0P9dZ
 pS27RzcAJJ2uufujofyxOOhzFKw98WFurfNsMZTDBwHuqReoiRAS7pi0PeTMuqUv
 GC8V1eIKgeWdI+pdTZLXylziiM41IylLjU/hxCrsykb+EwFa5NY=
 =B1lK
 -----END PGP SIGNATURE-----

Merge 5.10.219 into android12-5.10-lts

Changes in 5.10.219
	x86/tsc: Trust initial offset in architectural TSC-adjust MSRs
	tty: n_gsm: fix possible out-of-bounds in gsm0_receive()
	speakup: Fix sizeof() vs ARRAY_SIZE() bug
	ring-buffer: Fix a race between readers and resize checks
	net: smc91x: Fix m68k kernel compilation for ColdFire CPU
	nilfs2: fix unexpected freezing of nilfs_segctor_sync()
	nilfs2: fix potential hang in nilfs_detach_log_writer()
	ALSA: core: Fix NULL module pointer assignment at card init
	wifi: cfg80211: fix the order of arguments for trace events of the tx_rx_evt class
	net: usb: qmi_wwan: add Telit FN920C04 compositions
	drm/amd/display: Set color_mgmt_changed to true on unsuspend
	ASoC: rt5645: Fix the electric noise due to the CBJ contacts floating
	ASoC: dt-bindings: rt5645: add cbj sleeve gpio property
	regulator: vqmmc-ipq4019: fix module autoloading
	ASoC: rt715: add vendor clear control register
	ASoC: da7219-aad: fix usage of device_get_named_child_node()
	drm/amdkfd: Flush the process wq before creating a kfd_process
	nvme: find numa distance only if controller has valid numa id
	openpromfs: finish conversion to the new mount API
	crypto: bcm - Fix pointer arithmetic
	firmware: raspberrypi: Use correct device for DMA mappings
	ecryptfs: Fix buffer size for tag 66 packet
	nilfs2: fix out-of-range warning
	parisc: add missing export of __cmpxchg_u8()
	crypto: ccp - drop platform ifdef checks
	crypto: x86/nh-avx2 - add missing vzeroupper
	crypto: x86/sha256-avx2 - add missing vzeroupper
	s390/cio: fix tracepoint subchannel type field
	jffs2: prevent xattr node from overflowing the eraseblock
	soc: mediatek: cmdq: Fix typo of CMDQ_JUMP_RELATIVE
	null_blk: Fix missing mutex_destroy() at module removal
	md: fix resync softlockup when bitmap size is less than array size
	wifi: ath10k: poll service ready message before failing
	x86/boot: Ignore relocations in .notes sections in walk_relocs() too
	qed: avoid truncating work queue length
	scsi: ufs: qcom: Perform read back after writing reset bit
	scsi: ufs-qcom: Fix ufs RST_n spec violation
	scsi: ufs: qcom: Perform read back after writing REG_UFS_SYS1CLK_1US
	scsi: ufs: ufs-qcom: Fix the Qcom register name for offset 0xD0
	scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW version major 5
	scsi: ufs: qcom: Perform read back after writing unipro mode
	scsi: ufs: qcom: Perform read back after writing CGC enable
	scsi: ufs: cdns-pltfrm: Perform read back after writing HCLKDIV
	scsi: ufs: core: Perform read back after disabling interrupts
	scsi: ufs: core: Perform read back after disabling UIC_COMMAND_COMPL
	irqchip/alpine-msi: Fix off-by-one in allocation error path
	irqchip/loongson-pch-msi: Fix off-by-one on allocation error path
	ACPI: disable -Wstringop-truncation
	gfs2: Fix "ignore unlock failures after withdraw"
	selftests/bpf: Fix umount cgroup2 error in test_sockmap
	cpufreq: Reorganize checks in cpufreq_offline()
	cpufreq: Split cpufreq_offline()
	cpufreq: Rearrange locking in cpufreq_remove_dev()
	cpufreq: exit() callback is optional
	net: export inet_lookup_reuseport and inet6_lookup_reuseport
	net: remove duplicate reuseport_lookup functions
	udp: Avoid call to compute_score on multiple sites
	scsi: libsas: Fix the failure of adding phy with zero-address to port
	scsi: hpsa: Fix allocation size for Scsi_Host private data
	x86/purgatory: Switch to the position-independent small code model
	wifi: ath10k: Fix an error code problem in ath10k_dbg_sta_write_peer_debug_trigger()
	wifi: ath10k: populate board data for WCN3990
	tcp: avoid premature drops in tcp_add_backlog()
	net: give more chances to rcu in netdev_wait_allrefs_any()
	macintosh/via-macii: Fix "BUG: sleeping function called from invalid context"
	wifi: carl9170: add a proper sanity check for endpoints
	wifi: ar5523: enable proper endpoint verification
	sh: kprobes: Merge arch_copy_kprobe() into arch_prepare_kprobe()
	Revert "sh: Handle calling csum_partial with misaligned data"
	selftests/binderfs: use the Makefile's rules, not Make's implicit rules
	HID: intel-ish-hid: ipc: Add check for pci_alloc_irq_vectors
	scsi: bfa: Ensure the copied buf is NUL terminated
	scsi: qedf: Ensure the copied buf is NUL terminated
	wifi: mwl8k: initialize cmd->addr[] properly
	usb: aqc111: stop lying about skb->truesize
	net: usb: sr9700: stop lying about skb->truesize
	m68k: Fix spinlock race in kernel thread creation
	m68k: mac: Fix reboot hang on Mac IIci
	net: ipv6: fix wrong start position when receive hop-by-hop fragment
	eth: sungem: remove .ndo_poll_controller to avoid deadlocks
	net: ethernet: cortina: Locking fixes
	af_unix: Fix data races in unix_release_sock/unix_stream_sendmsg
	net: usb: smsc95xx: stop lying about skb->truesize
	net: openvswitch: fix overwriting ct original tuple for ICMPv6
	ipv6: sr: add missing seg6_local_exit
	ipv6: sr: fix incorrect unregister order
	ipv6: sr: fix invalid unregister error path
	net/mlx5: Discard command completions in internal error
	drm/amd/display: Fix potential index out of bounds in color transformation function
	ASoC: soc-acpi: add helper to identify parent driver.
	ASoC: Intel: Disable route checks for Skylake boards
	mtd: rawnand: hynix: fixed typo
	fbdev: shmobile: fix snprintf truncation
	drm/meson: vclk: fix calculation of 59.94 fractional rates
	drm/mediatek: Add 0 size check to mtk_drm_gem_obj
	powerpc/fsl-soc: hide unused const variable
	fbdev: sisfb: hide unused variables
	media: ngene: Add dvb_ca_en50221_init return value check
	media: radio-shark2: Avoid led_names truncations
	drm: bridge: cdns-mhdp8546: Fix possible null pointer dereference
	fbdev: sh7760fb: allow modular build
	media: atomisp: ssh_css: Fix a null-pointer dereference in load_video_binaries
	drm/arm/malidp: fix a possible null pointer dereference
	drm: vc4: Fix possible null pointer dereference
	ASoC: tracing: Export SND_SOC_DAPM_DIR_OUT to its value
	drm/bridge: lt9611: Don't log an error when DSI host can't be found
	drm/bridge: tc358775: Don't log an error when DSI host can't be found
	drm/panel: simple: Add missing Innolux G121X1-L03 format, flags, connector
	drm/mipi-dsi: use correct return type for the DSC functions
	RDMA/hns: Refactor the hns_roce_buf allocation flow
	RDMA/hns: Create QP with selected QPN for bank load balance
	RDMA/hns: Fix incorrect symbol types
	RDMA/hns: Fix return value in hns_roce_map_mr_sg
	RDMA/hns: Use complete parentheses in macros
	RDMA/hns: Modify the print level of CQE error
	clk: qcom: mmcc-msm8998: fix venus clock issue
	x86/insn: Fix PUSH instruction in x86 instruction decoder opcode map
	ext4: avoid excessive credit estimate in ext4_tmpfile()
	sunrpc: removed redundant procp check
	ext4: simplify calculation of blkoff in ext4_mb_new_blocks_simple
	ext4: fix unit mismatch in ext4_mb_new_blocks_simple
	ext4: try all groups in ext4_mb_new_blocks_simple
	ext4: remove unused parameter from ext4_mb_new_blocks_simple()
	ext4: fix potential unnitialized variable
	SUNRPC: Fix gss_free_in_token_pages()
	selftests/kcmp: Make the test output consistent and clear
	selftests/kcmp: remove unused open mode
	RDMA/IPoIB: Fix format truncation compilation errors
	net: qrtr: fix null-ptr-deref in qrtr_ns_remove
	net: qrtr: ns: Fix module refcnt
	netrom: fix possible dead-lock in nr_rt_ioctl()
	af_packet: do not call packet_read_pending() from tpacket_destruct_skb()
	sched/fair: Allow disabling sched_balance_newidle with sched_relax_domain_level
	greybus: lights: check return of get_channel_from_mode
	f2fs: fix to wait on page writeback in __clone_blkaddrs()
	soundwire: cadence: fix invalid PDI offset
	dmaengine: idma64: Add check for dma_set_max_seg_size
	firmware: dmi-id: add a release callback function
	serial: max3100: Lock port->lock when calling uart_handle_cts_change()
	serial: max3100: Update uart_driver_registered on driver removal
	serial: max3100: Fix bitwise types
	greybus: arche-ctrl: move device table to its right location
	serial: sc16is7xx: add proper sched.h include for sched_set_fifo()
	f2fs: compress: support chksum
	f2fs: add compress_mode mount option
	f2fs: compress: clean up parameter of __f2fs_cluster_blocks()
	f2fs: compress: remove unneeded preallocation
	f2fs: introduce FI_COMPRESS_RELEASED instead of using IMMUTABLE bit
	f2fs: compress: fix to relocate check condition in f2fs_{release,reserve}_compress_blocks()
	f2fs: add cp_error check in f2fs_write_compressed_pages
	f2fs: fix to force keeping write barrier for strict fsync mode
	f2fs: do not allow partial truncation on pinned file
	f2fs: fix typos in comments
	f2fs: fix to relocate check condition in f2fs_fallocate()
	f2fs: fix to check pinfile flag in f2fs_move_file_range()
	iio: pressure: dps310: support negative temperature values
	fpga: region: change FPGA indirect article to an
	fpga: region: Rename dev to parent for parent device
	docs: driver-api: fpga: avoid using UTF-8 chars
	fpga: region: Use standard dev_release for class driver
	fpga: region: add owner module and take its refcount
	microblaze: Remove gcc flag for non existing early_printk.c file
	microblaze: Remove early printk call from cpuinfo-static.c
	usb: gadget: u_audio: Clear uac pointer when freed.
	stm class: Fix a double free in stm_register_device()
	ppdev: Remove usage of the deprecated ida_simple_xx() API
	ppdev: Add an error check in register_device
	extcon: max8997: select IRQ_DOMAIN instead of depending on it
	PCI/EDR: Align EDR_PORT_DPC_ENABLE_DSM with PCI Firmware r3.3
	PCI/EDR: Align EDR_PORT_LOCATE_DSM with PCI Firmware r3.3
	f2fs: compress: fix to cover {reserve,release}_compress_blocks() w/ cp_rwsem lock
	f2fs: fix to release node block count in error path of f2fs_new_node_page()
	f2fs: compress: don't allow unaligned truncation on released compress inode
	serial: sh-sci: protect invalidating RXDMA on shutdown
	libsubcmd: Fix parse-options memory leak
	s390/ipl: Fix incorrect initialization of len fields in nvme reipl block
	s390/ipl: Fix incorrect initialization of nvme dump block
	Input: ims-pcu - fix printf string overflow
	Input: ioc3kbd - convert to platform remove callback returning void
	Input: ioc3kbd - add device table
	mmc: sdhci_am654: Add tuning algorithm for delay chain
	mmc: sdhci_am654: Write ITAPDLY for DDR52 timing
	mmc: sdhci_am654: Drop lookup for deprecated ti,otap-del-sel
	mmc: sdhci_am654: Add OTAP/ITAP delay enable
	mmc: sdhci_am654: Add ITAPDLYSEL in sdhci_j721e_4bit_set_clock
	mmc: sdhci_am654: Fix ITAPDLY for HS400 timing
	Input: pm8xxx-vibrator - correct VIB_MAX_LEVELS calculation
	drm/msm/dpu: Always flush the slave INTF on the CTL
	um: Fix return value in ubd_init()
	um: Add winch to winch_handlers before registering winch IRQ
	um: vector: fix bpfflash parameter evaluation
	drm/bridge: tc358775: fix support for jeida-18 and jeida-24
	media: stk1160: fix bounds checking in stk1160_copy_video()
	scsi: qla2xxx: Replace all non-returning strlcpy() with strscpy()
	media: flexcop-usb: clean up endpoint sanity checks
	media: flexcop-usb: fix sanity check of bNumEndpoints
	powerpc/pseries: Add failure related checks for h_get_mpp and h_get_ppp
	um: Fix the -Wmissing-prototypes warning for __switch_mm
	media: cec: cec-adap: always cancel work in cec_transmit_msg_fh
	media: cec: cec-api: add locking in cec_release()
	media: core headers: fix kernel-doc warnings
	media: cec: fix a deadlock situation
	media: cec: call enable_adap on s_log_addrs
	media: cec: abort if the current transmit was canceled
	media: cec: correctly pass on reply results
	media: cec: use call_op and check for !unregistered
	media: cec-adap.c: drop activate_cnt, use state info instead
	media: cec: core: avoid recursive cec_claim_log_addrs
	media: cec: core: avoid confusing "transmit timed out" message
	null_blk: Fix the WARNING: modpost: missing MODULE_DESCRIPTION()
	regulator: bd71828: Don't overwrite runtime voltages
	x86/kconfig: Select ARCH_WANT_FRAME_POINTERS again when UNWINDER_FRAME_POINTER=y
	nfc: nci: Fix uninit-value in nci_rx_work
	ASoC: tas2552: Add TX path for capturing AUDIO-OUT data
	sunrpc: fix NFSACL RPC retry on soft mount
	rpcrdma: fix handling for RDMA_CM_EVENT_DEVICE_REMOVAL
	ipv6: sr: fix memleak in seg6_hmac_init_algo
	params: lift param_set_uint_minmax to common code
	tcp: Fix shift-out-of-bounds in dctcp_update_alpha().
	openvswitch: Set the skbuff pkt_type for proper pmtud support.
	arm64: asm-bug: Add .align 2 to the end of __BUG_ENTRY
	virtio: delete vq in vp_find_vqs_msix() when request_irq() fails
	net: fec: avoid lock evasion when reading pps_enable
	tls: fix missing memory barrier in tls_init
	nfc: nci: Fix kcov check in nci_rx_work()
	nfc: nci: Fix handling of zero-length payload packets in nci_rx_work()
	netfilter: nfnetlink_queue: acquire rcu_read_lock() in instance_destroy_rcu()
	netfilter: nft_payload: restore vlan q-in-q match support
	spi: Don't mark message DMA mapped when no transfer in it is
	nvmet: fix ns enable/disable possible hang
	net/mlx5e: Use rx_missed_errors instead of rx_dropped for reporting buffer exhaustion
	dma-buf/sw-sync: don't enable IRQ from sync_print_obj()
	bpf: Fix potential integer overflow in resolve_btfids
	enic: Validate length of nl attributes in enic_set_vf_port
	net: usb: smsc95xx: fix changing LED_SEL bit value updated from EEPROM
	bpf: Allow delete from sockmap/sockhash only if update is allowed
	net:fec: Add fec_enet_deinit()
	netfilter: tproxy: bail out if IP has been disabled on the device
	kconfig: fix comparison to constant symbols, 'm', 'n'
	spi: stm32: Don't warn about spurious interrupts
	ipvlan: Dont Use skb->sk in ipvlan_process_v{4,6}_outbound
	hwmon: (shtc1) Fix property misspelling
	ALSA: timer: Set lower bound of start tick time
	genirq/cpuhotplug, x86/vector: Prevent vector leak during CPU offline
	media: cec: core: add adap_nb_transmit_canceled() callback
	SUNRPC: Fix loop termination condition in gss_free_in_token_pages()
	binder: fix max_thread type inconsistency
	mmc: core: Do not force a retune before RPMB switch
	io_uring: fail NOP if non-zero op flags is passed in
	afs: Don't cross .backup mountpoint from backup volume
	nilfs2: fix use-after-free of timer for log writer thread
	vxlan: Fix regression when dropping packets due to invalid src addresses
	x86/mm: Remove broken vsyscall emulation code from the page fault code
	netfilter: nf_tables: restrict tunnel object to NFPROTO_NETDEV
	netfilter: nf_tables: Fix potential data-race in __nft_obj_type_get()
	f2fs: fix to do sanity check on i_xattr_nid in sanity_check_inode()
	media: lgdt3306a: Add a check against null-pointer-def
	drm/amdgpu: add error handle to avoid out-of-bounds
	ata: pata_legacy: make legacy_exit() work again
	ACPI: resource: Do IRQ override on TongFang GXxHRXx and GMxHGxx
	arm64: tegra: Correct Tegra132 I2C alias
	arm64: dts: qcom: qcs404: fix bluetooth device address
	md/raid5: fix deadlock that raid5d() wait for itself to clear MD_SB_CHANGE_PENDING
	wifi: rtl8xxxu: Fix the TX power of RTL8192CU, RTL8723AU
	wifi: rtlwifi: rtl8192de: Fix low speed with WPA3-SAE
	wifi: rtlwifi: rtl8192de: Fix endianness issue in RX path
	arm64: dts: hi3798cv200: fix the size of GICR
	media: mc: mark the media devnode as registered from the, start
	media: mxl5xx: Move xpt structures off stack
	media: v4l2-core: hold videodev_lock until dev reg, finishes
	mmc: core: Add mmc_gpiod_set_cd_config() function
	mmc: sdhci-acpi: Sort DMI quirks alphabetically
	mmc: sdhci-acpi: Fix Lenovo Yoga Tablet 2 Pro 1380 sdcard slot not working
	mmc: sdhci-acpi: Disable write protect detection on Toshiba WT10-A
	fbdev: savage: Handle err return when savagefb_check_var failed
	KVM: arm64: Allow AArch32 PSTATE.M to be restored as System mode
	crypto: ecrdsa - Fix module auto-load on add_key
	crypto: qat - Fix ADF_DEV_RESET_SYNC memory leak
	net/ipv6: Fix route deleting failure when metric equals 0
	net/9p: fix uninit-value in p9_client_rpc()
	intel_th: pci: Add Meteor Lake-S CPU support
	sparc64: Fix number of online CPUs
	watchdog: rti_wdt: Set min_hw_heartbeat_ms to accommodate a safety margin
	kdb: Fix buffer overflow during tab-complete
	kdb: Use format-strings rather than '\0' injection in kdb_read()
	kdb: Fix console handling when editing and tab-completing commands
	kdb: Merge identical case statements in kdb_read()
	kdb: Use format-specifiers rather than memset() for padding in kdb_read()
	net: fix __dst_negative_advice() race
	sparc: move struct termio to asm/termios.h
	ext4: fix mb_cache_entry's e_refcnt leak in ext4_xattr_block_cache_find()
	s390/ap: Fix crash in AP internal function modify_bitmap()
	nfs: fix undefined behavior in nfs_block_bits()
	NFS: Fix READ_PLUS when server doesn't support OP_READ_PLUS
	scsi: ufs: ufs-qcom: Clear qunipro_g4_sel for HW major version > 5
	f2fs: compress: fix compression chksum
	RDMA/hns: Use mutex instead of spinlock for ida allocation
	RDMA/hns: Fix CQ and QP cache affinity
	Linux 5.10.219

Change-Id: I0e21ff44d28df2a2802a9fb35f0959bb5ab528fc
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -20,6 +20,11 @@ Optional properties:
   a GPIO spec for the external headphone detect pin. If jd-mode = 0,
   we will get the JD status by getting the value of hp-detect-gpios.
 
+- cbj-sleeve-gpios:
+  a GPIO spec to control the external combo jack circuit to tie the sleeve/ring2
+  contacts to the ground or floating. It could avoid some electric noise from the
+  active speaker jacks.
+
 - realtek,in2-differential
   Boolean. Indicate MIC2 input are differential, rather than single-ended.
 
@@ -68,6 +73,7 @@ codec: rt5650@1a {
 	compatible = "realtek,rt5650";
 	reg = <0x1a>;
 	hp-detect-gpios = <&gpio 19 0>;
+	cbj-sleeve-gpios = <&gpio 20 0>;
 	interrupt-parent = <&gpio>;
 	interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
 	realtek,dmic-en = "true";

@@ -4,11 +4,11 @@ FPGA Bridge
 API to implement a new FPGA bridge
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-* struct fpga_bridge — The FPGA Bridge structure
-* struct fpga_bridge_ops — Low level Bridge driver ops
-* devm_fpga_bridge_create() — Allocate and init a bridge struct
-* fpga_bridge_register() — Register a bridge
-* fpga_bridge_unregister() — Unregister a bridge
+* struct fpga_bridge - The FPGA Bridge structure
+* struct fpga_bridge_ops - Low level Bridge driver ops
+* devm_fpga_bridge_create() - Allocate and init a bridge struct
+* fpga_bridge_register() - Register a bridge
+* fpga_bridge_unregister() - Unregister a bridge
 
 .. kernel-doc:: include/linux/fpga/fpga-bridge.h
    :functions: fpga_bridge

@@ -101,12 +101,12 @@ in state.
 API for implementing a new FPGA Manager driver
 ----------------------------------------------
 
-* ``fpga_mgr_states`` — Values for :c:expr:`fpga_manager->state`.
-* struct fpga_manager — the FPGA manager struct
-* struct fpga_manager_ops — Low level FPGA manager driver ops
-* devm_fpga_mgr_create() — Allocate and init a manager struct
-* fpga_mgr_register() — Register an FPGA manager
-* fpga_mgr_unregister() — Unregister an FPGA manager
+* ``fpga_mgr_states`` - Values for :c:expr:`fpga_manager->state`.
+* struct fpga_manager - the FPGA manager struct
+* struct fpga_manager_ops - Low level FPGA manager driver ops
+* devm_fpga_mgr_create() - Allocate and init a manager struct
+* fpga_mgr_register() - Register an FPGA manager
+* fpga_mgr_unregister() - Unregister an FPGA manager
 
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_mgr_states

@@ -84,10 +84,10 @@ will generate that list. Here's some sample code of what to do next::
 API for programming an FPGA
 ---------------------------
 
-* fpga_region_program_fpga() — Program an FPGA
-* fpga_image_info() — Specifies what FPGA image to program
-* fpga_image_info_alloc() — Allocate an FPGA image info struct
-* fpga_image_info_free() — Free an FPGA image info struct
+* fpga_region_program_fpga() - Program an FPGA
+* fpga_image_info() - Specifies what FPGA image to program
+* fpga_image_info_alloc() - Allocate an FPGA image info struct
+* fpga_image_info_free() - Free an FPGA image info struct
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
    :functions: fpga_region_program_fpga

@@ -45,19 +45,25 @@ An example of usage can be seen in the probe function of [#f2]_.
 API to add a new FPGA region
 ----------------------------
 
-* struct fpga_region — The FPGA region struct
-* devm_fpga_region_create() — Allocate and init a region struct
-* fpga_region_register() — Register an FPGA region
-* fpga_region_unregister() — Unregister an FPGA region
+* struct fpga_region - The FPGA region struct
+* struct fpga_region_info - Parameter structure for __fpga_region_register_full()
+* __fpga_region_register_full() - Create and register an FPGA region using the
+  fpga_region_info structure to provide the full flexibility of options
+* __fpga_region_register() - Create and register an FPGA region using standard
+  arguments
+* fpga_region_unregister() - Unregister an FPGA region
+
+Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()``
+automatically set the module that registers the FPGA region as the owner.
 
 The FPGA region's probe function will need to get a reference to the FPGA
 Manager it will be using to do the programming. This usually would happen
 during the region's probe function.
 
-* fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count
-* of_fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count,
+* fpga_mgr_get() - Get a reference to an FPGA manager, raise ref count
+* of_fpga_mgr_get() - Get a reference to an FPGA manager, raise ref count,
   given a device node.
-* fpga_mgr_put() — Put an FPGA manager
+* fpga_mgr_put() - Put an FPGA manager
 
 The FPGA region will need to specify which bridges to control while programming
 the FPGA. The region driver can build a list of bridges during probe time
@@ -66,20 +72,23 @@ the list of bridges to program just before programming
 (:c:expr:`fpga_region->get_bridges`). The FPGA bridge framework supplies the
 following APIs to handle building or tearing down that list.
 
-* fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
+* fpga_bridge_get_to_list() - Get a ref of an FPGA bridge, add it to a
   list
-* of_fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
+* of_fpga_bridge_get_to_list() - Get a ref of an FPGA bridge, add it to a
   list, given a device node
-* fpga_bridges_put() — Given a list of bridges, put them
+* fpga_bridges_put() - Given a list of bridges, put them
 
 .. kernel-doc:: include/linux/fpga/fpga-region.h
    :functions: fpga_region
 
-.. kernel-doc:: drivers/fpga/fpga-region.c
-   :functions: devm_fpga_region_create
+.. kernel-doc:: include/linux/fpga/fpga-region.h
+   :functions: fpga_region_info
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
-   :functions: fpga_region_register
+   :functions: __fpga_region_register_full
+
+.. kernel-doc:: drivers/fpga/fpga-region.c
+   :functions: __fpga_region_register
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
    :functions: fpga_region_unregister
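As an aside, a minimal sketch of how a probe function might use the helpers the documentation above names. This is an editorial illustration, not part of the merge: it assumes the post-rework signatures (fpga_region_register() returning a struct fpga_region * or an ERR_PTR, of_fpga_mgr_get()/fpga_mgr_put() for manager references), and all driver names are hypothetical.

    #include <linux/err.h>
    #include <linux/fpga/fpga-mgr.h>
    #include <linux/fpga/fpga-region.h>
    #include <linux/platform_device.h>

    static int demo_region_probe(struct platform_device *pdev)
    {
    	struct fpga_manager *mgr;
    	struct fpga_region *region;

    	/* Take a reference on the manager that will program this region. */
    	mgr = of_fpga_mgr_get(pdev->dev.of_node);
    	if (IS_ERR(mgr))
    		return PTR_ERR(mgr);

    	/* The helper macro records THIS_MODULE as the region's owner. */
    	region = fpga_region_register(&pdev->dev, mgr, NULL);
    	if (IS_ERR(region)) {
    		fpga_mgr_put(mgr);
    		return PTR_ERR(region);
    	}

    	platform_set_drvdata(pdev, region);
    	return 0;
    }

    static void demo_region_remove(struct platform_device *pdev)
    {
    	struct fpga_region *region = platform_get_drvdata(pdev);

    	fpga_region_unregister(region);
    	fpga_mgr_put(region->mgr);
    }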

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 218
+SUBLEVEL = 219
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -58,7 +58,7 @@ cpu@3 {
 	gic: interrupt-controller@f1001000 {
 		compatible = "arm,gic-400";
 		reg = <0x0 0xf1001000 0x0 0x1000>,  /* GICD */
-		      <0x0 0xf1002000 0x0 0x100>;   /* GICC */
+		      <0x0 0xf1002000 0x0 0x2000>;  /* GICC */
 		#address-cells = <0>;
 		#interrupt-cells = <3>;
 		interrupt-controller;

@@ -9,8 +9,8 @@ / {
 	compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
 
 	aliases {
-		rtc0 = "/i2c@7000d000/as3722@40";
-		rtc1 = "/rtc@7000e000";
+		rtc0 = &as3722;
+		rtc1 = &tegra_rtc;
 		serial0 = &uarta;
 	};

@@ -573,7 +573,7 @@ spi@7000de00 {
 		status = "disabled";
 	};
 
-	rtc@7000e000 {
+	tegra_rtc: rtc@7000e000 {
 		compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
 		reg = <0x0 0x7000e000 0x0 0x100>;
 		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;

@@ -60,7 +60,7 @@ bluetooth {
 		vddrf-supply = <&vreg_l1_1p3>;
 		vddch0-supply = <&vdd_ch0_3p3>;
 
-		local-bd-address = [ 02 00 00 00 5a ad ];
+		local-bd-address = [ 00 00 00 00 00 00 ];
 
 		max-speed = <3200000>;
 	};

@@ -28,6 +28,7 @@
 14470:	.long 14471f - 14470b;			\
 	_BUGVERBOSE_LOCATION(__FILE__, __LINE__)	\
 	.short flags;				\
+	.align 2;				\
 	.popsection;				\
 14471:
 #else

@@ -233,6 +233,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_UND:
+		case PSR_AA32_MODE_SYS:
 			if (!vcpu_el1_is_32bit(vcpu))
 				return -EINVAL;
 			break;

@@ -432,7 +432,9 @@ resume:
 	movec	%a0,%dfc
 
 	/* restore status register */
-	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
+	movew	%a1@(TASK_THREAD+THREAD_SR),%d0
+	oriw	#0x0700,%d0
+	movew	%d0,%sr
 
 	rts

@@ -452,30 +452,18 @@ void mac_poweroff(void)
 
 void mac_reset(void)
 {
-	if (macintosh_config->adb_type == MAC_ADB_II &&
-	    macintosh_config->ident != MAC_MODEL_SE30) {
-		/* need ROMBASE in booter */
-		/* indeed, plus need to MAP THE ROM !! */
-
-		if (mac_bi_data.rombase == 0)
-			mac_bi_data.rombase = 0x40800000;
-
-		/* works on some */
-		rom_reset = (void *) (mac_bi_data.rombase + 0xa);
-
-		local_irq_disable();
-		rom_reset();
 #ifdef CONFIG_ADB_CUDA
-	} else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
-		   macintosh_config->adb_type == MAC_ADB_CUDA) {
+	if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+	    macintosh_config->adb_type == MAC_ADB_CUDA) {
 		cuda_restart();
+	} else
 #endif
 #ifdef CONFIG_ADB_PMU
-	} else if (macintosh_config->adb_type == MAC_ADB_PB2) {
+	if (macintosh_config->adb_type == MAC_ADB_PB2) {
 		pmu_restart();
+	} else
 #endif
-	} else if (CPU_IS_030) {
+	if (CPU_IS_030) {
 		/* 030-specific reset routine.  The idea is general, but the
 		 * specific registers to reset are '030-specific.  Until I
 		 * have a non-030 machine, I can't test anything else.
@@ -523,6 +511,18 @@ void mac_reset(void)
 			"jmp %/a0@\n\t" /* jump to the reset vector */
 			".chip 68k"
 			: : "r" (offset), "a" (rombase) : "a0");
+	} else {
+		/* need ROMBASE in booter */
+		/* indeed, plus need to MAP THE ROM !! */
+
+		if (mac_bi_data.rombase == 0)
+			mac_bi_data.rombase = 0x40800000;
+
+		/* works on some */
+		rom_reset = (void *)(mac_bi_data.rombase + 0xa);
+
+		local_irq_disable();
+		rom_reset();
 	}
 
 	/* should never get here */

@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code and low level code
 CFLAGS_REMOVE_timer.o = -pg
 CFLAGS_REMOVE_intc.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_process.o = -pg
 endif

@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
 static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
 
 #define err_printk(x) \
-	early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
+	pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
 
 void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
 {

@@ -21,6 +21,7 @@ EXPORT_SYMBOL(memset);
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
 EXPORT_SYMBOL(__cmpxchg_u32);
 EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP

@@ -494,7 +494,7 @@ struct hvcall_mpp_data {
 	unsigned long backing_mem;
 };
 
-int h_get_mpp(struct hvcall_mpp_data *);
+long h_get_mpp(struct hvcall_mpp_data *mpp_data);
 
 struct hvcall_mpp_x_data {
 	unsigned long coalesced_bytes;

@@ -1883,10 +1883,10 @@ void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
  * h_get_mpp
  * H_GET_MPP hcall returns info in 7 parms
  */
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+long h_get_mpp(struct hvcall_mpp_data *mpp_data)
 {
-	int rc;
-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+	long rc;
 
 	rc = plpar_hcall9(H_GET_MPP, retbuf);

@@ -112,8 +112,8 @@ struct hvcall_ppp_data {
  */
 static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
 {
-	unsigned long rc;
-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+	long rc;
 
 	rc = plpar_hcall9(H_GET_PPP, retbuf);
 
@@ -192,7 +192,7 @@ static void parse_ppp_data(struct seq_file *m)
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
 	const __be32 *perf_level;
-	int rc;
+	long rc;
 
 	rc = h_get_ppp(&ppp_data);
 	if (rc)

@@ -573,10 +573,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
 	.msiir_offset = 0x38,
 };
 
+#ifdef CONFIG_EPAPR_PARAVIRT
 static const struct fsl_msi_feature vmpic_msi_feature = {
 	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
 	.msiir_offset = 0,
 };
+#endif
 
 static const struct of_device_id fsl_of_msi_ids[] = {
 	{

@@ -832,8 +832,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
 		scpdata_len += padding;
 	}
 
-	reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
-	reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
+	reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
+	reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
 	reipl_block_nvme->nvme.scp_data_len = scpdata_len;
 
 	return count;
@@ -1602,9 +1602,9 @@ static int __init dump_nvme_init(void)
 	}
 	dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
 	dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
-	dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
-	dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
-	dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
+	dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+	dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
+	dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
 	dump_capabilities |= DUMP_TYPE_NVME;
 	return 0;
 }

@@ -44,17 +44,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	if (OPCODE_RTE(opcode))
 		return -EFAULT;	/* Bad breakpoint */
 
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	p->opcode = opcode;
 
 	return 0;
 }
 
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	p->opcode = *p->addr;
-}
-
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
 	*p->addr = BREAKPOINT_INSTRUCTION;

@@ -33,7 +33,8 @@
  */
 
 /*
- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
+ * unsigned int csum_partial(const unsigned char *buf, int len,
+ *			     unsigned int sum);
  */
 
 .text
@@ -45,31 +46,11 @@ ENTRY(csum_partial)
 	 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 	 * alignment for the unrolled loop.
 	 */
+	mov	r5, r1
 	mov	r4, r0
-	tst	#3, r0		! Check alignment.
-	bt/s	2f		! Jump if alignment is ok.
-	 mov	r4, r7		! Keep a copy to check for alignment
+	tst	#2, r0		! Check alignment.
+	bt	2f		! Jump if alignment is ok.
 	!
-	tst	#1, r0		! Check alignment.
-	bt	21f		! Jump if alignment is boundary of 2bytes.
-
-	! buf is odd
-	tst	r5, r5
-	add	#-1, r5
-	bt	9f
-	mov.b	@r4+, r0
-	extu.b	r0, r0
-	addc	r0, r6		! t=0 from previous tst
-	mov	r6, r0
-	shll8	r6
-	shlr16	r0
-	shlr8	r0
-	or	r0, r6
-	mov	r4, r0
-	tst	#2, r0
-	bt	2f
-21:
-	! buf is 2 byte aligned (len could be 0)
 	add	#-2, r5		! Alignment uses up two bytes.
 	cmp/pz	r5		!
 	bt/s	1f		! Jump if we had at least two bytes.
@@ -77,17 +58,16 @@ ENTRY(csum_partial)
 	bra	6f
 	 add	#2, r5		! r5 was < 2.	Deal with it.
 1:
-	mov	r5, r1		! Save new len for later use.
 	mov.w	@r4+, r0
 	extu.w	r0, r0
 	addc	r0, r6
 	bf	2f
 	add	#1, r6
 2:
-	! buf is 4 byte aligned (len could be 0)
+	mov	r5, r1
 	mov	#-5, r0
-	shld	r0, r1
-	tst	r1, r1
+	shld	r0, r5
+	tst	r5, r5
 	bt/s	4f		! if it's =0, go to 4f
 	 clrt
 	.align	2
@@ -109,31 +89,30 @@ ENTRY(csum_partial)
 	addc	r0, r6
 	addc	r2, r6
 	movt	r0
-	dt	r1
+	dt	r5
 	bf/s	3b
 	 cmp/eq	#1, r0
-	! here, we know r1==0
-	addc	r1, r6		! add carry to r6
+	! here, we know r5==0
+	addc	r5, r6		! add carry to r6
 4:
-	mov	r5, r0
+	mov	r1, r0
 	and	#0x1c, r0
 	tst	r0, r0
-	bt	6f
-	! 4 bytes or more remaining
-	mov	r0, r1
-	shlr2	r1
+	bt/s	6f
+	 mov	r0, r5
+	shlr2	r5
 	mov	#0, r2
 5:
 	addc	r2, r6
 	mov.l	@r4+, r2
 	movt	r0
-	dt	r1
+	dt	r5
 	bf/s	5b
 	 cmp/eq	#1, r0
 	addc	r2, r6
-	addc	r1, r6		! r1==0 here, so it means add carry-bit
+	addc	r5, r6		! r5==0 here, so it means add carry-bit
 6:
-	! 3 bytes or less remaining
+	mov	r1, r5
 	mov	#3, r0
 	and	r0, r5
 	tst	r5, r5
@@ -159,16 +138,6 @@ ENTRY(csum_partial)
 	mov	#0, r0
 	addc	r0, r6
 9:
-	! Check if the buffer was misaligned, if so realign sum
-	mov	r7, r0
-	tst	#1, r0
-	bt	10f
-	mov	r6, r0
-	shll8	r6
-	shlr16	r0
-	shlr8	r0
-	or	r0, r6
-10:
 	rts
 	 mov	r6, r0

@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);
 
@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
-#define smp_fill_in_cpu_possible_map() do { } while (0)
 #define smp_init_cpu_poke() do { } while (0)
 #define scheduler_poke() do { } while (0)

@@ -13,16 +13,6 @@ typedef unsigned int tcflag_t;
 typedef unsigned long tcflag_t;
 #endif
 
-#define NCC 8
-struct termio {
-	unsigned short c_iflag;		/* input mode flags */
-	unsigned short c_oflag;		/* output mode flags */
-	unsigned short c_cflag;		/* control mode flags */
-	unsigned short c_lflag;		/* local mode flags */
-	unsigned char c_line;		/* line discipline */
-	unsigned char c_cc[NCC];	/* control characters */
-};
-
 #define NCCS 17
 struct termios {
 	tcflag_t c_iflag;		/* input mode flags */

@@ -40,5 +40,14 @@ struct winsize {
 	unsigned short ws_ypixel;
 };
 
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
 #endif /* _UAPI_SPARC_TERMIOS_H */

@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
 	ncpus_probed++;
 #ifdef CONFIG_SMP
 	set_cpu_present(cpuid, true);
-	set_cpu_possible(cpuid, true);
+
+	if (num_possible_cpus() < nr_cpu_ids)
+		set_cpu_possible(cpuid, true);
 #endif
 	return NULL;
 }

@@ -688,7 +688,6 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	init_sparc64_elf_hwcap();
-	smp_fill_in_cpu_possible_map();
 
 	/*
 	 * Once the OF device tree and MDESC have been setup and nr_cpus has
 	 * been parsed, we know the list of possible cpus.  Therefore we can

@@ -1210,20 +1210,6 @@ void __init smp_setup_processor_id(void)
 		xcall_deliver_impl = hypervisor_xcall_deliver;
 }
 
-void __init smp_fill_in_cpu_possible_map(void)
-{
-	int possible_cpus = num_possible_cpus();
-	int i;
-
-	if (possible_cpus > nr_cpu_ids)
-		possible_cpus = nr_cpu_ids;
-
-	for (i = 0; i < possible_cpus; i++)
-		set_cpu_possible(i, true);
-	for (; i < NR_CPUS; i++)
-		set_cpu_possible(i, false);
-}
-
 void smp_fill_in_sib_core_maps(void)
 {
 	unsigned int i;

@@ -668,24 +668,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
 		goto cleanup;
 	}
 
-	*winch = ((struct winch) { .list	= LIST_HEAD_INIT(winch->list),
-				   .fd		= fd,
+	*winch = ((struct winch) { .fd		= fd,
 				   .tty_fd	= tty_fd,
 				   .pid		= pid,
 				   .port	= port,
 				   .stack	= stack });
 
+	spin_lock(&winch_handler_lock);
+	list_add(&winch->list, &winch_handlers);
+	spin_unlock(&winch_handler_lock);
+
 	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
 			   IRQF_SHARED, "winch", winch) < 0) {
 		printk(KERN_ERR "register_winch_irq - failed to register "
 		       "IRQ\n");
+		spin_lock(&winch_handler_lock);
+		list_del(&winch->list);
+		spin_unlock(&winch_handler_lock);
 		goto out_free;
 	}
 
-	spin_lock(&winch_handler_lock);
-	list_add(&winch->list, &winch_handlers);
-	spin_unlock(&winch_handler_lock);
-
 	return;
 
  out_free:

@@ -1158,7 +1158,7 @@ static int __init ubd_init(void)
 
 	if (irq_req_buffer == NULL) {
 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
-		return -1;
+		return -ENOMEM;
 	}
 	io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
 				      sizeof(struct io_thread_req *),
@@ -1169,7 +1169,7 @@ static int __init ubd_init(void)
 
 	if (io_req_buffer == NULL) {
 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
-		return -1;
+		return -ENOMEM;
 	}
 	platform_driver_register(&ubd_driver);
 	mutex_lock(&ubd_lock);

@@ -142,7 +142,7 @@ static bool get_bpf_flash(struct arglist *def)
 
 	if (allow != NULL) {
 		if (kstrtoul(allow, 10, &result) == 0)
-			return (allow > 0);
+			return result > 0;
 	}
 	return false;
 }

@@ -15,8 +15,6 @@ typedef struct mm_context {
 	struct page *stub_pages[2];
 } mm_context_t;
 
-extern void __switch_mm(struct mm_id * mm_idp);
-
 /* Avoid tangled inclusion with asm/ldt.h */
 extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
 extern void free_ldt(struct mm_context *mm);

@@ -15,4 +15,6 @@ struct mm_id {
 	int kill;
 };
 
+void __switch_mm(struct mm_id *mm_idp);
+
 #endif

@@ -258,6 +258,7 @@ config UNWINDER_ORC
 
 config UNWINDER_FRAME_POINTER
 	bool "Frame pointer unwinder"
+	select ARCH_WANT_FRAME_POINTERS
 	select FRAME_POINTER
 	help
 	  This option enables the frame pointer unwinder for unwinding kernel
@@ -281,7 +282,3 @@ config UNWINDER_GUESS
 	  overhead.
 
 endchoice
-
-config FRAME_POINTER
-	depends on !UNWINDER_ORC && !UNWINDER_GUESS
-	bool

@@ -153,5 +153,6 @@ SYM_FUNC_START(nh_avx2)
 	vpaddq		T1, T0, T0
 	vpaddq		T4, T0, T0
 	vmovdqu		T0, (HASH)
+	vzeroupper
 	RET
 SYM_FUNC_END(nh_avx2)

@@ -711,6 +711,7 @@ done_hash:
 	popq	%r13
 	popq	%r12
 	popq	%rbx
+	vzeroupper
 	RET
 SYM_FUNC_END(sha256_transform_rorx)

@@ -98,11 +98,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
 
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
-	/*
-	 * XXX: if access_ok, get_user, and put_user handled
-	 * sig_on_uaccess_err, this could go away.
-	 */
-
 	if (!access_ok((void __user *)ptr, size)) {
 		struct thread_struct *thread = &current->thread;
 
@@ -120,10 +115,8 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 bool emulate_vsyscall(unsigned long error_code,
 		      struct pt_regs *regs, unsigned long address)
 {
-	struct task_struct *tsk;
 	unsigned long caller;
 	int vsyscall_nr, syscall_nr, tmp;
-	int prev_sig_on_uaccess_err;
 	long ret;
 	unsigned long orig_dx;
 
@@ -172,8 +165,6 @@ bool emulate_vsyscall(unsigned long error_code,
 		goto sigsegv;
 	}
 
-	tsk = current;
-
 	/*
 	 * Check for access_ok violations and find the syscall nr.
 	 *
@@ -233,12 +224,8 @@ bool emulate_vsyscall(unsigned long error_code,
 		goto do_ret;  /* skip requested */
 
 	/*
-	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
-	 * preserve that behavior to make writing exploits harder.
+	 * With a real vsyscall, page faults cause SIGSEGV.
 	 */
-	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
-	current->thread.sig_on_uaccess_err = 1;
-
 	ret = -EFAULT;
 	switch (vsyscall_nr) {
 	case 0:
@@ -261,23 +248,12 @@ bool emulate_vsyscall(unsigned long error_code,
 		break;
 	}
 
-	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
-
 check_fault:
 	if (ret == -EFAULT) {
 		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall fault (exploit attempt?)");
-
-		/*
-		 * If we failed to generate a signal for any reason,
-		 * generate one here. (This should be impossible.)
-		 */
-		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
-				 !sigismember(&tsk->pending.signal, SIGSEGV)))
-			goto sigsegv;
-
-		return true;  /* Don't emulate the ret. */
+		goto sigsegv;
 	}
 
 	regs->ax = ret;

@@ -528,7 +528,6 @@ struct thread_struct {
 	unsigned long		iopl_emul;
 
 	unsigned int		iopl_warn:1;
-	unsigned int		sig_on_uaccess_err:1;
 
 	/* Floating point and extended processor state */
 	struct fpu		fpu;

@@ -920,7 +920,8 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd)
 		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
 		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
 	} else {
-		apicd->prev_vector = 0;
+		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
+		free_moved_vector(apicd);
 	}
 	raw_spin_unlock(&vector_lock);
 }
@@ -957,6 +958,7 @@ void irq_complete_move(struct irq_cfg *cfg)
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
+	unsigned int cpu = smp_processor_id();
 	struct apic_chip_data *apicd;
 	struct irq_data *irqd;
 	unsigned int vector;
@@ -981,10 +983,11 @@ void irq_force_complete_move(struct irq_desc *desc)
 		goto unlock;
 
 	/*
-	 * If prev_vector is empty, no action required.
+	 * If prev_vector is empty or the descriptor is neither currently
+	 * nor previously on the outgoing CPU no action required.
 	 */
 	vector = apicd->prev_vector;
-	if (!vector)
+	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
 		goto unlock;
 
 	/*

@@ -192,11 +192,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
 	cur->warned = false;
 
 	/*
-	 * If a non-zero TSC value for socket 0 may be valid then the default
-	 * adjusted value cannot assumed to be zero either.
+	 * The default adjust value cannot be assumed to be zero on any socket.
 	 */
-	if (tsc_async_resets)
-		cur->adjusted = bootval;
+	cur->adjusted = bootval;
 
 	/*
 	 * Check whether this CPU is the first in a package to come up. In

@@ -148,7 +148,7 @@ AVXcode:
 65: SEG=GS (Prefix)
 66: Operand-Size (Prefix)
 67: Address-Size (Prefix)
-68: PUSH Iz (d64)
+68: PUSH Iz
 69: IMUL Gv,Ev,Iz
 6a: PUSH Ib (d64)
 6b: IMUL Gv,Ev,Ib

@@ -650,33 +650,8 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	}
 
 	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
-		/*
-		 * Any interrupt that takes a fault gets the fixup. This makes
-		 * the below recursive fault logic only apply to a faults from
-		 * task context.
-		 */
-		if (in_interrupt())
-			return;
-
-		/*
-		 * Per the above we're !in_interrupt(), aka. task context.
-		 *
-		 * In this case we need to make sure we're not recursively
-		 * faulting through the emulate_vsyscall() logic.
-		 */
-		if (current->thread.sig_on_uaccess_err && signal) {
-			set_signal_archinfo(address, error_code);
-
-			/* XXX: hwpoison faults will set the wrong code. */
-			force_sig_fault(signal, si_code, (void __user *)address);
-		}
-
-		/*
-		 * Barring that, we can do the fixup and be happy.
-		 */
+	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
 		return;
-	}
 
 #ifdef CONFIG_VMAP_STACK
 	/*

@@ -41,7 +41,8 @@ KCOV_INSTRUMENT := n
 # make up the standalone purgatory.ro
 
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel $(CC_FLAGS_CFI)
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS += -fpic -fvisibility=hidden
 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
 PURGATORY_CFLAGS += -fno-stack-protector

@@ -690,6 +690,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
 		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
 			continue;
 		}
+
+		/*
+		 * Do not perform relocations in .notes sections; any
+		 * values there are meant for pre-boot consumption (e.g.
+		 * startup_xen).
+		 */
+		if (sec_applies->shdr.sh_type == SHT_NOTE)
+			continue;
+
 		sh_symtab = sec_symtab->symtab;
 		sym_strtab = sec_symtab->link->strtab;
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {

@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
 MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa");
 MODULE_ALIAS_CRYPTO("ecrdsa-generic");

@@ -576,7 +576,7 @@ static u_long get_word(struct vc_data *vc)
 	}
 	attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
 	buf[cnt++] = attr_ch;
-	while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+	while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
 		tmp_pos += 2;
 		tmpx++;
 		ch = get_char(vc, (u_short *)tmp_pos, &temp);
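For clarity, the sizeof()/ARRAY_SIZE() distinction the hunk above fixes, shown standalone: for an array whose elements are wider than one byte, sizeof() counts bytes while an ARRAY_SIZE-style macro counts elements, so bounding a loop index with sizeof() lets it run past the end. Plain userspace C, with the element type and size chosen only to mirror a 16-bit character buffer, not copied from the driver:

    #include <stdio.h>

    /* Same idea as the kernel's ARRAY_SIZE() macro. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
    	unsigned short buf[256];	/* 2-byte elements */

    	/* sizeof() is in bytes: 512 here, twice the element count. */
    	printf("sizeof(buf)     = %zu\n", sizeof(buf));
    	/* ARRAY_SIZE() is in elements: 256, the bound an index must respect. */
    	printf("ARRAY_SIZE(buf) = %zu\n", ARRAY_SIZE(buf));
    	return 0;
    }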

@@ -5,6 +5,7 @@
 ccflags-y				:= -D_LINUX -DBUILDING_ACPICA
 ccflags-$(CONFIG_ACPI_DEBUG)		+= -DACPI_DEBUG_OUTPUT
+CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation)
 
 # use acpi.o to put all files here into acpi.o modparam namespace
 obj-y	+= acpi.o

@@ -475,6 +475,18 @@ static const struct dmi_system_id asus_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
 		},
 	},
+	{
+		/* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+		},
+	},
+	{
+		/* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+		},
+	},
 	{ }
 };

@@ -5441,7 +5441,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			goto err;
 		break;
 	case BINDER_SET_MAX_THREADS: {
-		int max_threads;
+		u32 max_threads;
 
 		if (copy_from_user(&max_threads, ubuf,
 				   sizeof(max_threads))) {

@@ -114,8 +114,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
 static struct legacy_probe probe_list[NR_HOST];
 static struct legacy_data legacy_data[NR_HOST];
 static struct ata_host *legacy_host[NR_HOST];
-static int nr_legacy_host;
-
 
 static int probe_all;		/* Set to check all ISA port ranges */
 static int ht6560a;		/* HT 6560A on primary 1, second 2, both 3 */
@@ -1239,9 +1237,11 @@ static __exit void legacy_exit(void)
 {
 	int i;
 
-	for (i = 0; i < nr_legacy_host; i++) {
+	for (i = 0; i < NR_HOST; i++) {
 		struct legacy_data *ld = &legacy_data[i];
-		ata_host_detach(legacy_host[i]);
+
+		if (legacy_host[i])
+			ata_host_detach(legacy_host[i]);
 		platform_device_unregister(ld->platform_dev);
 	}
 }

@@ -2032,10 +2032,13 @@ static void __exit null_exit(void)
 
 	if (g_queue_mode == NULL_Q_MQ && shared_tags)
 		blk_mq_free_tag_set(&tag_set);
+
+	mutex_destroy(&lock);
 }
 
 module_init(null_init);
 module_exit(null_exit);
 
 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
+MODULE_DESCRIPTION("multi queue aware block test driver");
 MODULE_LICENSE("GPL");

View File

@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
if (!port) { if (!port) {
pr_warn("%s: no associated port!\n", name); pr_warn("%s: no associated port!\n", name);
rc = -ENXIO; rc = -ENXIO;
goto err; goto err_free_name;
}
index = ida_alloc(&ida_index, GFP_KERNEL);
if (index < 0) {
pr_warn("%s: failed to get index!\n", name);
rc = index;
goto err_put_port;
} }
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
memset(&ppdev_cb, 0, sizeof(ppdev_cb)); memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq; ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
ppdev_cb.private = pp; ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, index); pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
parport_put_port(port);
if (!pdev) { if (!pdev) {
pr_warn("%s: failed to register device!\n", name); pr_warn("%s: failed to register device!\n", name);
rc = -ENXIO; rc = -ENXIO;
ida_simple_remove(&ida_index, index); ida_free(&ida_index, index);
goto err; goto err_put_port;
} }
pp->pdev = pdev; pp->pdev = pdev;
pp->index = index; pp->index = index;
dev_dbg(&pdev->dev, "registered pardevice\n"); dev_dbg(&pdev->dev, "registered pardevice\n");
err: err_put_port:
parport_put_port(port);
err_free_name:
kfree(name); kfree(name);
return rc; return rc;
} }
@ -750,7 +757,7 @@ static int pp_release(struct inode *inode, struct file *file)
if (pp->pdev) { if (pp->pdev) {
parport_unregister_device(pp->pdev); parport_unregister_device(pp->pdev);
ida_simple_remove(&ida_index, pp->index); ida_free(&ida_index, pp->index);
pp->pdev = NULL; pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
} }
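For context on the API migration in the ppdev hunk above: ida_simple_get()/ida_simple_remove() are wrappers that newer kernels replace with ida_alloc()/ida_free(). The following is a minimal sketch of that pattern, not part of the patch; the names are illustrative only.

#include <linux/idr.h>

static DEFINE_IDA(example_ida);			/* hypothetical ID allocator */

static int example_get_id(void)
{
	/* ida_alloc() returns the smallest free ID >= 0, or a negative errno. */
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;			/* e.g. -ENOMEM */
	return id;
}

static void example_put_id(int id)
{
	/* Every successful ida_alloc() must be balanced by ida_free(). */
	ida_free(&example_ida, id);
}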


@ -2587,6 +2587,8 @@ static struct clk_hw *mmcc_msm8998_hws[] = {
static struct gdsc video_top_gdsc = { static struct gdsc video_top_gdsc = {
.gdscr = 0x1024, .gdscr = 0x1024,
.cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
.cxc_count = 3,
.pd = { .pd = {
.name = "video_top", .name = "video_top",
}, },
@ -2595,20 +2597,26 @@ static struct gdsc video_top_gdsc = {
static struct gdsc video_subcore0_gdsc = { static struct gdsc video_subcore0_gdsc = {
.gdscr = 0x1040, .gdscr = 0x1040,
.cxcs = (unsigned int []){ 0x1048 },
.cxc_count = 1,
.pd = { .pd = {
.name = "video_subcore0", .name = "video_subcore0",
}, },
.parent = &video_top_gdsc.pd, .parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON, .pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
}; };
static struct gdsc video_subcore1_gdsc = { static struct gdsc video_subcore1_gdsc = {
.gdscr = 0x1044, .gdscr = 0x1044,
.cxcs = (unsigned int []){ 0x104c },
.cxc_count = 1,
.pd = { .pd = {
.name = "video_subcore1", .name = "video_subcore1",
}, },
.parent = &video_top_gdsc.pd, .parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON, .pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
}; };
static struct gdsc mdss_gdsc = { static struct gdsc mdss_gdsc = {


@ -1575,47 +1575,36 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return 0; return 0;
} }
static int cpufreq_offline(unsigned int cpu) static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{ {
struct cpufreq_policy *policy;
int ret; int ret;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return 0;
}
down_write(&policy->rwsem);
if (has_target()) if (has_target())
cpufreq_stop_governor(policy); cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus); cpumask_clear_cpu(cpu, policy->cpus);
if (policy_is_inactive(policy)) {
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
else
policy->last_policy = policy->policy;
} else if (cpu == policy->cpu) {
/* Nominate new CPU */
policy->cpu = cpumask_any(policy->cpus);
}
/* Start governor again for active policy */
if (!policy_is_inactive(policy)) { if (!policy_is_inactive(policy)) {
/* Nominate a new CPU if necessary. */
if (cpu == policy->cpu)
policy->cpu = cpumask_any(policy->cpus);
/* Start the governor again for the active policy. */
if (has_target()) { if (has_target()) {
ret = cpufreq_start_governor(policy); ret = cpufreq_start_governor(policy);
if (ret) if (ret)
pr_err("%s: Failed to start governor\n", __func__); pr_err("%s: Failed to start governor\n", __func__);
} }
goto unlock; return;
} }
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
else
policy->last_policy = policy->policy;
if (cpufreq_thermal_control_enabled(cpufreq_driver)) { if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev); cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL; policy->cdev = NULL;
@ -1633,12 +1622,31 @@ static int cpufreq_offline(unsigned int cpu)
*/ */
if (cpufreq_driver->offline) { if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy); cpufreq_driver->offline(policy);
} else if (cpufreq_driver->exit) { return;
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
} }
unlock: if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return 0;
}
down_write(&policy->rwsem);
__cpufreq_offline(cpu, policy);
up_write(&policy->rwsem); up_write(&policy->rwsem);
return 0; return 0;
} }
@ -1656,19 +1664,26 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy) if (!policy)
return; return;
down_write(&policy->rwsem);
if (cpu_online(cpu)) if (cpu_online(cpu))
cpufreq_offline(cpu); __cpufreq_offline(cpu, policy);
cpumask_clear_cpu(cpu, policy->real_cpus); cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, dev); remove_cpu_dev_symlink(policy, dev);
if (cpumask_empty(policy->real_cpus)) { if (!cpumask_empty(policy->real_cpus)) {
/* We did light-weight exit earlier, do full tear down now */ up_write(&policy->rwsem);
if (cpufreq_driver->offline) return;
cpufreq_driver->exit(policy);
cpufreq_policy_free(policy);
} }
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
} }
/** /**


@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
if (hash_iv_len) { if (hash_iv_len) {
packet_log(" Hash IV Length %u bytes\n", hash_iv_len); packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
packet_dump(" hash IV: ", ptr, hash_iv_len); packet_dump(" hash IV: ", ptr, hash_iv_len);
ptr += ciph_key_len; ptr += hash_iv_len;
} }
if (ciph_iv_len) { if (ciph_iv_len) {


@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
}, },
}; };
#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = { static const struct acpi_device_id sp_acpi_match[] = {
{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
{ }, { },
}; };
MODULE_DEVICE_TABLE(acpi, sp_acpi_match); MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = { static const struct of_device_id sp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a", { .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&dev_vdata[0] }, .data = (const void *)&dev_vdata[0] },
{ }, { },
}; };
MODULE_DEVICE_TABLE(of, sp_of_match); MODULE_DEVICE_TABLE(of, sp_of_match);
#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{ {
#ifdef CONFIG_OF
const struct of_device_id *match; const struct of_device_id *match;
match = of_match_node(sp_of_match, pdev->dev.of_node); match = of_match_node(sp_of_match, pdev->dev.of_node);
if (match && match->data) if (match && match->data)
return (struct sp_dev_vdata *)match->data; return (struct sp_dev_vdata *)match->data;
#endif
return NULL; return NULL;
} }
static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{ {
#ifdef CONFIG_ACPI
const struct acpi_device_id *match; const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev); match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data) if (match && match->driver_data)
return (struct sp_dev_vdata *)match->driver_data; return (struct sp_dev_vdata *)match->driver_data;
#endif
return NULL; return NULL;
} }
@ -222,12 +216,8 @@ static int sp_platform_resume(struct platform_device *pdev)
static struct platform_driver sp_platform_driver = { static struct platform_driver sp_platform_driver = {
.driver = { .driver = {
.name = "ccp", .name = "ccp",
#ifdef CONFIG_ACPI
.acpi_match_table = sp_acpi_match, .acpi_match_table = sp_acpi_match,
#endif
#ifdef CONFIG_OF
.of_match_table = sp_of_match, .of_match_table = sp_of_match,
#endif
}, },
.probe = sp_platform_probe, .probe = sp_platform_probe,
.remove = sp_platform_remove, .remove = sp_platform_remove,


@ -95,8 +95,7 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */ /* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
if (reset_data->mode == ADF_DEV_RESET_ASYNC || if (reset_data->mode == ADF_DEV_RESET_ASYNC)
completion_done(&reset_data->compl))
kfree(reset_data); kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n"); WARN(1, "QAT: device restart failed. Device is unusable\n");
return; return;
@ -104,16 +103,8 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarted_notify(accel_dev); adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
/* /* The dev is back alive. Notify the caller if in sync mode */
* The dev is back alive. Notify the caller if in sync mode if (reset_data->mode == ADF_DEV_RESET_ASYNC)
*
* If device restart will take a more time than expected,
* the schedule_reset() function can timeout and exit. This can be
* detected by calling the completion_done() function. In this case
* the reset_data structure needs to be freed here.
*/
if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
completion_done(&reset_data->compl))
kfree(reset_data); kfree(reset_data);
else else
complete(&reset_data->compl); complete(&reset_data->compl);
@ -148,10 +139,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
if (!timeout) { if (!timeout) {
dev_err(&GET_DEV(accel_dev), dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n"); "Reset device timeout expired\n");
cancel_work_sync(&reset_data->reset_work);
ret = -EFAULT; ret = -EFAULT;
} else {
kfree(reset_data);
} }
kfree(reset_data);
return ret; return ret;
} }
return 0; return 0;


@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value); seq_printf(s, "%s: %d\n", obj->name, obj->value);
spin_lock_irq(&obj->lock); spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) { list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link); struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false); sync_print_fence(s, &pt->base, false);
} }
spin_unlock_irq(&obj->lock); spin_unlock(&obj->lock);
} }
static void sync_print_sync_file(struct seq_file *s, static void sync_print_sync_file(struct seq_file *s,


@ -594,7 +594,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev; idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
if (ret)
return ret;
ret = dma_async_device_register(&idma64->dma); ret = dma_async_device_register(&idma64->dma);
if (ret) if (ret)


@ -121,7 +121,8 @@ config EXTCON_MAX77843
config EXTCON_MAX8997 config EXTCON_MAX8997
tristate "Maxim MAX8997 EXTCON Support" tristate "Maxim MAX8997 EXTCON Support"
depends on MFD_MAX8997 && IRQ_DOMAIN depends on MFD_MAX8997
select IRQ_DOMAIN
help help
If you say yes here you get support for the MUIC device of If you say yes here you get support for the MUIC device of
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory


@ -164,9 +164,14 @@ static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0; return 0;
} }
static void dmi_dev_release(struct device *dev)
{
kfree(dev);
}
static struct class dmi_class = { static struct class dmi_class = {
.name = "dmi", .name = "dmi",
.dev_release = (void(*)(struct device *)) kfree, .dev_release = dmi_dev_release,
.dev_uevent = dmi_dev_uevent, .dev_uevent = dmi_dev_uevent,
}; };


@ -9,6 +9,7 @@
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/mailbox_client.h> #include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
@ -96,8 +97,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
if (size & 3) if (size & 3)
return -EINVAL; return -EINVAL;
buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr, buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
GFP_ATOMIC); &bus_addr, GFP_ATOMIC);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
@ -125,7 +126,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
ret = -EINVAL; ret = -EINVAL;
} }
dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr); dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
return ret; return ret;
} }


@ -30,6 +30,7 @@ static int fme_region_get_bridges(struct fpga_region *region)
static int fme_region_probe(struct platform_device *pdev) static int fme_region_probe(struct platform_device *pdev)
{ {
struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev); struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
struct fpga_region_info info = { 0 };
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct fpga_region *region; struct fpga_region *region;
struct fpga_manager *mgr; struct fpga_manager *mgr;
@ -39,20 +40,18 @@ static int fme_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr)) if (IS_ERR(mgr))
return -EPROBE_DEFER; return -EPROBE_DEFER;
region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges); info.mgr = mgr;
if (!region) { info.compat_id = mgr->compat_id;
ret = -ENOMEM; info.get_bridges = fme_region_get_bridges;
info.priv = pdata;
region = fpga_region_register_full(dev, &info);
if (IS_ERR(region)) {
ret = PTR_ERR(region);
goto eprobe_mgr_put; goto eprobe_mgr_put;
} }
region->priv = pdata;
region->compat_id = mgr->compat_id;
platform_set_drvdata(pdev, region); platform_set_drvdata(pdev, region);
ret = fpga_region_register(region);
if (ret)
goto eprobe_mgr_put;
dev_dbg(dev, "DFL FME FPGA Region probed\n"); dev_dbg(dev, "DFL FME FPGA Region probed\n");
return 0; return 0;


@ -1400,19 +1400,15 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
if (!cdev) if (!cdev)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
if (!cdev->region) {
ret = -ENOMEM;
goto free_cdev_exit;
}
cdev->parent = info->dev; cdev->parent = info->dev;
mutex_init(&cdev->lock); mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->port_dev_list); INIT_LIST_HEAD(&cdev->port_dev_list);
ret = fpga_region_register(cdev->region); cdev->region = fpga_region_register(info->dev, NULL, NULL);
if (ret) if (IS_ERR(cdev->region)) {
ret = PTR_ERR(cdev->region);
goto free_cdev_exit; goto free_cdev_exit;
}
/* create and init build info for enumeration */ /* create and init build info for enumeration */
binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL); binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);


@ -33,14 +33,14 @@ struct fpga_region *fpga_region_class_find(
EXPORT_SYMBOL_GPL(fpga_region_class_find); EXPORT_SYMBOL_GPL(fpga_region_class_find);
/** /**
* fpga_region_get - get an exclusive reference to a fpga region * fpga_region_get - get an exclusive reference to an fpga region
* @region: FPGA Region struct * @region: FPGA Region struct
* *
* Caller should call fpga_region_put() when done with region. * Caller should call fpga_region_put() when done with region.
* *
* Return fpga_region struct if successful. * Return fpga_region struct if successful.
* Return -EBUSY if someone already has a reference to the region. * Return -EBUSY if someone already has a reference to the region.
* Return -ENODEV if @np is not a FPGA Region. * Return -ENODEV if @np is not an FPGA Region.
*/ */
static struct fpga_region *fpga_region_get(struct fpga_region *region) static struct fpga_region *fpga_region_get(struct fpga_region *region)
{ {
@ -52,7 +52,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
} }
get_device(dev); get_device(dev);
if (!try_module_get(dev->parent->driver->owner)) { if (!try_module_get(region->ops_owner)) {
put_device(dev); put_device(dev);
mutex_unlock(&region->mutex); mutex_unlock(&region->mutex);
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
@ -74,7 +74,7 @@ static void fpga_region_put(struct fpga_region *region)
dev_dbg(dev, "put\n"); dev_dbg(dev, "put\n");
module_put(dev->parent->driver->owner); module_put(region->ops_owner);
put_device(dev); put_device(dev);
mutex_unlock(&region->mutex); mutex_unlock(&region->mutex);
} }
@ -180,48 +180,60 @@ static struct attribute *fpga_region_attrs[] = {
ATTRIBUTE_GROUPS(fpga_region); ATTRIBUTE_GROUPS(fpga_region);
/** /**
* fpga_region_create - alloc and init a struct fpga_region * __fpga_region_register_full - create and register an FPGA Region device
* @dev: device parent * @parent: device parent
* @mgr: manager that programs this region * @info: parameters for FPGA Region
* @get_bridges: optional function to get bridges to a list * @owner: module containing the get_bridges function
* *
* The caller of this function is responsible for freeing the resulting region * Return: struct fpga_region or ERR_PTR()
* struct with fpga_region_free(). Using devm_fpga_region_create() instead is
* recommended.
*
* Return: struct fpga_region or NULL
*/ */
struct fpga_region struct fpga_region *
*fpga_region_create(struct device *dev, __fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
struct fpga_manager *mgr, struct module *owner)
int (*get_bridges)(struct fpga_region *))
{ {
struct fpga_region *region; struct fpga_region *region;
int id, ret = 0; int id, ret = 0;
if (!info) {
dev_err(parent,
"Attempt to register without required info structure\n");
return ERR_PTR(-EINVAL);
}
region = kzalloc(sizeof(*region), GFP_KERNEL); region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region) if (!region)
return NULL; return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
if (id < 0) if (id < 0) {
ret = id;
goto err_free; goto err_free;
}
region->mgr = info->mgr;
region->compat_id = info->compat_id;
region->priv = info->priv;
region->get_bridges = info->get_bridges;
region->ops_owner = owner;
region->mgr = mgr;
region->get_bridges = get_bridges;
mutex_init(&region->mutex); mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list); INIT_LIST_HEAD(&region->bridge_list);
device_initialize(&region->dev);
region->dev.class = fpga_region_class; region->dev.class = fpga_region_class;
region->dev.parent = dev; region->dev.parent = parent;
region->dev.of_node = dev->of_node; region->dev.of_node = parent->of_node;
region->dev.id = id; region->dev.id = id;
ret = dev_set_name(&region->dev, "region%d", id); ret = dev_set_name(&region->dev, "region%d", id);
if (ret) if (ret)
goto err_remove; goto err_remove;
ret = device_register(&region->dev);
if (ret) {
put_device(&region->dev);
return ERR_PTR(ret);
}
return region; return region;
err_remove: err_remove:
@ -229,84 +241,41 @@ struct fpga_region
err_free: err_free:
kfree(region); kfree(region);
return NULL; return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(fpga_region_create); EXPORT_SYMBOL_GPL(__fpga_region_register_full);
/** /**
* fpga_region_free - free a FPGA region created by fpga_region_create() * __fpga_region_register - create and register an FPGA Region device
* @region: FPGA region * @parent: device parent
*/
void fpga_region_free(struct fpga_region *region)
{
ida_simple_remove(&fpga_region_ida, region->dev.id);
kfree(region);
}
EXPORT_SYMBOL_GPL(fpga_region_free);
static void devm_fpga_region_release(struct device *dev, void *res)
{
struct fpga_region *region = *(struct fpga_region **)res;
fpga_region_free(region);
}
/**
* devm_fpga_region_create - create and initialize a managed FPGA region struct
* @dev: device parent
* @mgr: manager that programs this region * @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list * @get_bridges: optional function to get bridges to a list
* @owner: module containing the get_bridges function
* *
* This function is intended for use in a FPGA region driver's probe function. * This simple version of the register function should be sufficient for most users.
* After the region driver creates the region struct with * The fpga_region_register_full() function is available for users that need to
* devm_fpga_region_create(), it should register it with fpga_region_register(). * pass additional, optional parameters.
* The region driver's remove function should call fpga_region_unregister().
* The region struct allocated with this function will be freed automatically on
* driver detach. This includes the case of a probe function returning error
* before calling fpga_region_register(), the struct will still get cleaned up.
* *
* Return: struct fpga_region or NULL * Return: struct fpga_region or ERR_PTR()
*/ */
struct fpga_region struct fpga_region *
*devm_fpga_region_create(struct device *dev, __fpga_region_register(struct device *parent, struct fpga_manager *mgr,
struct fpga_manager *mgr, int (*get_bridges)(struct fpga_region *), struct module *owner)
int (*get_bridges)(struct fpga_region *))
{ {
struct fpga_region **ptr, *region; struct fpga_region_info info = { 0 };
ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL); info.mgr = mgr;
if (!ptr) info.get_bridges = get_bridges;
return NULL;
region = fpga_region_create(dev, mgr, get_bridges); return __fpga_region_register_full(parent, &info, owner);
if (!region) {
devres_free(ptr);
} else {
*ptr = region;
devres_add(dev, ptr);
}
return region;
} }
EXPORT_SYMBOL_GPL(devm_fpga_region_create); EXPORT_SYMBOL_GPL(__fpga_region_register);
/** /**
* fpga_region_register - register a FPGA region * fpga_region_unregister - unregister an FPGA region
* @region: FPGA region * @region: FPGA region
* *
* Return: 0 or -errno * This function is intended for use in an FPGA region driver's remove function.
*/
int fpga_region_register(struct fpga_region *region)
{
return device_add(&region->dev);
}
EXPORT_SYMBOL_GPL(fpga_region_register);
/**
* fpga_region_unregister - unregister a FPGA region
* @region: FPGA region
*
* This function is intended for use in a FPGA region driver's remove function.
*/ */
void fpga_region_unregister(struct fpga_region *region) void fpga_region_unregister(struct fpga_region *region)
{ {
@ -316,6 +285,10 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
static void fpga_region_dev_release(struct device *dev) static void fpga_region_dev_release(struct device *dev)
{ {
struct fpga_region *region = to_fpga_region(dev);
ida_simple_remove(&fpga_region_ida, region->dev.id);
kfree(region);
} }
/** /**


@ -405,16 +405,12 @@ static int of_fpga_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr)) if (IS_ERR(mgr))
return -EPROBE_DEFER; return -EPROBE_DEFER;
region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges); region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges);
if (!region) { if (IS_ERR(region)) {
ret = -ENOMEM; ret = PTR_ERR(region);
goto eprobe_mgr_put; goto eprobe_mgr_put;
} }
ret = fpga_region_register(region);
if (ret)
goto eprobe_mgr_put;
of_platform_populate(np, fpga_region_of_match, NULL, &region->dev); of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
platform_set_drvdata(pdev, region); platform_set_drvdata(pdev, region);


@ -2073,6 +2073,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: SDMA trap\n"); DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id); instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
return instance;
switch (entry->ring_id) { switch (entry->ring_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring); amdgpu_fence_process(&adev->sdma.instance[instance].ring);


@ -766,6 +766,14 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (process) { if (process) {
pr_debug("Process already found\n"); pr_debug("Process already found\n");
} else { } else {
/* If the process just called exec(3), it is possible that the
* cleanup of the kfd_process (following the release of the mm
* of the old process image) is still in the cleanup work queue.
* Make sure to drain any job before trying to recreate any
* resource for this process.
*/
flush_workqueue(kfd_process_wq);
process = create_process(thread); process = create_process(thread);
if (IS_ERR(process)) if (IS_ERR(process))
goto out; goto out;
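A rough sketch of the ordering the new KFD comment describes, using hypothetical names that are not from the driver: draining the workqueue guarantees that any cleanup still queued for the old process image has finished before new resources are created.

#include <linux/workqueue.h>

/* Hypothetical example assuming a dedicated cleanup workqueue. */
static struct workqueue_struct *example_cleanup_wq;

static int example_recreate_state(void)
{
	/*
	 * flush_workqueue() waits for every work item queued so far to
	 * complete, so stale cleanup cannot race with the new allocation.
	 */
	flush_workqueue(example_cleanup_wq);

	/* ... now it is safe to allocate fresh per-process resources ... */
	return 0;
}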


@ -2099,6 +2099,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream); dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL; dm_new_crtc_state->stream = NULL;
} }
dm_new_crtc_state->base.color_mgmt_changed = true;
} }
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {


@ -379,6 +379,11 @@ bool cm_helper_translate_curve_to_hw_format(
i += increment) { i += increment) {
if (j == hw_points - 1) if (j == hw_points - 1)
break; break;
if (i >= TRANSFER_FUNC_POINTS) {
DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
i, TRANSFER_FUNC_POINTS);
return false;
}
rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i];


@ -70,7 +70,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
__drm_atomic_helper_connector_destroy_state(connector->state); __drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state); kfree(connector->state);
__drm_atomic_helper_connector_reset(connector, &mw_state->base); connector->state = NULL;
if (mw_state)
__drm_atomic_helper_connector_reset(connector, &mw_state->base);
} }
static enum drm_connector_status static enum drm_connector_status


@ -1978,6 +1978,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state); mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
if (!mhdp_state->current_mode)
return;
drm_mode_set_name(mhdp_state->current_mode); drm_mode_set_name(mhdp_state->current_mode);
dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);


@ -766,10 +766,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
int ret; int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node); host = of_find_mipi_dsi_host_by_node(dsi_node);
if (!host) { if (!host)
dev_err(lt9611->dev, "failed to find dsi host\n"); return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
return ERR_PTR(-EPROBE_DEFER);
}
dsi = mipi_dsi_device_register_full(host, &info); dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) { if (IS_ERR(dsi)) {


@ -453,10 +453,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
dev_dbg(tc->dev, "bus_formats %04x bpc %d\n", dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
connector->display_info.bus_formats[0], connector->display_info.bus_formats[0],
tc->bpc); tc->bpc);
/*
* Default hardware register settings of tc358775 configured
* with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
*/
if (connector->display_info.bus_formats[0] == if (connector->display_info.bus_formats[0] ==
MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
/* VESA-24 */ /* VESA-24 */
@ -467,14 +463,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2)); d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6)); d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
} else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */ } else {
d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3)); /* JEIDA-18 and JEIDA-24 */
d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0)); d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0)); d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0)); d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2)); d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0)); d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
} }
d2l_write(tc->i2c, VFUEN, VFUEN_EN); d2l_write(tc->i2c, VFUEN, VFUEN_EN);
@ -605,10 +602,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
}; };
host = of_find_mipi_dsi_host_by_node(tc->host_node); host = of_find_mipi_dsi_host_by_node(tc->host_node);
if (!host) { if (!host)
dev_err(dev, "failed to find dsi host\n"); return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
return -EPROBE_DEFER;
}
dsi = mipi_dsi_device_register_full(host, &info); dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) { if (IS_ERR(dsi)) {


@ -560,7 +560,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
* *
* Return: 0 on success or a negative error code on failure. * Return: 0 on success or a negative error code on failure.
*/ */
ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
{ {
/* Note: Needs updating for non-default PPS or algorithm */ /* Note: Needs updating for non-default PPS or algorithm */
u8 tx[2] = { enable << 0, 0 }; u8 tx[2] = { enable << 0, 0 };
@ -585,8 +585,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
* *
* Return: 0 on success or a negative error code on failure. * Return: 0 on success or a negative error code on failure.
*/ */
ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
const struct drm_dsc_picture_parameter_set *pps) const struct drm_dsc_picture_parameter_set *pps)
{ {
struct mipi_dsi_msg msg = { struct mipi_dsi_msg msg = {
.channel = dsi->channel, .channel = dsi->channel,


@ -21,6 +21,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
size = round_up(size, PAGE_SIZE); size = round_up(size, PAGE_SIZE);
if (size == 0)
return ERR_PTR(-EINVAL);
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
if (!mtk_gem_obj) if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);


@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
FREQ_1000_1001(params[i].pixel_freq)); FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq, i, params[i].phy_freq,
FREQ_1000_1001(params[i].phy_freq/10)*10); FREQ_1000_1001(params[i].phy_freq/1000)*1000);
/* Match strict frequency */ /* Match strict frequency */
if (phy_freq == params[i].phy_freq && if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq) vclk_freq == params[i].vclk_freq)
return MODE_OK; return MODE_OK;
/* Match 1000/1001 variant */ /* Match 1000/1001 variant */
if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) && if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK; return MODE_OK;
} }
@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
for (freq = 0 ; params[freq].pixel_freq ; ++freq) { for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq || if ((phy_freq == params[freq].phy_freq ||
phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) && phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
(vclk_freq == params[freq].vclk_freq || (vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq) if (vclk_freq != params[freq].vclk_freq)


@ -448,9 +448,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc); _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return;
ctl = phys_enc->hw_ctl; ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx); ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask); ctl->ops.update_pending_flush(ctl, flush_mask);


@ -2235,6 +2235,9 @@ static const struct panel_desc innolux_g121x1_l03 = {
.unprepare = 200, .unprepare = 200,
.disable = 400, .disable = 400,
}, },
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
}; };
/* /*


@ -1253,6 +1253,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
index = 1; index = 1;
addr = of_get_address(dev->of_node, index, NULL, NULL); addr = of_get_address(dev->of_node, index, NULL, NULL);
if (!addr)
return -EINVAL;
vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset; vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;


@ -164,6 +164,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* request and enable interrupt */ /* request and enable interrupt */
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0) {
dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
return ret;
}
if (!pdev->msi_enabled && !pdev->msix_enabled) if (!pdev->msi_enabled && !pdev->msix_enabled)
irq_flag = IRQF_SHARED; irq_flag = IRQF_SHARED;


@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
if (np) { if (np) {
data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io"); data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision"); data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
} else { } else {
if (client->dev.platform_data) if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data; data->setup = *(struct shtc1_platform_data *)dev->platform_data;


@ -289,6 +289,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24), PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
.driver_data = (kernel_ulong_t)&intel_th_2x, .driver_data = (kernel_ulong_t)&intel_th_2x,
}, },
{
/* Meteor Lake-S CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ {
/* Raptor Lake-S */ /* Raptor Lake-S */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26), PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),


@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return -ENOMEM; return -ENOMEM;
stm->major = register_chrdev(0, stm_data->name, &stm_fops); stm->major = register_chrdev(0, stm_data->name, &stm_fops);
if (stm->major < 0) if (stm->major < 0) {
goto err_free; err = stm->major;
vfree(stm);
return err;
}
device_initialize(&stm->dev); device_initialize(&stm->dev);
stm->dev.devt = MKDEV(stm->major, 0); stm->dev.devt = MKDEV(stm->major, 0);
@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
err_device: err_device:
unregister_chrdev(stm->major, stm_data->name); unregister_chrdev(stm->major, stm_data->name);
/* matches device_initialize() above */ /* calls stm_device_release() */
put_device(&stm->dev); put_device(&stm->dev);
err_free:
vfree(stm);
return err; return err;
} }
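A minimal sketch of the refcounting rule the stm_register_device() error path now respects, with illustrative names that are not from the STM core: before device_initialize() a plain free is fine, but afterwards the object must be dropped with put_device() so the release callback does the freeing.

#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
	struct device dev;			/* hypothetical embedded device */
};

static void example_release(struct device *dev)
{
	/* Called once the last reference goes away. */
	kfree(container_of(dev, struct example_obj, dev));
}

static int example_create(struct device *parent)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return -ENOMEM;			/* nothing initialized yet: direct free is fine */

	device_initialize(&obj->dev);
	obj->dev.parent = parent;
	obj->dev.release = example_release;

	err = dev_set_name(&obj->dev, "example0");
	if (err)
		goto err_put;

	err = device_add(&obj->dev);
	if (err)
		goto err_put;

	return 0;

err_put:
	put_device(&obj->dev);			/* frees obj via example_release() */
	return err;
}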


@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2,
} }
} }
static int dps310_calculate_temp(struct dps310_data *data) static int dps310_calculate_temp(struct dps310_data *data, int *val)
{ {
s64 c0; s64 c0;
s64 t; s64 t;
@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data)
t = c0 + ((s64)data->temp_raw * (s64)data->c1); t = c0 + ((s64)data->temp_raw * (s64)data->c1);
/* Convert to milliCelsius and scale the temperature */ /* Convert to milliCelsius and scale the temperature */
return (int)div_s64(t * 1000LL, kt); *val = (int)div_s64(t * 1000LL, kt);
return 0;
} }
static int dps310_read_temp(struct dps310_data *data, int *val, int *val2, static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
if (rc) if (rc)
return rc; return rc;
rc = dps310_calculate_temp(data); rc = dps310_calculate_temp(data, val);
if (rc < 0) if (rc)
return rc; return rc;
*val = rc;
return IIO_VAL_INT; return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO: case IIO_CHAN_INFO_OVERSAMPLING_RATIO:


@ -159,76 +159,96 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{ {
struct device *dev = hr_dev->dev; struct hns_roce_buf_list *trunks;
u32 size = buf->size; u32 i;
int i;
if (size == 0) if (!buf)
return; return;
buf->size = 0; trunks = buf->trunk_list;
if (trunks) {
buf->trunk_list = NULL;
for (i = 0; i < buf->ntrunks; i++)
dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
trunks[i].buf, trunks[i].map);
if (hns_roce_buf_is_direct(buf)) { kfree(trunks);
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
for (i = 0; i < buf->npages; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
buf->page_list = NULL;
} }
kfree(buf);
} }
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, /*
struct hns_roce_buf *buf, u32 page_shift) * Allocate the dma buffer for storing ROCEE table entries
*
* @size: required size
* @page_shift: the unit size in a continuous dma address range
* @flags: HNS_ROCE_BUF_ flags to control the allocation flow.
*/
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
u32 page_shift, u32 flags)
{ {
struct hns_roce_buf_list *buf_list; u32 trunk_size, page_size, alloced_size;
struct device *dev = hr_dev->dev; struct hns_roce_buf_list *trunks;
u32 page_size; struct hns_roce_buf *buf;
int i; gfp_t gfp_flags;
u32 ntrunk, i;
/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */ /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift); if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
return ERR_PTR(-EINVAL);
gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
buf = kzalloc(sizeof(*buf), gfp_flags);
if (!buf)
return ERR_PTR(-ENOMEM);
buf->page_shift = page_shift;
page_size = 1 << buf->page_shift; page_size = 1 << buf->page_shift;
buf->npages = DIV_ROUND_UP(size, page_size);
/* required size is not bigger than one trunk size */ /* Calc the trunk size and num by required size and page_shift */
if (size <= max_direct) { if (flags & HNS_ROCE_BUF_DIRECT) {
buf->page_list = NULL; buf->trunk_shift = ilog2(ALIGN(size, PAGE_SIZE));
buf->direct.buf = dma_alloc_coherent(dev, size, ntrunk = 1;
&buf->direct.map,
GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
} else { } else {
buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL); buf->trunk_shift = ilog2(ALIGN(page_size, PAGE_SIZE));
if (!buf_list) ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
return -ENOMEM;
for (i = 0; i < buf->npages; i++) {
buf_list[i].buf = dma_alloc_coherent(dev, page_size,
&buf_list[i].map,
GFP_KERNEL);
if (!buf_list[i].buf)
break;
}
if (i != buf->npages && i > 0) {
while (i-- > 0)
dma_free_coherent(dev, page_size,
buf_list[i].buf,
buf_list[i].map);
kfree(buf_list);
return -ENOMEM;
}
buf->page_list = buf_list;
} }
buf->size = size;
return 0; trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
if (!trunks) {
kfree(buf);
return ERR_PTR(-ENOMEM);
}
trunk_size = 1 << buf->trunk_shift;
alloced_size = 0;
for (i = 0; i < ntrunk; i++) {
trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
&trunks[i].map, gfp_flags);
if (!trunks[i].buf)
break;
alloced_size += trunk_size;
}
buf->ntrunks = i;
/* In nofail mode, it's only failed when the alloced size is 0 */
if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
for (i = 0; i < buf->ntrunks; i++)
dma_free_coherent(hr_dev->dev, trunk_size,
trunks[i].buf, trunks[i].map);
kfree(trunks);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
buf->npages = DIV_ROUND_UP(alloced_size, page_size);
buf->trunk_list = trunks;
return buf;
} }
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
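A hedged usage sketch of the reworked allocator above, with a hypothetical caller inside the same driver: hns_roce_buf_alloc() now returns the buffer (or an ERR_PTR) instead of filling a caller-provided struct, and the HNS_ROCE_BUF_* flags added by this series select direct, atomic, or best-effort behaviour.

/* Hypothetical caller within hns_roce; not part of the patch. */
static struct hns_roce_buf *example_alloc_queue_buf(struct hns_roce_dev *hr_dev,
						    u32 size, u32 page_shift)
{
	struct hns_roce_buf *buf;

	/* Request a single DMA-contiguous trunk; the call may sleep. */
	buf = hns_roce_buf_alloc(hr_dev, size, page_shift, HNS_ROCE_BUF_DIRECT);
	if (IS_ERR(buf))
		return buf;			/* -EINVAL or -ENOMEM wrapped in ERR_PTR */

	/* ... program the trunk addresses into hardware here ... */
	return buf;
}

static void example_free_queue_buf(struct hns_roce_dev *hr_dev,
				   struct hns_roce_buf *buf)
{
	/* hns_roce_buf_free() accepts NULL and releases every trunk. */
	hns_roce_buf_free(hr_dev, buf);
}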


@ -60,7 +60,7 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier, u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, u8 op_modifier, u16 op,
unsigned long timeout) unsigned int timeout)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int ret; int ret;
@ -78,7 +78,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier, u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, unsigned long timeout) u8 op_modifier, u16 op, unsigned int timeout)
{ {
int ret; int ret;
@ -108,7 +108,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier, u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, u8 op_modifier, u16 op,
unsigned long timeout) unsigned int timeout)
{ {
struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct hns_roce_cmd_context *context; struct hns_roce_cmd_context *context;
@ -159,7 +159,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier, u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, unsigned long timeout) u8 op_modifier, u16 op, unsigned int timeout)
{ {
int ret; int ret;
@ -173,7 +173,7 @@ static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op, unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout) unsigned int timeout)
{ {
int ret; int ret;


@ -141,7 +141,7 @@ enum {
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op, unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout); unsigned int timeout);
struct hns_roce_cmd_mailbox * struct hns_roce_cmd_mailbox *
hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev); hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);


@ -38,19 +38,19 @@
#define roce_raw_write(value, addr) \ #define roce_raw_write(value, addr) \
__raw_writel((__force u32)cpu_to_le32(value), (addr)) __raw_writel((__force u32)cpu_to_le32(value), (addr))
#define roce_get_field(origin, mask, shift) \ #define roce_get_field(origin, mask, shift) \
(((le32_to_cpu(origin)) & (mask)) >> (shift)) ((le32_to_cpu(origin) & (mask)) >> (u32)(shift))
#define roce_get_bit(origin, shift) \ #define roce_get_bit(origin, shift) \
roce_get_field((origin), (1ul << (shift)), (shift)) roce_get_field((origin), (1ul << (shift)), (shift))
#define roce_set_field(origin, mask, shift, val) \ #define roce_set_field(origin, mask, shift, val) \
do { \ do { \
(origin) &= ~cpu_to_le32(mask); \ (origin) &= ~cpu_to_le32(mask); \
(origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \ (origin) |= cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
} while (0) } while (0)
#define roce_set_bit(origin, shift, val) \ #define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val)) roce_set_field((origin), (1ul << (shift)), (shift), (val))
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3


@ -95,8 +95,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir, static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
struct hns_roce_db *db, int order) struct hns_roce_db *db, int order)
{ {
int o; unsigned long o;
int i; unsigned long i;
for (o = order; o <= 1; ++o) { for (o = order; o <= 1; ++o) {
i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o); i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
@ -154,8 +154,8 @@ int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db) void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{ {
int o; unsigned long o;
int i; unsigned long i;
mutex_lock(&hr_dev->pgdir_mutex); mutex_lock(&hr_dev->pgdir_mutex);


@ -115,11 +115,15 @@
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230 #define SRQ_DB_REG 0x230
#define HNS_ROCE_QP_BANK_NUM 8
/* The chip implementation of the consumer index is calculated /* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth * according to twice the actual EQ depth
*/ */
#define EQ_DEPTH_COEFF 2 #define EQ_DEPTH_COEFF 2
#define CQ_BANKID_MASK GENMASK(1, 0)
enum { enum {
SERV_TYPE_RC, SERV_TYPE_RC,
SERV_TYPE_UC, SERV_TYPE_UC,
@ -263,9 +267,6 @@ enum {
#define HNS_HW_PAGE_SHIFT 12 #define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT) #define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
/* The minimum page count for hardware access page directly. */
#define HNS_HW_DIRECT_PAGE_COUNT 2
struct hns_roce_uar { struct hns_roce_uar {
u64 pfn; u64 pfn;
unsigned long index; unsigned long index;
@ -316,7 +317,7 @@ struct hns_roce_hem_table {
}; };
struct hns_roce_buf_region { struct hns_roce_buf_region {
int offset; /* page offset */ u32 offset; /* page offset */
u32 count; /* page count */ u32 count; /* page count */
int hopnum; /* addressing hop num */ int hopnum; /* addressing hop num */
}; };
@ -336,10 +337,10 @@ struct hns_roce_buf_attr {
size_t size; /* region size */ size_t size; /* region size */
int hopnum; /* multi-hop addressing hop num */ int hopnum; /* multi-hop addressing hop num */
} region[HNS_ROCE_MAX_BT_REGION]; } region[HNS_ROCE_MAX_BT_REGION];
int region_count; /* valid region count */ unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */ unsigned int page_shift; /* buffer page shift */
bool fixed_page; /* decide page shift is fixed-size or maximum size */ bool fixed_page; /* decide page shift is fixed-size or maximum size */
int user_access; /* umem access flag */ unsigned int user_access; /* umem access flag */
bool mtt_only; /* only alloc buffer-required MTT memory */ bool mtt_only; /* only alloc buffer-required MTT memory */
}; };
@ -350,7 +351,7 @@ struct hns_roce_hem_cfg {
unsigned int buf_pg_shift; /* buffer page shift */ unsigned int buf_pg_shift; /* buffer page shift */
unsigned int buf_pg_count; /* buffer page count */ unsigned int buf_pg_count; /* buffer page count */
struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION]; struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
int region_count; unsigned int region_count;
}; };
/* memory translate region */ /* memory translate region */
@ -398,7 +399,7 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */ u64 *wrid; /* Work request ID */
spinlock_t lock; spinlock_t lock;
u32 wqe_cnt; /* WQE num */ u32 wqe_cnt; /* WQE num */
int max_gs; u32 max_gs;
int offset; int offset;
int wqe_shift; /* WQE size */ int wqe_shift; /* WQE size */
u32 head; u32 head;
@ -417,11 +418,26 @@ struct hns_roce_buf_list {
dma_addr_t map; dma_addr_t map;
}; };
/*
* %HNS_ROCE_BUF_DIRECT indicates that the all memory must be in a continuous
* dma address range.
*
* %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
*
* %HNS_ROCE_BUF_NOFAIL allocation only failed when allocated size is zero, even
* the allocated size is smaller than the required size.
*/
enum {
HNS_ROCE_BUF_DIRECT = BIT(0),
HNS_ROCE_BUF_NOSLEEP = BIT(1),
HNS_ROCE_BUF_NOFAIL = BIT(2),
};
struct hns_roce_buf { struct hns_roce_buf {
struct hns_roce_buf_list direct; struct hns_roce_buf_list *trunk_list;
struct hns_roce_buf_list *page_list; u32 ntrunks;
u32 npages; u32 npages;
u32 size; unsigned int trunk_shift;
unsigned int page_shift; unsigned int page_shift;
}; };
@ -449,8 +465,8 @@ struct hns_roce_db {
} u; } u;
dma_addr_t dma; dma_addr_t dma;
void *virt_addr; void *virt_addr;
int index; unsigned long index;
int order; unsigned long order;
}; };
struct hns_roce_cq { struct hns_roce_cq {
@ -498,8 +514,8 @@ struct hns_roce_srq {
u64 *wrid; u64 *wrid;
struct hns_roce_idx_que idx_que; struct hns_roce_idx_que idx_que;
spinlock_t lock; spinlock_t lock;
int head; u16 head;
int tail; u16 tail;
struct mutex mutex; struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event); void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
}; };
@ -508,13 +524,22 @@ struct hns_roce_uar_table {
struct hns_roce_bitmap bitmap; struct hns_roce_bitmap bitmap;
}; };
struct hns_roce_bank {
struct ida ida;
u32 inuse; /* Number of IDs allocated */
u32 min; /* Lowest ID to allocate. */
u32 max; /* Highest ID to allocate. */
u32 next; /* Next ID to allocate. */
};
struct hns_roce_qp_table { struct hns_roce_qp_table {
struct hns_roce_bitmap bitmap;
struct hns_roce_hem_table qp_table; struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table; struct hns_roce_hem_table irrl_table;
struct hns_roce_hem_table trrl_table; struct hns_roce_hem_table trrl_table;
struct hns_roce_hem_table sccc_table; struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex; struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
struct mutex bank_mutex;
}; };
struct hns_roce_cq_table { struct hns_roce_cq_table {
@ -728,11 +753,11 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */ int type_flag; /* Aeq:1 ceq:0 */
int eqn; int eqn;
u32 entries; u32 entries;
int log_entries; u32 log_entries;
int eqe_size; int eqe_size;
int irq; int irq;
int log_page_size; int log_page_size;
int cons_index; u32 cons_index;
struct hns_roce_buf_list *buf_list; struct hns_roce_buf_list *buf_list;
int over_ignore; int over_ignore;
int coalesce; int coalesce;
@ -740,7 +765,7 @@ struct hns_roce_eq {
int hop_num; int hop_num;
struct hns_roce_mtr mtr; struct hns_roce_mtr mtr;
u16 eq_max_cnt; u16 eq_max_cnt;
int eq_period; u32 eq_period;
int shift; int shift;
int event_type; int event_type;
int sub_type; int sub_type;
@ -763,8 +788,8 @@ struct hns_roce_caps {
u32 max_sq_inline; u32 max_sq_inline;
u32 max_rq_sg; u32 max_rq_sg;
u32 max_extend_sg; u32 max_extend_sg;
int num_qps; u32 num_qps;
int reserved_qps; u32 reserved_qps;
int num_qpc_timer; int num_qpc_timer;
int num_cqc_timer; int num_cqc_timer;
int num_srqs; int num_srqs;
@ -776,7 +801,7 @@ struct hns_roce_caps {
u32 max_srq_desc_sz; u32 max_srq_desc_sz;
int max_qp_init_rdma; int max_qp_init_rdma;
int max_qp_dest_rdma; int max_qp_dest_rdma;
int num_cqs; u32 num_cqs;
u32 max_cqes; u32 max_cqes;
u32 min_cqes; u32 min_cqes;
u32 min_wqes; u32 min_wqes;
@ -785,7 +810,7 @@ struct hns_roce_caps {
int num_aeq_vectors; int num_aeq_vectors;
int num_comp_vectors; int num_comp_vectors;
int num_other_vectors; int num_other_vectors;
int num_mtpts; u32 num_mtpts;
u32 num_mtt_segs; u32 num_mtt_segs;
u32 num_cqe_segs; u32 num_cqe_segs;
u32 num_srqwqe_segs; u32 num_srqwqe_segs;
@ -896,7 +921,7 @@ struct hns_roce_hw {
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param, int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event); u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout); int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev); int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr); const union ib_gid *gid, const struct ib_gid_attr *attr);
@@ -1067,29 +1092,19 @@ static inline struct hns_roce_qp
 	return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
 }
 
-static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
-{
-	if (buf->page_list)
-		return false;
-
-	return true;
-}
-
-static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
+static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
+					unsigned int offset)
 {
-	if (hns_roce_buf_is_direct(buf))
-		return (char *)(buf->direct.buf) + (offset & (buf->size - 1));
-
-	return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
-	       (offset & ((1 << buf->page_shift) - 1));
+	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
+			(offset & ((1 << buf->trunk_shift) - 1));
 }
 
-static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
 {
-	if (hns_roce_buf_is_direct(buf))
-		return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);
-	else
-		return buf->page_list[idx].map;
+	unsigned int offset = idx << buf->page_shift;
+
+	return buf->trunk_list[offset >> buf->trunk_shift].map +
+	       (offset & ((1 << buf->trunk_shift) - 1));
 }
 
 #define hr_hw_page_align(x)	ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
@@ -1161,7 +1176,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_mtr *mtr);
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-		     dma_addr_t *pages, int page_cnt);
+		     dma_addr_t *pages, unsigned int page_cnt);
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
@@ -1221,8 +1236,8 @@ int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int hns_roce_dealloc_mw(struct ib_mw *ibmw);
 
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
-int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
-		       struct hns_roce_buf *buf, u32 page_shift);
+struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+					u32 page_shift, u32 flags);
 
 int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 			   int buf_cnt, int start, struct hns_roce_buf *buf);
@@ -1244,10 +1259,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
-void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
-void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
-bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
 			  struct ib_cq *ib_cq);
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
@@ -1277,7 +1292,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);


@@ -60,16 +60,16 @@ enum {
 	 (sizeof(struct scatterlist) + sizeof(void *)))
 
 #define check_whether_bt_num_3(type, hop_num) \
-	(type < HEM_TYPE_MTT && hop_num == 2)
+	((type) < HEM_TYPE_MTT && (hop_num) == 2)
 
 #define check_whether_bt_num_2(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == 2))
+	(((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 2))
 
 #define check_whether_bt_num_1(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
-	(type >= HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+	(((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
 
 struct hns_roce_hem_chunk {
 	struct list_head list;


@@ -288,7 +288,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 				ret = -EINVAL;
 				*bad_wr = wr;
 				dev_err(dev, "inline len(1-%d)=%d, illegal",
-					ctrl->msg_length,
+					le32_to_cpu(ctrl->msg_length),
 					hr_dev->caps.max_sq_inline);
 				goto out;
 			}
@@ -1715,7 +1715,7 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
 }
 
 static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
-				unsigned long timeout)
+				unsigned int timeout)
 {
 	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
 	unsigned long end;
@@ -3674,10 +3674,10 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	return 0;
 }
 
-static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
 {
 	roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
 		       (req_not << eq->log_entries), eq->doorbell);
 }
 
 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,


@@ -650,7 +650,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 	unsigned int sge_idx;
 	unsigned int wqe_idx;
 	void *wqe = NULL;
-	int nreq;
+	u32 nreq;
 	int ret;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -828,7 +828,7 @@ static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
 	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
 }
 
-static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, unsigned int n)
 {
 	return hns_roce_buf_offset(idx_que->mtr.kmem,
 				   n << idx_que->entry_shift);
@@ -869,12 +869,12 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	struct hns_roce_v2_wqe_data_seg *dseg;
 	struct hns_roce_v2_db srq_db;
 	unsigned long flags;
+	unsigned int ind;
 	__le32 *srq_idx;
 	int ret = 0;
 	int wqe_idx;
 	void *wqe;
 	int nreq;
-	int ind;
 	int i;
 
 	spin_lock_irqsave(&srq->lock, flags);
@@ -1128,7 +1128,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
 		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
 			   upper_32_bits(dma));
 		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
-			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
 		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
 		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
 	} else {
@@ -1136,7 +1136,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
 			   upper_32_bits(dma));
 		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
-			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
+			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
 		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
 		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
 	}
@@ -1907,8 +1907,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	}
 }
 
-static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
-		       int *buf_page_size, int *bt_page_size, u32 hem_type)
+static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
+		       u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
 {
 	u64 obj_per_chunk;
 	u64 bt_chunk_size = PAGE_SIZE;
@@ -2382,10 +2382,10 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
 	u32 buf_chk_sz;
 	dma_addr_t t;
 	int func_num = 1;
-	int pg_num_a;
-	int pg_num_b;
-	int pg_num;
-	int size;
+	u32 pg_num_a;
+	u32 pg_num_b;
+	u32 pg_num;
+	u32 size;
 	int i;
 
 	switch (type) {
@@ -2549,7 +2549,7 @@ static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
 	struct hns_roce_cmq_desc desc;
 	struct hns_roce_mbox_status *mb_st =
 			(struct hns_roce_mbox_status *)desc.data;
-	enum hns_roce_cmd_return_status status;
+	int status;
 
 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
@@ -2620,7 +2620,7 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
 }
 
 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
-				unsigned long timeout)
+				unsigned int timeout)
 {
 	struct device *dev = hr_dev->dev;
 	unsigned long end;
@@ -2970,7 +2970,7 @@ static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
 }
 
-static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
 {
 	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
@@ -3278,8 +3278,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 		   wc->status == IB_WC_WR_FLUSH_ERR))
 		return;
 
-	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
-	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+	ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
+			      cqe_status);
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
 		       cq->cqe_size, false);
 
 	/*
@@ -3314,7 +3315,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
 	int is_send;
 	u16 wqe_ctr;
 	u32 opcode;
-	int qpn;
+	u32 qpn;
 	int ret;
 
 	/* Find cqe according to consumer index */


@@ -53,7 +53,7 @@
  * GID[0][0], GID[1][0],.....GID[N - 1][0],
  * And so on
  */
-int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 {
 	return gid_index * hr_dev->caps.num_ports + port;
 }


@@ -484,18 +484,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-	int ret = 0;
+	int ret, sg_num = 0;
 
 	mr->npages = 0;
 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
-		return ret;
+		return sg_num;
 
-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-	if (ret < 1) {
+	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
@@ -506,17 +506,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-		ret = 0;
+		sg_num = 0;
 	} else {
-		mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
-		ret = mr->npages;
+		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
 	}
 
 err_page_list:
 	kvfree(mr->page_list);
 	mr->page_list = NULL;
 
-	return ret;
+	return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -694,15 +693,6 @@ static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
 	return size;
 }
 
-static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
-					  unsigned int page_shift)
-{
-	if (is_direct)
-		return ALIGN(alloc_size, 1 << page_shift);
-	else
-		return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
-}
-
 /*
  * check the given pages in continuous address space
  * Returns 0 on success, or the error page num.
@@ -731,7 +721,6 @@ static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 	/* release kernel buffers */
 	if (mtr->kmem) {
 		hns_roce_buf_free(hr_dev, mtr->kmem);
-		kfree(mtr->kmem);
 		mtr->kmem = NULL;
 	}
 }
@@ -743,13 +732,12 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	unsigned int best_pg_shift;
 	int all_pg_count = 0;
-	size_t direct_size;
 	size_t total_size;
 	int ret;
 
 	total_size = mtr_bufs_size(buf_attr);
 	if (total_size < 1) {
-		ibdev_err(ibdev, "Failed to check mtr size\n");
+		ibdev_err(ibdev, "failed to check mtr size\n.");
 		return -EINVAL;
 	}
@@ -761,7 +749,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
 					buf_attr->user_access);
 		if (IS_ERR_OR_NULL(mtr->umem)) {
-			ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
+			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
 				  PTR_ERR(mtr->umem));
 			return -ENOMEM;
 		}
@@ -779,19 +767,16 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		ret = 0;
 	} else {
 		mtr->umem = NULL;
-		mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
-		if (!mtr->kmem) {
-			ibdev_err(ibdev, "Failed to alloc kmem\n");
-			return -ENOMEM;
-		}
-		direct_size = mtr_kmem_direct_size(is_direct, total_size,
-						   buf_attr->page_shift);
-		ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
-					 mtr->kmem, buf_attr->page_shift);
-		if (ret) {
-			ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
-			goto err_alloc_mem;
+		mtr->kmem =
+			hns_roce_buf_alloc(hr_dev, total_size,
+					   buf_attr->page_shift,
+					   is_direct ? HNS_ROCE_BUF_DIRECT : 0);
+		if (IS_ERR(mtr->kmem)) {
+			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
+				  PTR_ERR(mtr->kmem));
+			return PTR_ERR(mtr->kmem);
 		}
+
 		best_pg_shift = buf_attr->page_shift;
 		all_pg_count = mtr->kmem->npages;
 	}
@@ -799,7 +784,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	/* must bigger than minimum hardware page shift */
 	if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
 		ret = -EINVAL;
-		ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
+		ibdev_err(ibdev,
+			  "failed to check mtr, page shift = %u count = %d.\n",
 			  best_pg_shift, all_pg_count);
 		goto err_alloc_mem;
 	}
@@ -840,12 +826,12 @@ static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 }
 
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-		     dma_addr_t *pages, int page_cnt)
+		     dma_addr_t *pages, unsigned int page_cnt)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_region *r;
+	unsigned int i;
 	int err;
-	int i;
 
 	/*
 	 * Only use the first page address as root ba when hopnum is 0, this
@@ -882,13 +868,12 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
 {
 	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+	int mtt_count, left;
 	int start_index;
-	int mtt_count;
 	int total = 0;
 	__le64 *mtts;
-	int npage;
+	u32 npage;
 	u64 addr;
-	int left;
 
 	if (!mtt_buf || mtt_max < 1)
 		goto done;


@@ -154,9 +154,66 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
 	}
 }
 
-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static u8 get_affinity_cq_bank(u8 qp_bank)
 {
+	return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+				       struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+	struct ib_cq *scq = init_attr->send_cq;
+	u32 least_load = INVALID_LOAD_QPNUM;
+	unsigned long cqn = 0;
+	u8 bankid = 0;
+	u32 bankcnt;
+	u8 i;
+
+	if (scq)
+		cqn = to_hr_cq(scq)->cqn;
+
+	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+			continue;
+
+		bankcnt = bank[i].inuse;
+		if (bankcnt < least_load) {
+			least_load = bankcnt;
+			bankid = i;
+		}
+	}
+
+	return bankid;
+}
+
+static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
+				 unsigned long *qpn)
+{
+	int id;
+
+	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
+	if (id < 0) {
+		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
+				     GFP_KERNEL);
+		if (id < 0)
+			return id;
+	}
+
+	/* the QPN should keep increasing until the max value is reached. */
+	bank->next = (id + 1) > bank->max ? bank->min : id + 1;
+
+	/* the lower 3 bits is bankid */
+	*qpn = (id << 3) | bankid;
+
+	return 0;
+}
+
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+		     struct ib_qp_init_attr *init_attr)
+{
+	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 	unsigned long num = 0;
+	u8 bankid;
 	int ret;
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
@@ -169,13 +226,21 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		hr_qp->doorbell_qpn = 1;
 	} else {
-		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
-						  1, 1, &num);
+		mutex_lock(&qp_table->bank_mutex);
+		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
+
+		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+					    &num);
 		if (ret) {
-			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
-			return -ENOMEM;
+			ibdev_err(&hr_dev->ib_dev,
+				  "failed to alloc QPN, ret = %d\n", ret);
+			mutex_unlock(&qp_table->bank_mutex);
+			return ret;
 		}
 
+		qp_table->bank[bankid].inuse++;
+		mutex_unlock(&qp_table->bank_mutex);
+
 		hr_qp->doorbell_qpn = (u32)num;
 	}
@@ -340,9 +405,15 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
 }
 
+static inline u8 get_qp_bankid(unsigned long qpn)
+{
+	/* The lower 3 bits of QPN are used to hash to different banks */
+	return (u8)(qpn & GENMASK(2, 0));
+}
+
 static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
-	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+	u8 bankid;
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
 		return;
@@ -350,7 +421,13 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
 		return;
 
-	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
+	bankid = get_qp_bankid(hr_qp->qpn);
+
+	ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
+
+	mutex_lock(&hr_dev->qp_table.bank_mutex);
+	hr_dev->qp_table.bank[bankid].inuse--;
+	mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }
 
 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -944,7 +1021,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		goto err_db;
 	}
 
-	ret = alloc_qpn(hr_dev, hr_qp);
+	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
 		goto err_buf;
@@ -1257,22 +1334,22 @@ static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
 }
 
-void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
 {
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
 
-void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
 {
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
 
-void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
 {
 	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
 }
 
-bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
 			  struct ib_cq *ib_cq)
 {
 	struct hns_roce_cq *hr_cq;
@@ -1293,22 +1370,25 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	int reserved_from_top = 0;
-	int reserved_from_bot;
-	int ret;
+	unsigned int reserved_from_bot;
+	unsigned int i;
 
 	mutex_init(&qp_table->scc_mutex);
+	mutex_init(&qp_table->bank_mutex);
 	xa_init(&hr_dev->qp_table_xa);
 
 	reserved_from_bot = hr_dev->caps.reserved_qps;
 
-	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
-				   hr_dev->caps.num_qps - 1, reserved_from_bot,
-				   reserved_from_top);
-	if (ret) {
-		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
-			ret);
-		return ret;
+	for (i = 0; i < reserved_from_bot; i++) {
+		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
+		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
+	}
+
+	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+		ida_init(&hr_dev->qp_table.bank[i].ida);
+		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
+					       HNS_ROCE_QP_BANK_NUM - 1;
+		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
 	}
 
 	return 0;
@@ -1316,5 +1396,8 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
 {
-	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
+	int i;
+
+	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
+		ida_destroy(&hr_dev->qp_table.bank[i].ida);
 }


@@ -185,8 +185,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	ppriv = ipoib_priv(pdev);
 
-	snprintf(intf_name, sizeof(intf_name), "%s.%04x",
-		 ppriv->dev->name, pkey);
+	/* If you increase IFNAMSIZ, update snprintf below
+	 * to allow longer names.
+	 */
+	BUILD_BUG_ON(IFNAMSIZ != 16);
+	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
+		 pkey);
 
 	ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (IS_ERR(ndev)) {

Some files were not shown because too many files have changed in this diff.