Merge 6.1.34 into android14-6.1-lts

Changes in 6.1.34
	scsi: megaraid_sas: Add flexible array member for SGLs
	net: sfp: fix state loss when updating state_hw_mask
	spi: mt65xx: make sure operations completed before unloading
	platform/surface: aggregator: Allow completion work-items to be executed in parallel
	platform/surface: aggregator_tabletsw: Add support for book mode in KIP subsystem
	spi: qup: Request DMA before enabling clocks
	afs: Fix setting of mtime when creating a file/dir/symlink
	wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll
	bpf, sockmap: Avoid potential NULL dereference in sk_psock_verdict_data_ready()
	neighbour: fix unaligned access to pneigh_entry
	net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods
	net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294
	bpf: Fix UAF in task local storage
	bpf: Fix elem_size not being set for inner maps
	net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down
	net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT
	net: enetc: correct the statistics of rx bytes
	net: enetc: correct rx_bytes statistics of XDP
	net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values
	drm/i915: Explain the magic numbers for AUX SYNC/precharge length
	drm/i915: Use 18 fast wake AUX sync len
	Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER
	Bluetooth: Fix l2cap_disconnect_req deadlock
	Bluetooth: ISO: don't try to remove CIG if there are bound CIS left
	Bluetooth: L2CAP: Add missing checks for invalid DCID
	wifi: mac80211: use correct iftype HE cap
	wifi: cfg80211: reject bad AP MLD address
	wifi: mac80211: mlme: fix non-inheritence element
	wifi: mac80211: don't translate beacon/presp addrs
	qed/qede: Fix scheduling while atomic
	wifi: cfg80211: fix locking in sched scan stop work
	selftests/bpf: Verify optval=NULL case
	selftests/bpf: Fix sockopt_sk selftest
	netfilter: nft_bitwise: fix register tracking
	netfilter: conntrack: fix NULL pointer dereference in nf_confirm_cthelper
	netfilter: ipset: Add schedule point in call_ad().
	netfilter: nf_tables: out-of-bound check in chain blob
	ipv6: rpl: Fix Route of Death.
	tcp: gso: really support BIG TCP
	rfs: annotate lockless accesses to sk->sk_rxhash
	rfs: annotate lockless accesses to RFS sock flow table
	net: sched: add rcu annotations around qdisc->qdisc_sleeping
	drm/i915/selftests: Stop using kthread_stop()
	drm/i915/selftests: Add some missing error propagation
	net: sched: move rtm_tca_policy declaration to include file
	net: sched: act_police: fix sparse errors in tcf_police_dump()
	net: sched: fix possible refcount leak in tc_chain_tmplt_add()
	bpf: Add extra path pointer check to d_path helper
	drm/amdgpu: fix Null pointer dereference error in amdgpu_device_recover_vram
	lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
	net: bcmgenet: Fix EEE implementation
	bnxt_en: Don't issue AP reset during ethtool's reset operation
	bnxt_en: Query default VLAN before VNIC setup on a VF
	bnxt_en: Skip firmware fatal error recovery if chip is not accessible
	bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE event
	bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks
	batman-adv: Broken sync while rescheduling delayed work
	Input: xpad - delete a Razer DeathAdder mouse VID/PID entry
	Input: psmouse - fix OOB access in Elantech protocol
	Input: fix open count when closing inhibited device
	ALSA: hda: Fix kctl->id initialization
	ALSA: ymfpci: Fix kctl->id initialization
	ALSA: gus: Fix kctl->id initialization
	ALSA: cmipci: Fix kctl->id initialization
	ALSA: hda/realtek: Add quirk for Clevo NS50AU
	ALSA: ice1712,ice1724: fix the kcontrol->id initialization
	ALSA: hda/realtek: Add a quirk for HP Slim Desktop S01
	ALSA: hda/realtek: Add Lenovo P3 Tower platform
	ALSA: hda/realtek: Add quirks for Asus ROG 2024 laptops using CS35L41
	drm/i915/gt: Use the correct error value when kernel_context() fails
	drm/amd/pm: conditionally disable pcie lane switching for some sienna_cichlid SKUs
	drm/amdgpu: fix xclk freq on CHIP_STONEY
	drm/amdgpu: change reserved vram info print
	drm/amd/pm: Fix power context allocation in SMU13
	drm/amd/display: Reduce sdp bw after urgent to 90%
	wifi: iwlwifi: mvm: Fix -Warray-bounds bug in iwl_mvm_wait_d3_notif()
	can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in J1939 Socket
	can: j1939: change j1939_netdev_lock type to mutex
	can: j1939: avoid possible use-after-free when j1939_can_rx_register fails
	mptcp: only send RM_ADDR in nl_cmd_remove
	mptcp: add address into userspace pm list
	mptcp: update userspace pm infos
	selftests: mptcp: update userspace pm addr tests
	selftests: mptcp: update userspace pm subflow tests
	ceph: fix use-after-free bug for inodes when flushing capsnaps
	s390/dasd: Use correct lock while counting channel queue length
	Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk
	Bluetooth: fix debugfs registration
	Bluetooth: hci_qca: fix debugfs registration
	tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta'
	rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting
	rbd: get snapshot context after exclusive lock is ensured to be held
	virtio_net: use control_buf for coalesce params
	soc: qcom: icc-bwmon: fix incorrect error code passed to dev_err_probe()
	pinctrl: meson-axg: add missing GPIOA_18 gpio group
	usb: usbfs: Enforce page requirements for mmap
	usb: usbfs: Use consistent mmap functions
	mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM
	mm: page_table_check: Ensure user pages are not slab pages
	arm64: dts: qcom: sc8280xp: Flush RSC sleep & wake votes
	ARM: at91: pm: fix imbalanced reference counter for ethernet devices
	ARM: dts: at91: sama7g5ek: fix debounce delay property for shdwc
	ASoC: codecs: wsa883x: do not set can_multi_write flag
	ASoC: codecs: wsa881x: do not set can_multi_write flag
	arm64: dts: qcom: sc7180-lite: Fix SDRAM freq for misidentified sc7180-lite boards
	arm64: dts: imx8qm-mek: correct GPIOs for USDHC2 CD and WP signals
	arm64: dts: imx8-ss-dma: assign default clock rate for lpuarts
	ASoC: mediatek: mt8195-afe-pcm: Convert to platform remove callback returning void
	ASoC: mediatek: mt8195: fix use-after-free in driver remove path
	ASoC: simple-card-utils: fix PCM constraint error check
	blk-mq: fix blk_mq_hw_ctx active request accounting
	arm64: dts: imx8mn-beacon: Fix SPI CS pinmux
	i2c: mv64xxx: Fix reading invalid status value in atomic mode
	firmware: arm_ffa: Set handle field to zero in memory descriptor
	gpio: sim: fix memory corruption when adding named lines and unnamed hogs
	i2c: sprd: Delete i2c adapter in .remove's error path
	riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable
	eeprom: at24: also select REGMAP
	soundwire: stream: Add missing clear of alloc_slave_rt
	riscv: fix kprobe __user string arg print fault issue
	vduse: avoid empty string for dev name
	vhost: support PACKED when setting-getting vring_base
	vhost_vdpa: support PACKED when setting-getting vring_base
	ksmbd: fix out-of-bound read in deassemble_neg_contexts()
	ksmbd: fix out-of-bound read in parse_lease_state()
	ksmbd: check the validation of pdu_size in ksmbd_conn_handler_loop
	Revert "ext4: don't clear SB_RDONLY when remounting r/w until quota is re-enabled"
	ext4: only check dquot_initialize_needed() when debugging
	wifi: rtw89: correct PS calculation for SUPPORTS_DYNAMIC_PS
	wifi: rtw88: correct PS calculation for SUPPORTS_DYNAMIC_PS
	Revert "staging: rtl8192e: Replace macro RTL_PCI_DEVICE with PCI_DEVICE"
	Linux 6.1.34

Note: commit 898c9a0ee7 ("bpf, sockmap: Avoid potential NULL
dereference in sk_psock_verdict_data_ready()") is merged away in this
merge due to missing dependencies; it will come back in a later merge.

Change-Id: I8e57d0914e6114822a8941a4663525d85377ca8a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit ee4c9c95ff
Greg Kroah-Hartman, 2023-06-14 19:19:21 +00:00
161 files changed, 1476 insertions(+), 685 deletions(-)


@ -54,3 +54,22 @@ Build kernel with:
Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
table support without extra kernel parameter.
Implementation notes
====================
We specifically decided not to use VMA information in order to avoid relying on
MM states (except for limited "struct page" info). The page table check is a
separate from Linux-MM state machine that verifies that the user accessible
pages are not falsely shared.
PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
regions into the userspace via /dev/mem. At the same time, pages may change
their properties (e.g., from anonymous pages to named pages) while they are
still being mapped in the userspace, leading to "corruption" detected by the
page table check.
Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via
/dev/mem. However, these pages are always considered as named pages, so they
won't break the logic used in the page table check.
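
As an illustration of the /dev/mem scenario described above, here is a minimal
userspace sketch (hypothetical, not part of this series) that maps an arbitrary
physical page:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	off_t phys = 0x100000;	/* arbitrary physical address, for illustration */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/*
	 * Without EXCLUSIVE_SYSTEM_RAM this mapping may land on ordinary
	 * system RAM; that page can later change properties (e.g. from an
	 * anonymous page to a named page) while still mapped here, which is
	 * the "corruption" the page table check reports.
	 */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	munmap(p, 4096);
	close(fd);
	return 0;
}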


@ -1247,8 +1247,8 @@ ping_group_range - 2 INTEGERS
Restrict ICMP_PROTO datagram sockets to users in the group range.
The default is "1 0", meaning, that nobody (not even root) may
create ping sockets. Setting it to "100 100" would grant permissions
to the single group. "0 4294967295" would enable it for the world, "100
4294967295" would enable it for the users, but not daemons.
to the single group. "0 4294967294" would enable it for the world, "100
4294967294" would enable it for the users, but not daemons.
tcp_early_demux - BOOLEAN
Enable early demux for established TCP sockets.
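
For context on ping_group_range above: the range gates creation of unprivileged
ICMP datagram ("ping") sockets. A minimal userspace sketch (hypothetical, not
part of the patch):

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	/*
	 * Creation is refused (typically EACCES) when the caller's GID falls
	 * outside net.ipv4.ping_group_range.
	 */
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

	if (fd < 0) {
		perror("ping socket");
		return 1;
	}
	puts("unprivileged ICMP datagram socket created");
	return 0;
}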


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 33
SUBLEVEL = 34
EXTRAVERSION =
NAME = Curry Ramen


@ -789,7 +789,7 @@ &sdmmc2 {
};
&shdwc {
atmel,shdwc-debouncer = <976>;
debounce-delay-us = <976>;
status = "okay";
input@0 {


@ -334,16 +334,14 @@ static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
pdev = of_find_device_by_node(eth->np);
if (!pdev)
return false;
/* put_device(eth->dev) is called at the end of suspend. */
eth->dev = &pdev->dev;
}
/* No quirks if device isn't a wakeup source. */
if (!device_may_wakeup(eth->dev)) {
put_device(eth->dev);
if (!device_may_wakeup(eth->dev))
return false;
}
/* put_device(eth->dev) is called at the end of suspend. */
return true;
}
@ -439,7 +437,8 @@ static int at91_pm_config_quirks(bool suspend)
pr_err("AT91: PM: failed to enable %s clocks\n",
j == AT91_PM_G_ETH ? "geth" : "eth");
}
} else {
}
/*
* Release the reference to eth->dev taken in
* at91_pm_eth_quirk_is_valid().
@ -447,7 +446,6 @@ static int at91_pm_config_quirks(bool suspend)
put_device(eth->dev);
eth->dev = NULL;
}
}
return ret;
}


@ -26,6 +26,8 @@ lpuart0: serial@5a060000 {
clocks = <&uart0_lpcg IMX_LPCG_CLK_4>,
<&uart0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "baud";
assigned-clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <80000000>;
power-domains = <&pd IMX_SC_R_UART_0>;
status = "disabled";
};
@ -36,6 +38,8 @@ lpuart1: serial@5a070000 {
clocks = <&uart1_lpcg IMX_LPCG_CLK_4>,
<&uart1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "baud";
assigned-clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <80000000>;
power-domains = <&pd IMX_SC_R_UART_1>;
status = "disabled";
};
@ -46,6 +50,8 @@ lpuart2: serial@5a080000 {
clocks = <&uart2_lpcg IMX_LPCG_CLK_4>,
<&uart2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "baud";
assigned-clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <80000000>;
power-domains = <&pd IMX_SC_R_UART_2>;
status = "disabled";
};
@ -56,6 +62,8 @@ lpuart3: serial@5a090000 {
clocks = <&uart3_lpcg IMX_LPCG_CLK_4>,
<&uart3_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "baud";
assigned-clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <80000000>;
power-domains = <&pd IMX_SC_R_UART_3>;
status = "disabled";
};


@ -81,7 +81,7 @@ sound {
&ecspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_espi2>;
cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
status = "okay";
eeprom@0 {
@ -202,7 +202,7 @@ pinctrl_espi2: espi2grp {
MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41
>;
};


@ -82,8 +82,8 @@ &usdhc2 {
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
vmmc-supply = <&reg_usdhc2_vmmc>;
cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>;
wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>;
cd-gpios = <&lsio_gpio5 22 GPIO_ACTIVE_LOW>;
wp-gpios = <&lsio_gpio5 21 GPIO_ACTIVE_HIGH>;
status = "okay";
};


@ -16,3 +16,11 @@ &cpu6_opp11 {
&cpu6_opp12 {
opp-peak-kBps = <8532000 23347200>;
};
&cpu6_opp13 {
opp-peak-kBps = <8532000 23347200>;
};
&cpu6_opp14 {
opp-peak-kBps = <8532000 23347200>;
};


@ -1727,6 +1727,7 @@ apps_rsc: rsc@18200000 {
qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
<WAKE_TCS 3>, <CONTROL_TCS 1>;
label = "apps_rsc";
power-domains = <&CLUSTER_PD>;
apps_bcm_voter: bcm-voter {
compatible = "qcom,bcm-voter";


@ -25,6 +25,7 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_MMIOWB
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP if MMU
select ARCH_HAS_SET_MEMORY if MMU


@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
_PAGE_EXEC | _PAGE_WRITE)
#define PAGE_COPY PAGE_READ
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
#define PAGE_COPY_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC


@ -286,7 +286,7 @@ static const pgprot_t protection_map[16] = {
[VM_EXEC] = PAGE_EXEC,
[VM_EXEC | VM_READ] = PAGE_READ_EXEC,
[VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READ,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,


@ -685,6 +685,10 @@ static void __blk_mq_free_request(struct request *rq)
blk_crypto_free_request(rq);
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;
if (rq->rq_flags & RQF_MQ_INFLIGHT)
__blk_mq_dec_active_requests(hctx);
if (rq->tag != BLK_MQ_NO_TAG)
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
if (sched_tag != BLK_MQ_NO_TAG)
@ -696,15 +700,11 @@ static void __blk_mq_free_request(struct request *rq)
void blk_mq_free_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if ((rq->rq_flags & RQF_ELVPRIV) &&
q->elevator->type->ops.finish_request)
q->elevator->type->ops.finish_request(rq);
if (rq->rq_flags & RQF_MQ_INFLIGHT)
__blk_mq_dec_active_requests(hctx);
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->disk->bdi);


@ -1334,14 +1334,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
/*
* Must be called after rbd_obj_calc_img_extents().
*/
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
if (!obj_req->num_img_extents ||
(rbd_obj_is_entire(obj_req) &&
!obj_req->img_request->snapc->num_snaps))
return false;
rbd_assert(obj_req->img_request->snapc);
return true;
if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
dout("%s %p objno %llu discard\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (!obj_req->num_img_extents) {
dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (rbd_obj_is_entire(obj_req) &&
!obj_req->img_request->snapc->num_snaps) {
dout("%s %p objno %llu entire\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
@ -1442,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
rbd_assert(obj_req->img_request->snapc);
return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
num_ops);
}
@ -1578,15 +1595,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request,
mutex_init(&img_request->state_mutex);
}
/*
* Only snap_id is captured here, for reads. For writes, snapshot
* context is captured in rbd_img_object_requests() after exclusive
* lock is ensured to be held.
*/
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
lockdep_assert_held(&rbd_dev->header_rwsem);
if (rbd_img_is_write(img_req))
img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
else
if (!rbd_img_is_write(img_req))
img_req->snap_id = rbd_dev->spec->snap_id;
if (rbd_dev_parent_get(rbd_dev))
@ -2233,9 +2253,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
if (ret)
return ret;
if (rbd_obj_copyup_enabled(obj_req))
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
@ -2341,8 +2358,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
if (ret)
return ret;
if (rbd_obj_copyup_enabled(obj_req))
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
if (!obj_req->num_img_extents) {
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req))
@ -3287,6 +3302,7 @@ static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
case RBD_OBJ_WRITE_START:
rbd_assert(!*result);
rbd_obj_set_copyup_enabled(obj_req);
if (rbd_obj_write_is_noop(obj_req))
return true;
@ -3473,9 +3489,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
if (rbd_img_is_write(img_req)) {
rbd_assert(!img_req->snapc);
down_read(&rbd_dev->header_rwsem);
img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
}
for_each_obj_request(img_req, obj_req) {
int result = 0;
@ -3493,7 +3519,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
int ret;
again:
@ -3514,9 +3539,6 @@ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
if (*result)
return true;
rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
rbd_img_object_requests(img_req);
if (!img_req->pending.num_pending) {
*result = img_req->pending.result;
@ -3978,6 +4000,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
ret = rbd_object_map_open(rbd_dev);
if (ret)


@ -78,7 +78,8 @@ enum qca_flags {
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED,
QCA_BT_OFF,
QCA_ROM_FW
QCA_ROM_FW,
QCA_DEBUGFS_CREATED,
};
enum qca_capabilities {
@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
if (!hdev->debugfs)
return;
if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
return;
ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
/* read only */


@ -424,6 +424,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
}
mem_region->handle = 0;
mem_region->reserved_0 = 0;
mem_region->reserved_1 = 0;
mem_region->ep_count = args->nattrs;


@ -717,8 +717,10 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
if (!line_names)
return ERR_PTR(-ENOMEM);
list_for_each_entry(line, &bank->line_list, siblings)
list_for_each_entry(line, &bank->line_list, siblings) {
if (line->name && (line->offset <= max_offset))
line_names[line->offset] = line->name;
}
return line_names;
}


@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
struct amdgpu_bo_vm *vmbo;
bo = shadow_bo->parent;
vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
if (!list_empty(&vmbo->shadow_list)) {
@ -691,11 +692,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
* is initialized.
*/
bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
@ -712,6 +708,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
mutex_unlock(&adev->shadow_list_lock);
}


@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
amdgpu_bo_add_to_shadow_list(*vmbo);
return 0;


@ -799,7 +799,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct drm_buddy *mm = &mgr->mm;
struct drm_buddy_block *block;
struct amdgpu_vram_reservation *rsv;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
@ -811,8 +811,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
drm_buddy_print(mm, printer);
drm_printf(printer, "reserved:\n");
list_for_each_entry(block, &mgr->reserved_pages, link)
drm_buddy_block_print(mm, block, printer);
list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
drm_printf(printer, "%#018llx-%#018llx: %llu\n",
rsv->start, rsv->start + rsv->size, rsv->size);
mutex_unlock(&mgr->lock);
}


@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
if (adev->flags & AMD_IS_APU)
if (adev->flags & AMD_IS_APU) {
switch (adev->asic_type) {
case CHIP_STONEY:
/* vbios says 48Mhz, but the actual freq is 100Mhz */
return 10000;
default:
return reference_clock;
}
}
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))


@ -137,7 +137,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_sdp_bw_after_urgent = 100.0,
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented


@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
uint32_t *gen_speed_override,
uint32_t *lane_width_override)
{
struct amdgpu_device *adev = smu->adev;
*gen_speed_override = 0xff;
*lane_width_override = 0xff;
switch (adev->pdev->device) {
case 0x73A0:
case 0x73A1:
case 0x73A2:
case 0x73A3:
case 0x73AB:
case 0x73AE:
/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
*lane_width_override = 6;
break;
case 0x73E0:
case 0x73E1:
case 0x73E3:
*lane_width_override = 4;
break;
case 0x7420:
case 0x7421:
case 0x7422:
case 0x7423:
case 0x7424:
*lane_width_override = 3;
break;
default:
break;
}
}
#define MAX(a, b) ((a) > (b) ? (a) : (b))
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
uint32_t smu_pcie_arg;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
uint32_t gen_speed_override, lane_width_override;
uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
int ret, i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
/* lclk dpm table setup */
for (i = 0; i < MAX_PCIE_CONF; i++) {
dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
sienna_cichlid_get_override_pcie_settings(smu,
&gen_speed_override,
&lane_width_override);
/* PCIE gen speed override */
if (gen_speed_override != 0xff) {
min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
} else {
min_gen_speed = MAX(0, table_member1[0]);
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
}
pcie_table->pcie_gen[0] = min_gen_speed;
pcie_table->pcie_gen[1] = max_gen_speed;
/* PCIE lane width override */
if (lane_width_override != 0xff) {
min_lane_width = MIN(pcie_width_cap, lane_width_override);
max_lane_width = MIN(pcie_width_cap, lane_width_override);
} else {
min_lane_width = MAX(1, table_member2[0]);
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
}
pcie_table->pcie_lane[0] = min_lane_width;
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) |
((table_member1[i] <= pcie_gen_cap) ?
(table_member1[i] << 8) :
(pcie_gen_cap << 8)) |
((table_member2[i] <= pcie_width_cap) ?
table_member2[i] :
pcie_width_cap);
smu_pcie_arg = (i << 16 |
pcie_table->pcie_gen[i] << 8 |
pcie_table->pcie_lane[i]);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
NULL);
if (ret)
return ret;
if (table_member1[i] > pcie_gen_cap)
dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
if (table_member2[i] > pcie_width_cap)
dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
}
return 0;


@ -582,11 +582,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
return 0;
}


@ -119,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
static int intel_dp_aux_sync_len(void)
{
int precharge = 16; /* 10-16 */
int preamble = 16;
return precharge + preamble;
}
static int intel_dp_aux_fw_sync_len(void)
{
int precharge = 10; /* 10-16 */
int preamble = 8;
return precharge + preamble;
}
static int g4x_dp_aux_precharge_len(void)
{
int precharge_min = 10;
int preamble = 16;
/* HW wants the length of the extra precharge in 2us units */
return (intel_dp_aux_sync_len() -
precharge_min - preamble) / 2;
}
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
@ -141,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
@ -165,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;


@ -179,97 +179,108 @@ static int live_nop_switch(void *arg)
}
struct parallel_switch {
struct task_struct *tsk;
struct kthread_worker *worker;
struct kthread_work work;
struct intel_context *ce[2];
int result;
};
static int __live_parallel_switch1(void *data)
static void __live_parallel_switch1(struct kthread_work *work)
{
struct parallel_switch *arg = data;
struct parallel_switch *arg =
container_of(work, typeof(*arg), work);
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
arg->result = 0;
do {
struct i915_request *rq = NULL;
int err, n;
int n;
err = 0;
for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *prev = rq;
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
i915_request_put(prev);
return PTR_ERR(rq);
arg->result = PTR_ERR(rq);
break;
}
i915_request_get(rq);
if (prev) {
err = i915_request_await_dma_fence(rq, &prev->fence);
arg->result =
i915_request_await_dma_fence(rq,
&prev->fence);
i915_request_put(prev);
}
i915_request_add(rq);
}
if (IS_ERR_OR_NULL(rq))
break;
if (i915_request_wait(rq, 0, HZ) < 0)
err = -ETIME;
arg->result = -ETIME;
i915_request_put(rq);
if (err)
return err;
count++;
} while (!__igt_timeout(end_time, NULL));
} while (!arg->result && !__igt_timeout(end_time, NULL));
pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
return 0;
pr_info("%s: %lu switches (sync) <%d>\n",
arg->ce[0]->engine->name, count, arg->result);
}
static int __live_parallel_switchN(void *data)
static void __live_parallel_switchN(struct kthread_work *work)
{
struct parallel_switch *arg = data;
struct parallel_switch *arg =
container_of(work, typeof(*arg), work);
struct i915_request *rq = NULL;
IGT_TIMEOUT(end_time);
unsigned long count;
int n;
count = 0;
arg->result = 0;
do {
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *prev = rq;
int err = 0;
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
i915_request_put(prev);
return PTR_ERR(rq);
arg->result = PTR_ERR(rq);
break;
}
i915_request_get(rq);
if (prev) {
err = i915_request_await_dma_fence(rq, &prev->fence);
arg->result =
i915_request_await_dma_fence(rq,
&prev->fence);
i915_request_put(prev);
}
i915_request_add(rq);
if (err) {
i915_request_put(rq);
return err;
}
}
count++;
} while (!__igt_timeout(end_time, NULL));
} while (!arg->result && !__igt_timeout(end_time, NULL));
if (!IS_ERR_OR_NULL(rq))
i915_request_put(rq);
pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
return 0;
pr_info("%s: %lu switches (many) <%d>\n",
arg->ce[0]->engine->name, count, arg->result);
}
static int live_parallel_switch(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
static void (* const func[])(struct kthread_work *) = {
__live_parallel_switch1,
__live_parallel_switchN,
NULL,
@ -277,7 +288,7 @@ static int live_parallel_switch(void *arg)
struct parallel_switch *data = NULL;
struct i915_gem_engines *engines;
struct i915_gem_engines_iter it;
int (* const *fn)(void *arg);
void (* const *fn)(struct kthread_work *);
struct i915_gem_context *ctx;
struct intel_context *ce;
struct file *file;
@ -335,8 +346,10 @@ static int live_parallel_switch(void *arg)
continue;
ce = intel_context_create(data[m].ce[0]->engine);
if (IS_ERR(ce))
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
err = intel_context_pin(ce);
if (err) {
@ -348,9 +361,24 @@ static int live_parallel_switch(void *arg)
}
}
for (n = 0; n < count; n++) {
struct kthread_worker *worker;
if (!data[n].ce[0])
continue;
worker = kthread_create_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
goto out;
}
data[n].worker = worker;
}
for (fn = func; !err && *fn; fn++) {
struct igt_live_test t;
int n;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@ -360,34 +388,23 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
data[n].tsk = kthread_run(*fn, &data[n],
"igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(data[n].tsk)) {
err = PTR_ERR(data[n].tsk);
break;
data[n].result = 0;
kthread_init_work(&data[n].work, *fn);
kthread_queue_work(data[n].worker, &data[n].work);
}
get_task_struct(data[n].tsk);
}
yield(); /* start all threads before we kthread_stop() */
for (n = 0; n < count; n++) {
int status;
if (IS_ERR_OR_NULL(data[n].tsk))
continue;
status = kthread_stop(data[n].tsk);
if (status && !err)
err = status;
put_task_struct(data[n].tsk);
data[n].tsk = NULL;
if (data[n].ce[0]) {
kthread_flush_work(&data[n].work);
if (data[n].result && !err)
err = data[n].result;
}
}
if (igt_live_test_end(&t))
err = -EIO;
if (igt_live_test_end(&t)) {
err = err ?: -EIO;
break;
}
}
out:
@ -399,6 +416,9 @@ static int live_parallel_switch(void *arg)
intel_context_unpin(data[n].ce[m]);
intel_context_put(data[n].ce[m]);
}
if (data[n].worker)
kthread_destroy_worker(data[n].worker);
}
kfree(data);
out_file:


@ -1532,8 +1532,8 @@ static int live_busywait_preempt(void *arg)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
int err = -ENOMEM;
u32 *map;
int err;
/*
* Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@ -1541,13 +1541,17 @@ static int live_busywait_preempt(void *arg)
*/
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
return -ENOMEM;
if (IS_ERR(ctx_hi))
return PTR_ERR(ctx_hi);
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
if (IS_ERR(ctx_lo)) {
err = PTR_ERR(ctx_lo);
goto err_ctx_hi;
}
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@ -3475,12 +3479,14 @@ static int random_priority(struct rnd_state *rnd)
struct preempt_smoke {
struct intel_gt *gt;
struct kthread_work work;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
int result;
};
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@ -3540,34 +3546,31 @@ static int smoke_submit(struct preempt_smoke *smoke,
return err;
}
static int smoke_crescendo_thread(void *arg)
static void smoke_crescendo_work(struct kthread_work *work)
{
struct preempt_smoke *smoke = arg;
struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->result = smoke_submit(smoke, ctx,
count % I915_PRIORITY_MAX,
smoke->batch);
if (err)
return err;
count++;
} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
} while (!smoke->result && count < smoke->ncontext &&
!__igt_timeout(end_time, NULL));
smoke->count = count;
return 0;
}
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct kthread_worker *worker[I915_NUM_ENGINES] = {};
struct preempt_smoke *arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@ -3578,6 +3581,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
if (!arg)
return -ENOMEM;
memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
@ -3585,31 +3590,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
arg[id].batch = NULL;
arg[id].count = 0;
tsk[id] = kthread_run(smoke_crescendo_thread, arg,
"igt/smoke:%d", id);
if (IS_ERR(tsk[id])) {
err = PTR_ERR(tsk[id]);
worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
}
get_task_struct(tsk[id]);
}
yield(); /* start all threads before we kthread_stop() */
kthread_init_work(&arg[id].work, smoke_crescendo_work);
kthread_queue_work(worker[id], &arg[id].work);
}
count = 0;
for_each_engine(engine, smoke->gt, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
if (IS_ERR_OR_NULL(worker[id]))
continue;
status = kthread_stop(tsk[id]);
if (status && !err)
err = status;
kthread_flush_work(&arg[id].work);
if (arg[id].result && !err)
err = arg[id].result;
count += arg[id].count;
put_task_struct(tsk[id]);
kthread_destroy_worker(worker[id]);
}
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",


@ -866,10 +866,13 @@ static int igt_reset_active_engine(void *arg)
}
struct active_engine {
struct task_struct *task;
struct kthread_worker *worker;
struct kthread_work work;
struct intel_engine_cs *engine;
unsigned long resets;
unsigned int flags;
bool stop;
int result;
};
#define TEST_ACTIVE BIT(0)
@ -900,10 +903,10 @@ static int active_request_put(struct i915_request *rq)
return err;
}
static int active_engine(void *data)
static void active_engine(struct kthread_work *work)
{
I915_RND_STATE(prng);
struct active_engine *arg = data;
struct active_engine *arg = container_of(work, typeof(*arg), work);
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
struct intel_context *ce[ARRAY_SIZE(rq)];
@ -913,16 +916,17 @@ static int active_engine(void *data)
for (count = 0; count < ARRAY_SIZE(ce); count++) {
ce[count] = intel_context_create(engine);
if (IS_ERR(ce[count])) {
err = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
arg->result = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n",
engine->name, count, arg->result);
while (--count)
intel_context_put(ce[count]);
return err;
return;
}
}
count = 0;
while (!kthread_should_stop()) {
while (!READ_ONCE(arg->stop)) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
@ -967,7 +971,7 @@ static int active_engine(void *data)
intel_context_put(ce[count]);
}
return err;
arg->result = err;
}
static int __igt_reset_engines(struct intel_gt *gt,
@ -1022,7 +1026,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
struct kthread_worker *worker;
threads[tmp].resets =
i915_reset_engine_count(global, other);
@ -1036,19 +1040,21 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].engine = other;
threads[tmp].flags = flags;
tsk = kthread_run(active_engine, &threads[tmp],
"igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
worker = kthread_create_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
pr_err("[%s] Worker create failed: %d!\n",
engine->name, err);
goto unwind;
}
threads[tmp].task = tsk;
get_task_struct(tsk);
}
threads[tmp].worker = worker;
yield(); /* start all threads before we begin */
kthread_init_work(&threads[tmp].work, active_engine);
kthread_queue_work(threads[tmp].worker,
&threads[tmp].work);
}
st_engine_heartbeat_disable_no_pm(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
@ -1197,17 +1203,20 @@ static int __igt_reset_engines(struct intel_gt *gt,
for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].task)
if (!threads[tmp].worker)
continue;
ret = kthread_stop(threads[tmp].task);
WRITE_ONCE(threads[tmp].stop, true);
kthread_flush_work(&threads[tmp].work);
ret = READ_ONCE(threads[tmp].result);
if (ret) {
pr_err("kthread for other engine %s failed, err=%d\n",
other->name, ret);
if (!err)
err = ret;
}
put_task_struct(threads[tmp].task);
kthread_destroy_worker(threads[tmp].worker);
/* GuC based resets are not logged per engine */
if (!using_guc) {


@ -299,9 +299,18 @@ __live_request_alloc(struct intel_context *ce)
return intel_context_create_request(ce);
}
static int __igt_breadcrumbs_smoketest(void *arg)
struct smoke_thread {
struct kthread_worker *worker;
struct kthread_work work;
struct smoketest *t;
bool stop;
int result;
};
static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
{
struct smoketest *t = arg;
struct smoke_thread *thread = container_of(work, typeof(*thread), work);
struct smoketest *t = thread->t;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@ -320,8 +329,10 @@ static int __igt_breadcrumbs_smoketest(void *arg)
*/
requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
if (!requests)
return -ENOMEM;
if (!requests) {
thread->result = -ENOMEM;
return;
}
order = i915_random_order(total, &prng);
if (!order) {
@ -329,7 +340,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
goto out_requests;
}
while (!kthread_should_stop()) {
while (!READ_ONCE(thread->stop)) {
struct i915_sw_fence *submit, *wait;
unsigned int n, count;
@ -437,7 +448,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
kfree(order);
out_requests:
kfree(requests);
return err;
thread->result = err;
}
static int mock_breadcrumbs_smoketest(void *arg)
@ -450,7 +461,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
.request_alloc = __mock_request_alloc
};
unsigned int ncpus = num_online_cpus();
struct task_struct **threads;
struct smoke_thread *threads;
unsigned int n;
int ret = 0;
@ -479,28 +490,37 @@ static int mock_breadcrumbs_smoketest(void *arg)
}
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
&t, "igt/%d", n);
if (IS_ERR(threads[n])) {
ret = PTR_ERR(threads[n]);
struct kthread_worker *worker;
worker = kthread_create_worker(0, "igt/%d", n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
ncpus = n;
break;
}
get_task_struct(threads[n]);
threads[n].worker = worker;
threads[n].t = &t;
threads[n].stop = false;
threads[n].result = 0;
kthread_init_work(&threads[n].work,
__igt_breadcrumbs_smoketest);
kthread_queue_work(worker, &threads[n].work);
}
yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
int err;
err = kthread_stop(threads[n]);
WRITE_ONCE(threads[n].stop, true);
kthread_flush_work(&threads[n].work);
err = READ_ONCE(threads[n].result);
if (err < 0 && !ret)
ret = err;
put_task_struct(threads[n]);
kthread_destroy_worker(threads[n].worker);
}
pr_info("Completed %lu waits for %lu fence across %d cpus\n",
atomic_long_read(&t.num_waits),
@ -1419,9 +1439,18 @@ static int live_sequential_engines(void *arg)
return err;
}
static int __live_parallel_engine1(void *arg)
struct parallel_thread {
struct kthread_worker *worker;
struct kthread_work work;
struct intel_engine_cs *engine;
int result;
};
static void __live_parallel_engine1(struct kthread_work *work)
{
struct intel_engine_cs *engine = arg;
struct parallel_thread *thread =
container_of(work, typeof(*thread), work);
struct intel_engine_cs *engine = thread->engine;
IGT_TIMEOUT(end_time);
unsigned long count;
int err = 0;
@ -1452,12 +1481,14 @@ static int __live_parallel_engine1(void *arg)
intel_engine_pm_put(engine);
pr_info("%s: %lu request + sync\n", engine->name, count);
return err;
thread->result = err;
}
static int __live_parallel_engineN(void *arg)
static void __live_parallel_engineN(struct kthread_work *work)
{
struct intel_engine_cs *engine = arg;
struct parallel_thread *thread =
container_of(work, typeof(*thread), work);
struct intel_engine_cs *engine = thread->engine;
IGT_TIMEOUT(end_time);
unsigned long count;
int err = 0;
@ -1479,7 +1510,7 @@ static int __live_parallel_engineN(void *arg)
intel_engine_pm_put(engine);
pr_info("%s: %lu requests\n", engine->name, count);
return err;
thread->result = err;
}
static bool wake_all(struct drm_i915_private *i915)
@ -1505,9 +1536,11 @@ static int wait_for_all(struct drm_i915_private *i915)
return -ETIME;
}
static int __live_parallel_spin(void *arg)
static void __live_parallel_spin(struct kthread_work *work)
{
struct intel_engine_cs *engine = arg;
struct parallel_thread *thread =
container_of(work, typeof(*thread), work);
struct intel_engine_cs *engine = thread->engine;
struct igt_spinner spin;
struct i915_request *rq;
int err = 0;
@ -1520,7 +1553,8 @@ static int __live_parallel_spin(void *arg)
if (igt_spinner_init(&spin, engine->gt)) {
wake_all(engine->i915);
return -ENOMEM;
thread->result = -ENOMEM;
return;
}
intel_engine_pm_get(engine);
@ -1553,22 +1587,22 @@ static int __live_parallel_spin(void *arg)
out_spin:
igt_spinner_fini(&spin);
return err;
thread->result = err;
}
static int live_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
static void (* const func[])(struct kthread_work *) = {
__live_parallel_engine1,
__live_parallel_engineN,
__live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
struct parallel_thread *threads;
struct intel_engine_cs *engine;
int (* const *fn)(void *arg);
struct task_struct **tsk;
void (* const *fn)(struct kthread_work *);
int err = 0;
/*
@ -1576,8 +1610,8 @@ static int live_parallel_engines(void *arg)
* tests that we load up the system maximally.
*/
tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
if (!tsk)
threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
for (fn = func; !err && *fn; fn++) {
@ -1594,37 +1628,44 @@ static int live_parallel_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
"igt/parallel:%s",
struct kthread_worker *worker;
worker = kthread_create_worker(0, "igt/parallel:%s",
engine->name);
if (IS_ERR(tsk[idx])) {
err = PTR_ERR(tsk[idx]);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
break;
}
get_task_struct(tsk[idx++]);
}
yield(); /* start all threads before we kthread_stop() */
threads[idx].worker = worker;
threads[idx].result = 0;
threads[idx].engine = engine;
kthread_init_work(&threads[idx].work, *fn);
kthread_queue_work(worker, &threads[idx].work);
idx++;
}
idx = 0;
for_each_uabi_engine(engine, i915) {
int status;
if (IS_ERR(tsk[idx]))
if (!threads[idx].worker)
break;
status = kthread_stop(tsk[idx]);
kthread_flush_work(&threads[idx].work);
status = READ_ONCE(threads[idx].result);
if (status && !err)
err = status;
put_task_struct(tsk[idx++]);
kthread_destroy_worker(threads[idx++].worker);
}
if (igt_live_test_end(&t))
err = -EIO;
}
kfree(tsk);
kfree(threads);
return err;
}
@ -1672,7 +1713,7 @@ static int live_breadcrumbs_smoketest(void *arg)
const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
struct task_struct **threads;
struct smoke_thread *threads;
struct igt_live_test live;
intel_wakeref_t wakeref;
struct smoketest *smoke;
@ -1746,23 +1787,26 @@ static int live_breadcrumbs_smoketest(void *arg)
smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk;
unsigned int i = idx * ncpus + n;
struct kthread_worker *worker;
tsk = kthread_run(__igt_breadcrumbs_smoketest,
&smoke[idx], "igt/%d.%d", idx, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
goto out_flush;
}
get_task_struct(tsk);
threads[idx * ncpus + n] = tsk;
threads[i].worker = worker;
threads[i].t = &smoke[idx];
kthread_init_work(&threads[i].work,
__igt_breadcrumbs_smoketest);
kthread_queue_work(worker, &threads[i].work);
}
idx++;
}
yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
@ -1771,17 +1815,19 @@ static int live_breadcrumbs_smoketest(void *arg)
num_fences = 0;
for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk = threads[idx * ncpus + n];
unsigned int i = idx * ncpus + n;
int err;
if (!tsk)
if (!threads[i].worker)
continue;
err = kthread_stop(tsk);
WRITE_ONCE(threads[i].stop, true);
kthread_flush_work(&threads[i].work);
err = READ_ONCE(threads[i].result);
if (err < 0 && !ret)
ret = err;
put_task_struct(tsk);
kthread_destroy_worker(threads[i].worker);
}
num_waits += atomic_long_read(&smoke[idx].num_waits);
@ -2891,9 +2937,18 @@ static int perf_series_engines(void *arg)
return err;
}
static int p_sync0(void *arg)
struct p_thread {
struct perf_stats p;
struct kthread_worker *worker;
struct kthread_work work;
struct intel_engine_cs *engine;
int result;
};
static void p_sync0(struct kthread_work *work)
{
struct perf_stats *p = arg;
struct p_thread *thread = container_of(work, typeof(*thread), work);
struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
@ -2902,13 +2957,16 @@ static int p_sync0(void *arg)
int err = 0;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
if (IS_ERR(ce)) {
thread->result = PTR_ERR(ce);
return;
}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
return err;
thread->result = err;
return;
}
if (intel_engine_supports_stats(engine)) {
@ -2958,12 +3016,13 @@ static int p_sync0(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
return err;
thread->result = err;
}
static int p_sync1(void *arg)
static void p_sync1(struct kthread_work *work)
{
struct perf_stats *p = arg;
struct p_thread *thread = container_of(work, typeof(*thread), work);
struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct i915_request *prev = NULL;
struct intel_context *ce;
@ -2973,13 +3032,16 @@ static int p_sync1(void *arg)
int err = 0;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
if (IS_ERR(ce)) {
thread->result = PTR_ERR(ce);
return;
}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
return err;
thread->result = err;
return;
}
if (intel_engine_supports_stats(engine)) {
@ -3031,12 +3093,13 @@ static int p_sync1(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
return err;
thread->result = err;
}
static int p_many(void *arg)
static void p_many(struct kthread_work *work)
{
struct perf_stats *p = arg;
struct p_thread *thread = container_of(work, typeof(*thread), work);
struct perf_stats *p = &thread->p;
struct intel_engine_cs *engine = p->engine;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
@ -3045,13 +3108,16 @@ static int p_many(void *arg)
bool busy;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
if (IS_ERR(ce)) {
thread->result = PTR_ERR(ce);
return;
}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
return err;
thread->result = err;
return;
}
if (intel_engine_supports_stats(engine)) {
@ -3092,26 +3158,23 @@ static int p_many(void *arg)
intel_context_unpin(ce);
intel_context_put(ce);
return err;
thread->result = err;
}
static int perf_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
static void (* const func[])(struct kthread_work *) = {
p_sync0,
p_sync1,
p_many,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
void (* const *fn)(struct kthread_work *);
struct intel_engine_cs *engine;
int (* const *fn)(void *arg);
struct pm_qos_request qos;
struct {
struct perf_stats p;
struct task_struct *tsk;
} *engines;
struct p_thread *engines;
int err = 0;
engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
@ -3134,36 +3197,45 @@ static int perf_parallel_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
struct kthread_worker *worker;
intel_engine_pm_get(engine);
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
engines[idx].p.engine = engine;
engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
"igt:%s", engine->name);
if (IS_ERR(engines[idx].tsk)) {
err = PTR_ERR(engines[idx].tsk);
worker = kthread_create_worker(0, "igt:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
intel_engine_pm_put(engine);
break;
}
get_task_struct(engines[idx++].tsk);
}
engines[idx].worker = worker;
engines[idx].result = 0;
engines[idx].p.engine = engine;
engines[idx].engine = engine;
yield(); /* start all threads before we kthread_stop() */
kthread_init_work(&engines[idx].work, *fn);
kthread_queue_work(worker, &engines[idx].work);
idx++;
}
idx = 0;
for_each_uabi_engine(engine, i915) {
int status;
if (IS_ERR(engines[idx].tsk))
if (!engines[idx].worker)
break;
status = kthread_stop(engines[idx].tsk);
kthread_flush_work(&engines[idx].work);
status = READ_ONCE(engines[idx].result);
if (status && !err)
err = status;
intel_engine_pm_put(engine);
put_task_struct(engines[idx++].tsk);
kthread_destroy_worker(engines[idx].worker);
idx++;
}
if (igt_live_test_end(&t))


@ -520,6 +520,17 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
MV64XXX_I2C_REG_CONTROL_IFLG) {
/*
* It seems that sometime the controller updates the status
* register only after it asserts IFLG in control register.
* This may result in weird bugs when in atomic mode. A delay
* of 100 ns before reading the status register solves this
* issue. This bug does not seem to appear when using
* interrupts.
*/
if (drv_data->atomic)
ndelay(100);
status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
mv64xxx_i2c_fsm(drv_data, status);
mv64xxx_i2c_do_action(drv_data);

View File

@ -576,11 +576,13 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
ret = pm_runtime_resume_and_get(i2c_dev->dev);
ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0)
return ret;
dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret));
i2c_del_adapter(&i2c_dev->adap);
if (ret >= 0)
clk_disable_unprepare(i2c_dev->clk);
pm_runtime_put_noidle(i2c_dev->dev);


@ -701,7 +701,7 @@ void input_close_device(struct input_handle *handle)
__input_release_device(handle);
if (!dev->inhibited && !--dev->users) {
if (!--dev->users && !dev->inhibited) {
if (dev->poller)
input_dev_poller_stop(dev->poller);
if (dev->close)


@ -282,7 +282,6 @@ static const struct xpad_device {
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },


@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
int id = ((packet[3] & 0xe0) >> 5) - 1;
int id;
int pres, traces;
if (id < 0)
id = ((packet[3] & 0xe0) >> 5) - 1;
if (id < 0 || id >= ETP_MAX_FINGERS)
return;
etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
int id, sid;
id = ((packet[0] & 0xe0) >> 5) - 1;
if (id < 0)
if (id < 0 || id >= ETP_MAX_FINGERS)
return;
sid = ((packet[3] & 0xe0) >> 5) - 1;
@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
if (sid >= 0) {
if (sid >= 0 && sid < ETP_MAX_FINGERS) {
etd->mt[sid].x += delta_x2 * weight;
etd->mt[sid].y -= delta_y2 * weight;
input_mt_slot(dev, sid);


@ -6,6 +6,7 @@ config EEPROM_AT24
depends on I2C && SYSFS
select NVMEM
select NVMEM_SYSFS
select REGMAP
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs

View File

@ -1199,8 +1199,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
if (vid)
return -EOPNOTSUPP;
return lan9303_alr_add_port(chip, addr, port, false);
}
@ -1212,8 +1210,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
if (vid)
return -EOPNOTSUPP;
lan9303_alr_del_port(chip, addr, port);
return 0;


@ -2389,6 +2389,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u64 ns;
if (!ptp)
goto async_event_process_exit;
spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
@ -4787,6 +4790,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
continue;
if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
!bp->ptp_cfg)
continue;
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
}
if (bmap && bmap_size) {
@ -8808,6 +8814,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
@ -11573,6 +11582,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
static void bnxt_fw_health_check(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
struct pci_dev *pdev = bp->pdev;
u32 val;
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@ -11586,7 +11596,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
if (val == fw_health->last_fw_heartbeat) {
if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
fw_health->arrests++;
goto fw_reset;
}
@ -11594,7 +11604,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
if (val != fw_health->last_fw_reset_cnt) {
if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
fw_health->discoveries++;
goto fw_reset;
}
@ -12998,26 +13008,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
struct udp_tunnel_info ti;
unsigned int cmd;
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
if (ti.port)
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}
static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
unsigned int cmd;
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.sync_table = bnxt_udp_tunnel_sync,
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {


@ -3727,7 +3727,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
}
}
if (req & BNXT_FW_RESET_AP) {
if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code >= 0x10803) {
if (!bnxt_firmware_reset_ap(dev)) {


@ -929,6 +929,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
} else {
bnxt_ptp_timecounter_init(bp, true);
}
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {


@ -1290,7 +1290,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@ -1310,7 +1311,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
if (enable)
if (tx_lpi_enabled)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@ -1331,6 +1332,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
priv->eee.eee_enabled = enable;
priv->eee.eee_active = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@ -1346,6 +1348,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_enabled = p->eee_enabled;
e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
@ -1355,7 +1358,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct ethtool_eee *p = &priv->eee;
int ret = 0;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@ -1366,16 +1368,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
bcmgenet_eee_enable_set(dev, false, false);
} else {
ret = phy_init_eee(dev->phydev, false);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
}
p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
bcmgenet_eee_enable_set(dev, true);
bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
@ -4274,9 +4271,6 @@ static int bcmgenet_resume(struct device *d)
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
bcmgenet_netif_start(dev);
netif_device_attach(dev);


@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
#endif /* __BCMGENET_H__ */


@ -88,6 +88,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
priv->eee.eee_enabled && priv->eee.eee_active,
priv->eee.tx_lpi_enabled);
}
/* setup netdev link state when PHY link status change and


@ -1209,7 +1209,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (!skb)
break;
rx_byte_cnt += skb->len;
/* When set, the outer VLAN header is extracted and reported
* in the receive buffer descriptor. So rx_byte_cnt should
* add the length of the extracted VLAN header.
*/
if (bd_status & ENETC_RXBD_FLAG_VLAN)
rx_byte_cnt += VLAN_HLEN;
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
napi_gro_receive(napi, skb);
@ -1532,6 +1538,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
&cleaned_cnt, &xdp_buff);
/* When set, the outer VLAN header is extracted and reported
* in the receive buffer descriptor. So rx_byte_cnt should
* add the length of the extracted VLAN header.
*/
if (bd_status & ENETC_RXBD_FLAG_VLAN)
rx_byte_cnt += VLAN_HLEN;
rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
switch (xdp_act) {

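The two enetc hunks grow rx_byte_cnt by ETH_HLEN in the skb path (the skb length no longer covers the MAC header once the frame has been handed to the stack) and by VLAN_HLEN whenever the descriptor says the outer VLAN tag was extracted. A standalone sketch of that accounting, using the standard 14/4-byte header sizes (illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14	/* destination MAC + source MAC + EtherType */
#define VLAN_HLEN  4	/* 802.1Q tag stripped by the NIC */

/* Wire bytes represented by one completed RX descriptor: the payload
 * length reported to the stack, plus the MAC header, plus the VLAN
 * tag if the descriptor flags it as extracted. */
static unsigned long rx_wire_bytes(unsigned int payload_len, bool vlan_extracted)
{
	unsigned long bytes = payload_len + ETH_HLEN;

	if (vlan_extracted)
		bytes += VLAN_HLEN;
	return bytes;
}

int main(void)
{
	printf("%lu\n", rx_wire_bytes(1486, false)); /* 1500-byte frame */
	printf("%lu\n", rx_wire_bytes(1486, true));  /* same frame, tagged */
	return 0;
}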

@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
if (!cdev) {
if (!cdev || cdev->recov_in_prog) {
memset(stats, 0, sizeof(*stats));
return;
}


@ -271,6 +271,10 @@ struct qede_dev {
#define QEDE_ERR_WARN 3
struct qede_dump_info dump_info;
struct delayed_work periodic_task;
unsigned long stats_coal_ticks;
u32 stats_coal_usecs;
spinlock_t stats_lock; /* lock for vport stats access */
};
enum QEDE_STATE {


@ -430,6 +430,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
}
spin_lock(&edev->stats_lock);
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
@ -439,6 +441,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
buf++;
}
spin_unlock(&edev->stats_lock);
__qede_unlock(edev);
}
@ -830,6 +834,7 @@ static int qede_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
return rc;
}
@ -843,6 +848,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
int i, rc = 0;
u16 rxc, txc;
if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
if (edev->stats_coal_usecs) {
edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
schedule_delayed_work(&edev->periodic_task, 0);
DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
edev->stats_coal_ticks);
} else {
cancel_delayed_work_sync(&edev->periodic_task);
}
}
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
return -EINVAL;
@ -2253,7 +2271,8 @@ static int qede_get_per_coalesce(struct net_device *dev,
}
static const struct ethtool_ops qede_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@ -2304,7 +2323,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,


@ -308,6 +308,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
spin_lock(&edev->stats_lock);
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
p_common->ttl0_discard = stats.common.ttl0_discard;
@ -405,6 +407,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@ -413,9 +417,10 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
spin_lock(&edev->stats_lock);
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@ -435,6 +440,8 @@ static void qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@ -1061,6 +1068,23 @@ static void qede_unlock(struct qede_dev *edev)
rtnl_unlock();
}
static void qede_periodic_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
periodic_task.work);
qede_fill_by_demand_stats(edev);
schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}
static void qede_init_periodic_task(struct qede_dev *edev)
{
INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
spin_lock_init(&edev->stats_lock);
edev->stats_coal_usecs = USEC_PER_SEC;
edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
@ -1080,6 +1104,7 @@ static void qede_sp_task(struct work_struct *work)
*/
if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
@ -1270,6 +1295,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
qede_init_periodic_task(edev);
rc = register_netdev(edev->ndev);
if (rc) {
@ -1294,6 +1320,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
/* retain user config (for example - after recovery) */
if (edev->stats_coal_usecs)
schedule_delayed_work(&edev->periodic_task, 0);
return 0;
err4:
@ -1362,6 +1393,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
cancel_delayed_work_sync(&edev->periodic_task);
edev->ops->common->set_power_state(cdev, PCI_D0);

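The qede stats-coalescing change stores the user-supplied interval in microseconds, converts it to scheduler ticks once, and re-arms the periodic delayed work with that tick count. A sketch of the conversion, assuming a hypothetical 250 Hz tick rate and rounding up the way usecs_to_jiffies() does (illustrative only):

#include <stdio.h>

#define HZ 250			/* assumed tick rate for this sketch */
#define USEC_PER_SEC 1000000UL

/* Round the microsecond interval up to whole ticks so short intervals
 * still schedule at least one tick ahead. */
static unsigned long usecs_to_ticks(unsigned long usecs)
{
	return (usecs * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
}

int main(void)
{
	printf("%lu usec -> %lu ticks\n", USEC_PER_SEC, usecs_to_ticks(USEC_PER_SEC));
	printf("%lu usec -> %lu ticks\n", 2500UL, usecs_to_ticks(2500UL));
	return 0;
}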

@ -2199,6 +2199,11 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
break;
}
/* Force a poll to re-read the hardware signal state after
* sfp_sm_mod_probe() changed state_hw_mask.
*/
mod_delayed_work(system_wq, &sfp->poll, 1);
err = sfp_hwmon_insert(sfp);
if (err)
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",


@ -200,6 +200,8 @@ struct control_buf {
__virtio16 vid;
__virtio64 offloads;
struct virtio_net_ctrl_rss rss;
struct virtio_net_ctrl_coal_tx coal_tx;
struct virtio_net_ctrl_coal_rx coal_rx;
};
struct virtnet_info {
@ -2786,12 +2788,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
struct scatterlist sgs_tx, sgs_rx;
struct virtio_net_ctrl_coal_tx coal_tx;
struct virtio_net_ctrl_coal_rx coal_rx;
coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@ -2802,9 +2802,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
vi->tx_usecs = ec->tx_coalesce_usecs;
vi->tx_max_packets = ec->tx_max_coalesced_frames;
coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,


@ -2724,17 +2724,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
if (wowlan_info_ver < 2) {
struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
notif = kmemdup(notif_v1,
offsetofend(struct iwl_wowlan_info_notif,
received_beacons),
GFP_ATOMIC);
notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
if (!notif)
return false;
notif->tid_tear_down = notif_v1->tid_tear_down;
notif->station_id = notif_v1->station_id;
memset_after(notif, 0, station_id);
} else {
notif = (void *)pkt->data;
}


@ -921,7 +921,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
poll_list);
spin_lock_bh(&dev->sta_poll_lock);
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;


@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (hw->conf.flags & IEEE80211_CONF_PS) {
rtwdev->ps_enabled = true;
} else {
rtwdev->ps_enabled = false;
rtw_leave_lps(rtwdev);
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
@ -206,6 +197,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->bcn_ctrl = bcn_ctrl;
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_recalc_lps(rtwdev, vif);
mutex_unlock(&rtwdev->mutex);
@ -236,6 +228,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
rtwvif->bcn_ctrl = 0;
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
}
@ -428,6 +421,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT)
rtw_conf_tx(rtwdev, rtwvif);
if (changed & BSS_CHANGED_PS)
rtw_recalc_lps(rtwdev, NULL);
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);


@ -248,8 +248,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
* more than two stations associated to the AP, then we can not enter
* lps, because fw does not handle the overlapped beacon interval
*
* mac80211 should iterate vifs and determine if driver can enter
* ps by passing IEEE80211_CONF_PS to us, all we need to do is to
* rtw_recalc_lps() iterate vifs and determine if driver can enter
* ps by vif->type and vif->cfg.ps, all we need to do here is to
* get that vif and check if device is having traffic more than the
* threshold.
*/


@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
__rtw_leave_lps_deep(rtwdev);
}
struct rtw_vif_recalc_lps_iter_data {
struct rtw_dev *rtwdev;
struct ieee80211_vif *found_vif;
int count;
};
static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data,
struct ieee80211_vif *vif)
{
if (data->count < 0)
return;
if (vif->type != NL80211_IFTYPE_STATION) {
data->count = -1;
return;
}
data->count++;
data->found_vif = vif;
}
static void rtw_vif_recalc_lps_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
__rtw_vif_recalc_lps(data, vif);
}
void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif)
{
struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev };
if (new_vif)
__rtw_vif_recalc_lps(&data, new_vif);
rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data);
if (data.count == 1 && data.found_vif->cfg.ps) {
rtwdev->ps_enabled = true;
} else {
rtwdev->ps_enabled = false;
rtw_leave_lps(rtwdev);
}
}


@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
void rtw_leave_lps(struct rtw_dev *rtwdev);
void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev);
void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif);
#endif


@ -78,15 +78,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
!(hw->conf.flags & IEEE80211_CONF_IDLE))
rtw89_leave_ips(rtwdev);
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (hw->conf.flags & IEEE80211_CONF_PS) {
rtwdev->lps_enabled = true;
} else {
rtw89_leave_lps(rtwdev);
rtwdev->lps_enabled = false;
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
&hw->conf.chandef);
@ -142,6 +133,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_core_txq_init(rtwdev, vif->txq);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
rtw89_recalc_lps(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
@ -165,6 +158,8 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
rtw89_mac_remove_vif(rtwdev, rtwvif);
rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
list_del_init(&rtwvif->list);
rtw89_recalc_lps(rtwdev);
mutex_unlock(&rtwdev->mutex);
}
@ -411,6 +406,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_P2P_PS)
rtw89_process_p2p_ps(rtwdev, vif);
if (changed & BSS_CHANGED_PS)
rtw89_recalc_lps(rtwdev);
mutex_unlock(&rtwdev->mutex);
}


@ -244,3 +244,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
rtw89_p2p_disable_all_noa(rtwdev, vif);
rtw89_p2p_update_noa(rtwdev, vif);
}
void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
{
struct ieee80211_vif *vif, *found_vif = NULL;
struct rtw89_vif *rtwvif;
int count = 0;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
vif = rtwvif_to_vif(rtwvif);
if (vif->type != NL80211_IFTYPE_STATION) {
count = 0;
break;
}
count++;
found_vif = vif;
}
if (count == 1 && found_vif->cfg.ps) {
rtwdev->lps_enabled = true;
} else {
rtw89_leave_lps(rtwdev);
rtwdev->lps_enabled = false;
}
}


@ -13,5 +13,6 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_recalc_lps(struct rtw89_dev *rtwdev);
#endif


@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOA_15),
GPIO_GROUP(GPIOA_16),
GPIO_GROUP(GPIOA_17),
GPIO_GROUP(GPIOA_18),
GPIO_GROUP(GPIOA_19),
GPIO_GROUP(GPIOA_20),


@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
cplt->dev = dev;
cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!cplt->wq)
return -ENOMEM;


@ -201,6 +201,7 @@ enum ssam_kip_cover_state {
SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
SSAM_KIP_COVER_STATE_BOOK = 0x06,
};
static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
@ -221,6 +222,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 stat
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
return "folded-back";
case SSAM_KIP_COVER_STATE_BOOK:
return "book";
default:
dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
return "<unknown>";
@ -233,6 +237,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 s
case SSAM_KIP_COVER_STATE_DISCONNECTED:
case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
case SSAM_KIP_COVER_STATE_BOOK:
return true;
case SSAM_KIP_COVER_STATE_CLOSED:


@ -552,10 +552,10 @@ static int __dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
spin_lock_irqsave(&block->queue_lock, flags);
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(&block->queue_lock, flags);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}


@ -3323,7 +3323,7 @@ static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
/* copy the io request frame as well as 8 SGEs data for r1 command*/
memcpy(r1_cmd->io_request, cmd->io_request,
(sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =


@ -526,7 +526,10 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
__le32 Control; /* 0x3C */
union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
union {
union MPI2_SGE_IO_UNION SGL; /* 0x80 */
DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
};
};
/*

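The megaraid_sas change overlays the fixed SGL member with a flexible array so the driver can memcpy() more than one SGE without tripping fortified bounds checks. DECLARE_FLEX_ARRAY() is the kernel helper that lets a flexible array member sit inside a union; a reduced user-space equivalent of that layout, with hypothetical names rather than the driver's, might look like this (relies on the same GNU empty-struct trick):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sge {
	unsigned long long addr;
	unsigned int length;
	unsigned int flags;
};

/* A flexible array member cannot appear directly in a union, so it is
 * wrapped in an unnamed struct with an empty leading member - the same
 * trick DECLARE_FLEX_ARRAY() uses - keeping "sgl" (one entry) and
 * "sgls" (as many entries as were allocated) at the same offset. */
struct io_request {
	unsigned int control;
	union {
		struct sge sgl;			/* legacy single-entry view */
		struct {
			struct { } __empty_sgls;
			struct sge sgls[];	/* flexible view for memcpy */
		};
	};
};

int main(void)
{
	size_t nsge = 8;
	struct io_request *req  = calloc(1, sizeof(*req) + nsge * sizeof(struct sge));
	struct io_request *copy = calloc(1, sizeof(*copy) + nsge * sizeof(struct sge));

	if (!req || !copy)
		return 1;
	for (size_t i = 0; i < nsge; i++)
		req->sgls[i].length = (unsigned int)(i + 1);

	/* Copying through the flexible member is in-bounds for the compiler. */
	memcpy(copy->sgls, req->sgls, nsge * sizeof(struct sge));
	printf("copied sge[7].length = %u\n", copy->sgls[7].length);
	free(req);
	free(copy);
	return 0;
}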

@ -603,12 +603,12 @@ static int bwmon_probe(struct platform_device *pdev)
bwmon->max_bw_kbps = UINT_MAX;
opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n");
return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
bwmon->min_bw_kbps = 0;
opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
if (IS_ERR(opp))
return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
bwmon->dev = dev;


@ -2019,8 +2019,10 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
skip_alloc_master_rt:
s_rt = sdw_slave_rt_find(slave, stream);
if (s_rt)
if (s_rt) {
alloc_slave_rt = false;
goto skip_alloc_slave_rt;
}
s_rt = sdw_slave_rt_alloc(slave, m_rt);
if (!s_rt) {


@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;


@ -1029,23 +1029,8 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENXIO;
}
ret = clk_prepare_enable(cclk);
if (ret) {
dev_err(dev, "cannot enable core clock\n");
return ret;
}
ret = clk_prepare_enable(iclk);
if (ret) {
clk_disable_unprepare(cclk);
dev_err(dev, "cannot enable iface clock\n");
return ret;
}
master = spi_alloc_master(dev, sizeof(struct spi_qup));
if (!master) {
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
dev_err(dev, "cannot allocate master\n");
return -ENOMEM;
}
@ -1093,6 +1078,19 @@ static int spi_qup_probe(struct platform_device *pdev)
spin_lock_init(&controller->lock);
init_completion(&controller->done);
ret = clk_prepare_enable(cclk);
if (ret) {
dev_err(dev, "cannot enable core clock\n");
goto error_dma;
}
ret = clk_prepare_enable(iclk);
if (ret) {
clk_disable_unprepare(cclk);
dev_err(dev, "cannot enable iface clock\n");
goto error_dma;
}
iomode = readl_relaxed(base + QUP_IO_M_MODES);
size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@ -1122,7 +1120,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
goto error_dma;
goto error_clk;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
@ -1146,7 +1144,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
goto error_dma;
goto error_clk;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
@ -1161,11 +1159,12 @@ static int spi_qup_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
error_clk:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
error_dma:
spi_qup_release_dma(master);
error:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
spi_master_put(master);
return ret;
}


@ -48,9 +48,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
{PCI_DEVICE(0x10ec, 0x8192)},
{PCI_DEVICE(0x07aa, 0x0044)},
{PCI_DEVICE(0x07aa, 0x0047)},
{RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
{}
};


@ -55,6 +55,11 @@
#define IS_HARDWARE_TYPE_8192SE(_priv) \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
#define RTL_PCI_DEVICE(vend, dev, cfg) \
.vendor = (vend), .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
.driver_data = (kernel_ulong_t)&(cfg)
#define TOTAL_CAM_ENTRY 32
#define CAM_CONTENT_COUNT 8


@ -122,12 +122,14 @@ struct tee_cmd_unmap_shared_mem {
* @hi_addr: [in] bits [63:32] of the physical address of the TA binary
* @size: [in] size of TA binary in bytes
* @ta_handle: [out] return handle of the loaded TA
* @return_origin: [out] origin of return code after TEE processing
*/
struct tee_cmd_load_ta {
u32 low_addr;
u32 hi_addr;
u32 size;
u32 ta_handle;
u32 return_origin;
};
/**


@ -423,7 +423,10 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
} else if (arg->ret == TEEC_SUCCESS) {
} else {
arg->ret_origin = load_cmd.return_origin;
if (arg->ret == TEEC_SUCCESS) {
ret = get_ta_refcount(load_cmd.ta_handle);
if (!ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
@ -438,6 +441,7 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
set_session_id(load_cmd.ta_handle, 0, &arg->session);
}
}
}
mutex_unlock(&ta_refcount_mutex);
pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",


@ -172,3 +172,44 @@ void hcd_buffer_free(
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma)
{
if (size == 0)
return NULL;
if (hcd->localmem_pool)
return gen_pool_dma_alloc_align(hcd->localmem_pool,
size, dma, PAGE_SIZE);
/* some USB hosts just use PIO */
if (!hcd_uses_dma(hcd)) {
*dma = DMA_MAPPING_ERROR;
return (void *)__get_free_pages(mem_flags,
get_order(size));
}
return dma_alloc_coherent(hcd->self.sysdev,
size, dma, mem_flags);
}
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma)
{
if (!addr)
return;
if (hcd->localmem_pool) {
gen_pool_free(hcd->localmem_pool,
(unsigned long)addr, size);
return;
}
if (!hcd_uses_dma(hcd)) {
free_pages((unsigned long)addr, get_order(size));
return;
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}


@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps)
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{
struct usb_dev_state *ps = usbm->ps;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags);
usb_free_coherent(ps->dev, usbm->size, usbm->mem,
usbm->dma_handle);
hcd_buffer_free_pages(hcd, usbm->size,
usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory));
kfree(usbm);
@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
dma_addr_t dma_handle;
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
&dma_handle);
mem = hcd_buffer_alloc_pages(hcd,
size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
/*
* In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
* normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
* whether we are in such cases, and then use remap_pfn_range (or
* dma_mmap_coherent) to map normal (or DMA) pages into the user
* space, respectively.
*/
if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {


@ -1443,6 +1443,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
if (config->vq_num > 0xffff)
return false;
if (!config->name[0])
return false;
if (!device_is_allowed(config->device_id))
return false;


@ -549,7 +549,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r)
return r;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq->last_avail_idx = vq_state.packed.last_avail_idx |
(vq_state.packed.last_avail_counter << 15);
vq->last_used_idx = vq_state.packed.last_used_idx |
(vq_state.packed.last_used_counter << 15);
} else {
vq->last_avail_idx = vq_state.split.avail_index;
}
break;
}
@ -567,9 +574,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
} else {
vq_state.split.avail_index = vq->last_avail_idx;
if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL;
}
r = ops->set_vq_state(vdpa, idx, &vq_state);
break;
case VHOST_SET_VRING_CALL:


@ -1633,16 +1633,24 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq->last_avail_idx = s.num & 0xffff;
vq->last_used_idx = (s.num >> 16) & 0xffff;
} else {
if (s.num > 0xffff) {
r = -EINVAL;
break;
}
vq->last_avail_idx = s.num;
}
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
else
s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;


@ -85,13 +85,17 @@ struct vhost_virtqueue {
/* The routine to call when the Guest pings us, or timeout. */
vhost_work_fn_t handle_kick;
/* Last available index we saw. */
/* Last available index we saw.
* Values are limited to 0x7fff, and the high bit is used as
* a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
/* Caches available index value from user. */
u16 avail_idx;
/* Last index we used. */
/* Last index we used.
* Values are limited to 0x7fff, and the high bit is used as
* a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_used_idx;
/* Used flags */

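For packed virtqueues the vhost hunks above squeeze four values into the single 32-bit num field of VHOST_GET/SET_VRING_BASE: the available index in bits 0-14 with its wrap counter in bit 15, and the used index in bits 16-30 with its wrap counter in bit 31. A small host-side sketch of that encoding and its inverse, independent of the uAPI headers (illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack last_avail/last_used (15-bit indices plus a wrap bit each) the
 * way VHOST_GET_VRING_BASE reports them for VIRTIO_F_RING_PACKED:
 * low 16 bits = avail, high 16 bits = used, bit 15 of each = counter. */
static uint32_t pack_vring_base(uint16_t avail_idx, int avail_wrap,
				uint16_t used_idx, int used_wrap)
{
	uint16_t avail = (avail_idx & 0x7fff) | (avail_wrap ? 0x8000 : 0);
	uint16_t used  = (used_idx  & 0x7fff) | (used_wrap  ? 0x8000 : 0);

	return (uint32_t)avail | ((uint32_t)used << 16);
}

static void unpack_vring_base(uint32_t num, uint16_t *avail_idx, int *avail_wrap,
			      uint16_t *used_idx, int *used_wrap)
{
	*avail_idx  = num & 0x7fff;
	*avail_wrap = !!(num & 0x8000);
	*used_idx   = (num >> 16) & 0x7fff;
	*used_wrap  = !!(num >> 31);
}

int main(void)
{
	uint16_t a, u;
	int aw, uw;
	uint32_t num = pack_vring_base(0x1234, 1, 0x0042, 0);

	unpack_vring_base(num, &a, &aw, &u, &uw);
	assert(a == 0x1234 && aw == 1 && u == 0x0042 && uw == 0);
	printf("num=0x%08x avail=%u/%d used=%u/%d\n", num, a, aw, u, uw);
	return 0;
}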

@ -1357,6 +1357,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
op->dentry = dentry;
op->create.mode = S_IFDIR | mode;
op->create.reason = afs_edit_dir_for_mkdir;
op->mtime = current_time(dir);
op->ops = &afs_mkdir_operation;
return afs_do_sync_operation(op);
}
@ -1660,6 +1661,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
op->dentry = dentry;
op->create.mode = S_IFREG | mode;
op->create.reason = afs_edit_dir_for_create;
op->mtime = current_time(dir);
op->ops = &afs_create_operation;
return afs_do_sync_operation(op);
@ -1795,6 +1797,7 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
op->ops = &afs_symlink_operation;
op->create.reason = afs_edit_dir_for_symlink;
op->create.symlink = content;
op->mtime = current_time(dir);
return afs_do_sync_operation(op);
error:


@ -1626,6 +1626,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_session *session = NULL;
bool need_put = false;
int mds;
dout("ceph_flush_snaps %p\n", inode);
@ -1670,8 +1671,13 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
ceph_put_mds_session(session);
/* we flushed them all; remove this inode from the queue */
spin_lock(&mdsc->snap_flush_lock);
if (!list_empty(&ci->i_snap_flush_item))
need_put = true;
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
if (need_put)
iput(inode);
}
/*


@ -693,8 +693,10 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
if (list_empty(&ci->i_snap_flush_item))
if (list_empty(&ci->i_snap_flush_item)) {
ihold(inode);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
}
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}

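The two ceph hunks pair an ihold() taken only when the inode is first queued on snap_flush_list with an iput() dropped only if the entry was actually still on the list when it is removed, keeping the reference count balanced across repeated flushes. A generic user-space sketch of that take-on-enqueue / put-on-dequeue discipline, with hypothetical helpers rather than the ceph code:

#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refcount;
	bool queued;		/* stands in for !list_empty(&i_snap_flush_item) */
};

static void obj_hold(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o)  { o->refcount--; }

/* Take a reference only when the object first goes onto the queue. */
static void enqueue(struct obj *o)
{
	if (!o->queued) {
		obj_hold(o);
		o->queued = true;
	}
}

/* Drop the reference only if the object really was queued, so a second
 * dequeue (or one that raced with a completed flush) is a no-op. */
static void dequeue(struct obj *o)
{
	bool need_put = o->queued;

	o->queued = false;
	if (need_put)
		obj_put(o);
}

int main(void)
{
	struct obj inode = { .refcount = 1, .queued = false };

	enqueue(&inode);	/* refcount 2 */
	enqueue(&inode);	/* still 2: already queued */
	dequeue(&inode);	/* back to 1 */
	dequeue(&inode);	/* still 1: nothing left to drop */
	printf("refcount = %d\n", inode.refcount);
	return 0;
}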

@ -6328,7 +6328,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
struct ext4_mount_options old_opts;
ext4_group_t g;
int err = 0;
int enable_rw = 0;
#ifdef CONFIG_QUOTA
int enable_quota = 0;
int i, j;
@ -6515,7 +6514,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
if (err)
goto restore_opts;
enable_rw = 1;
sb->s_flags &= ~SB_RDONLY;
if (ext4_has_feature_mmp(sb)) {
err = ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block));
@ -6562,9 +6561,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
if (enable_rw)
sb->s_flags &= ~SB_RDONLY;
/*
* Reinitialize lazy itable initialization thread based on
* current settings


@ -2006,8 +2006,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
else {
u32 ref;
#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
#endif
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@ -2070,8 +2071,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
#endif
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
block = ext4_new_meta_blocks(handle, inode, goal, 0,


@ -296,6 +296,9 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
return true;
}
#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
/**
* ksmbd_conn_handler_loop() - session thread to listen on new smb requests
* @p: connection instance
@ -352,6 +355,9 @@ int ksmbd_conn_handler_loop(void *p)
if (pdu_size > MAX_STREAM_PROT_LEN)
break;
if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
break;
/* 4 for rfc1002 length field */
/* 1 for implied bcc[0] */
size = pdu_size + 4 + 1;
@ -379,6 +385,12 @@ int ksmbd_conn_handler_loop(void *p)
continue;
}
if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
SMB2_PROTO_NUMBER) {
if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
break;
}
if (!default_conn_ops.process_fn) {
pr_err("No connection request callback\n");
break;


@ -1415,32 +1415,18 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
*/
struct lease_ctx_info *parse_lease_state(void *open_req)
{
char *data_offset;
struct create_context *cc;
unsigned int next = 0;
char *name;
bool found = false;
struct smb2_create_req *req = (struct smb2_create_req *)open_req;
struct lease_ctx_info *lreq = kzalloc(sizeof(struct lease_ctx_info),
GFP_KERNEL);
struct lease_ctx_info *lreq;
cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4);
if (IS_ERR_OR_NULL(cc))
return NULL;
lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL);
if (!lreq)
return NULL;
data_offset = (char *)req + le32_to_cpu(req->CreateContextsOffset);
cc = (struct create_context *)data_offset;
do {
cc = (struct create_context *)((char *)cc + next);
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
if (le16_to_cpu(cc->NameLength) != 4 ||
strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
next = le32_to_cpu(cc->Next);
continue;
}
found = true;
break;
} while (next != 0);
if (found) {
if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
@ -1463,10 +1449,6 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
return lreq;
}
kfree(lreq);
return NULL;
}
/**
* smb2_find_context_vals() - find a particular context info in open request
* @open_req: buffer containing smb2 file open(create) request


@ -979,13 +979,13 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
struct smb2_negotiate_req *req,
int len_of_smb)
unsigned int len_of_smb)
{
/* +4 is to account for the RFC1001 len field */
struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
int i = 0, len_of_ctxts;
int offset = le32_to_cpu(req->NegotiateContextOffset);
int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
unsigned int offset = le32_to_cpu(req->NegotiateContextOffset);
unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
__le32 status = STATUS_INVALID_PARAMETER;
ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
@ -999,7 +999,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
while (i++ < neg_ctxt_cnt) {
int clen, ctxt_len;
if (len_of_ctxts < sizeof(struct smb2_neg_context))
if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
break;
pctx = (struct smb2_neg_context *)((char *)pctx + offset);
@ -1054,9 +1054,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
}
/* offsets must be 8 byte aligned */
clen = (clen + 7) & ~0x7;
offset = clen + sizeof(struct smb2_neg_context);
len_of_ctxts -= clen + sizeof(struct smb2_neg_context);
offset = (ctxt_len + 7) & ~0x7;
len_of_ctxts -= offset;
}
return status;
}

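The core of the deassemble_neg_contexts() fix is that the step to the next negotiate context must come from the full, 8-byte-aligned context length (ctxt_len), and the arithmetic has to run on unsigned values so a hostile DataLength cannot drive the offset negative. A tiny sketch of the on-wire alignment rounding (illustrative only):

#include <stdio.h>

/* Round a context length up to the next multiple of 8, since SMB2
 * negotiate contexts are 8-byte aligned on the wire. */
static unsigned int round_up_8(unsigned int len)
{
	return (len + 7) & ~0x7u;
}

int main(void)
{
	unsigned int lens[] = { 8, 10, 16, 38 };

	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("%u -> %u\n", lens[i], round_up_8(lens[i]));
	return 0;
}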

@ -618,7 +618,7 @@ struct netdev_queue {
netdevice_tracker dev_tracker;
struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
struct Qdisc __rcu *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
#endif
@ -771,8 +771,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
table->ents[index] = val;
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
if (READ_ONCE(table->ents[index]) != val)
WRITE_ONCE(table->ents[index], val);
}
}


@ -631,6 +631,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*
* For slab pages, since slab reuses the bits in struct page to store its
* internal states, the page->mapping does not exist as such, nor do these
* flags below. So in order to avoid testing non-existent bits, please
* make sure that PageSlab(page) actually evaluates to false before calling
* the following functions (e.g., PageAnon). See mm/slab.h.
*/
#define PAGE_MAPPING_ANON 0x1
#define PAGE_MAPPING_MOVABLE 0x2


@ -511,6 +511,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
void hcd_buffer_free(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma);
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma);
/* generic bus glue, needed for host controllers that don't use PCI */
extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);


@ -350,6 +350,7 @@ enum {
enum {
HCI_SETUP,
HCI_CONFIG,
HCI_DEBUGFS_CREATED,
HCI_AUTO_OFF,
HCI_RFKILLED,
HCI_MGMT,


@ -514,6 +514,7 @@ struct hci_dev {
struct work_struct cmd_sync_work;
struct list_head cmd_sync_work_list;
struct mutex cmd_sync_work_lock;
struct mutex unregister_lock;
struct work_struct cmd_sync_cancel_work;
struct work_struct reenable_adv_work;


@ -186,7 +186,7 @@ struct pneigh_entry {
netdevice_tracker dev_tracker;
u32 flags;
u8 protocol;
u8 key[];
u32 key[];
};
/*

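Changing key[] from u8 to u32 in struct pneigh_entry is purely an alignment fix: the flexible array holds a network address that the IPv4/IPv6 code may read with wider loads, so it needs 4-byte alignment rather than the byte alignment a u8 array gets straight after the preceding u8 member. A quick way to see the difference, using a hypothetical reduced struct rather than the kernel one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced stand-ins for the old and new layouts: the only change is
 * the element type of the trailing flexible array. */
struct pneigh_u8 {
	uint32_t flags;
	uint8_t  protocol;
	uint8_t  key[];		/* may start at offset 5: unaligned for 32-bit reads */
};

struct pneigh_u32 {
	uint32_t flags;
	uint8_t  protocol;
	uint32_t key[];		/* padded up to offset 8: aligned for 32-bit reads */
};

int main(void)
{
	printf("u8  key[] offset: %zu\n", offsetof(struct pneigh_u8, key));
	printf("u32 key[] offset: %zu\n", offsetof(struct pneigh_u32, key));
	return 0;
}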

@ -54,7 +54,7 @@ struct netns_sysctl_ipv6 {
int seg6_flowlabel;
u32 ioam6_id;
u64 ioam6_id_wide;
bool skip_notify_on_dev_down;
int skip_notify_on_dev_down;
u8 fib_notify_on_flag_change;
ANDROID_KABI_RESERVE(1);
};


@ -16,11 +16,7 @@
#define PING_HTABLE_SIZE 64
#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
/*
* gid_t is either uint or ushort. We want to pass it to
* proc_dointvec_minmax(), so it must not be larger than MAX_INT
*/
#define GID_T_MAX (((gid_t)~0U) >> 1)
#define GID_T_MAX (((gid_t)~0U) - 1)
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
struct pingv6_ops {

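The ping_group_range change replaces the old signed-int ceiling with the full unsigned range minus one, so a group range can now end at 4294967294 instead of 2147483647. The two expressions evaluate as follows (sketch, using a plain unsigned int as a stand-in for gid_t):

#include <stdio.h>

typedef unsigned int gid_t_like;	/* stand-in for the kernel's gid_t */

int main(void)
{
	gid_t_like old_max = ((gid_t_like)~0U) >> 1;	/* 2147483647 */
	gid_t_like new_max = ((gid_t_like)~0U) - 1;	/* 4294967294 */

	printf("old GID_T_MAX = %u\n", old_max);
	printf("new GID_T_MAX = %u\n", new_max);
	return 0;
}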

@ -128,6 +128,8 @@ static inline void qdisc_run(struct Qdisc *q)
}
}
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/


@ -23,9 +23,6 @@ static inline int rpl_init(void)
static inline void rpl_exit(void) {}
#endif
/* Worst decompression memory usage ipv6 address (16) + pad 7 */
#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
unsigned char cmpre);


@ -551,7 +551,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}
static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
@ -760,7 +760,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
if (rcu_access_pointer(txq->qdisc) !=
rcu_access_pointer(txq->qdisc_sleeping))
return true;
}
return false;


@ -1158,8 +1158,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
* OR an additional socket flag
* [1] : sk_state and sk_prot are in the same cache line.
*/
if (sk->sk_state == TCP_ESTABLISHED)
sock_rps_record_flow_hash(sk->sk_rxhash);
if (sk->sk_state == TCP_ESTABLISHED) {
/* This READ_ONCE() is paired with the WRITE_ONCE()
* from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
*/
sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
}
}
#endif
}
@ -1168,15 +1172,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
if (unlikely(sk->sk_rxhash != skb->hash))
sk->sk_rxhash = skb->hash;
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in sock_rps_record_flow().
*/
if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
WRITE_ONCE(sk->sk_rxhash, skb->hash);
#endif
}
static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
sk->sk_rxhash = 0;
/* Paired with READ_ONCE() in sock_rps_record_flow() */
WRITE_ONCE(sk->sk_rxhash, 0);
#endif
}

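The sock.h annotations above pair a WRITE_ONCE() in sock_rps_save_rxhash()/sock_rps_reset_rxhash() with a READ_ONCE() in sock_rps_record_flow(), so the lockless reader always observes a whole value rather than a torn or compiler-cached one. A user-space sketch of the pattern, with volatile-cast helpers standing in for the kernel macros (illustrative, not the kernel implementation; link with -lpthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins: force a single, untorn load/store of the value. */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static uint32_t sk_rxhash;	/* written by the receive path, read by senders */

static void *receive_path(void *arg)
{
	for (uint32_t hash = 1; hash <= 100000; hash++)
		if (READ_ONCE(sk_rxhash) != hash)	/* avoid dirtying the line */
			WRITE_ONCE(sk_rxhash, hash);
	return arg;
}

static void *record_flow(void *arg)
{
	uint64_t sum = 0;

	for (int i = 0; i < 100000; i++)
		sum += READ_ONCE(sk_rxhash);	/* never a torn read */
	printf("observed sum %llu\n", (unsigned long long)sum);
	return arg;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, receive_path, NULL);
	pthread_create(&r, NULL, record_flow, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	printf("final rxhash %u\n", READ_ONCE(sk_rxhash));
	return 0;
}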

@ -61,9 +61,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
/* Misc members not needed in bpf_map_meta_equal() check. */
inner_map_meta->ops = inner_map->ops;
if (inner_map->ops == &array_map_ops) {
struct bpf_array *inner_array_meta =
container_of(inner_map_meta, struct bpf_array, map);
struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
inner_array_meta->index_mask = inner_array->index_mask;
inner_array_meta->elem_size = inner_array->elem_size;
inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
container_of(inner_map_meta, struct bpf_array, map)->index_mask =
container_of(inner_map, struct bpf_array, map)->index_mask;
}
fdput(f);
