Merge 6.1.42 into android14-6.1-lts
Changes in 6.1.42
    io_uring: treat -EAGAIN for REQ_F_NOWAIT as final for io-wq
    ALSA: hda/realtek - remove 3k pull low procedure
    ALSA: hda/realtek: Add quirk for Clevo NS70AU
    ALSA: hda/realtek: Enable Mute LED on HP Laptop 15s-eq2xxx
    maple_tree: set the node limit when creating a new root node
    maple_tree: fix node allocation testing on 32 bit
    keys: Fix linking a duplicate key to a keyring's assoc_array
    perf probe: Add test for regression introduced by switch to die_get_decl_file()
    btrfs: fix warning when putting transaction with qgroups enabled after abort
    fuse: revalidate: don't invalidate if interrupted
    fuse: Apply flags2 only when userspace set the FUSE_INIT_EXT
    btrfs: set_page_extent_mapped after read_folio in btrfs_cont_expand
    btrfs: zoned: fix memory leak after finding block group with super blocks
    fuse: ioctl: translate ENOSYS in outarg
    btrfs: fix race between balance and cancel/pause
    selftests: tc: set timeout to 15 minutes
    selftests: tc: add 'ct' action kconfig dep
    regmap: Drop initial version of maximum transfer length fixes
    of: Preserve "of-display" device name for compatibility
    regmap: Account for register length in SMBus I/O limits
    arm64/fpsimd: Ensure SME storage is allocated after SVE VL changes
    can: mcp251xfd: __mcp251xfd_chip_set_mode(): increase poll timeout
    can: bcm: Fix UAF in bcm_proc_show()
    can: gs_usb: gs_can_open(): improve error handling
    selftests: tc: add ConnTrack procfs kconfig
    dma-buf/dma-resv: Stop leaking on krealloc() failure
    drm/amdgpu/vkms: relax timer deactivation by hrtimer_try_to_cancel
    drm/amdgpu/pm: make gfxclock consistent for sienna cichlid
    drm/amdgpu/pm: make mclk consistent for smu 13.0.7
    drm/client: Fix memory leak in drm_client_target_cloned
    drm/client: Fix memory leak in drm_client_modeset_probe
    drm/amd/display: only accept async flips for fast updates
    drm/amd/display: Disable MPC split by default on special asic
    drm/amd/display: check TG is non-null before checking if enabled
    drm/amd/display: Keep PHY active for DP displays on DCN31
    ASoC: fsl_sai: Disable bit clock with transmitter
    ASoC: fsl_sai: Revert "ASoC: fsl_sai: Enable MCTL_MCLK_EN bit for master mode"
    ASoC: tegra: Fix ADX byte map
    ASoC: rt5640: Fix sleep in atomic context
    ASoC: cs42l51: fix driver to properly autoload with automatic module loading
    ASoC: codecs: wcd938x: fix missing clsh ctrl error handling
    ASoC: codecs: wcd-mbhc-v2: fix resource leaks on component remove
    ASoC: qdsp6: audioreach: fix topology probe deferral
    ASoC: tegra: Fix AMX byte map
    ASoC: codecs: wcd938x: fix resource leaks on component remove
    ASoC: codecs: wcd938x: fix missing mbhc init error handling
    ASoC: codecs: wcd934x: fix resource leaks on component remove
    ASoC: codecs: wcd938x: fix codec initialisation race
    ASoC: codecs: wcd938x: fix soundwire initialisation race
    ext4: correct inline offset when handling xattrs in inode body
    drm/radeon: Fix integer overflow in radeon_cs_parser_init
    ALSA: emu10k1: roll up loops in DSP setup code for Audigy
    quota: Properly disable quotas when add_dquot_ref() fails
    quota: fix warning in dqgrab()
    HID: add quirk for 03f0:464a HP Elite Presenter Mouse
    ovl: check type and offset of struct vfsmount in ovl_entry
    udf: Fix uninitialized array access for some pathnames
    fs: jfs: Fix UBSAN: array-index-out-of-bounds in dbAllocDmapLev
    MIPS: dec: prom: Address -Warray-bounds warning
    FS: JFS: Fix null-ptr-deref Read in txBegin
    FS: JFS: Check for read-only mounted filesystem in txBegin
    ACPI: video: Add backlight=native DMI quirk for Dell Studio 1569
    rcu-tasks: Avoid pr_info() with spin lock in cblist_init_generic()
    rcu: Mark additional concurrent load from ->cpu_no_qs.b.exp
    sched/fair: Don't balance task to its current running CPU
    wifi: ath11k: fix registration of 6Ghz-only phy without the full channel range
    bpf: Print a warning only if writing to unprivileged_bpf_disabled.
    bpf: Address KCSAN report on bpf_lru_list
    bpf: tcp: Avoid taking fast sock lock in iterator
    wifi: ath11k: add support default regdb while searching board-2.bin for WCN6855
    wifi: mac80211_hwsim: Fix possible NULL dereference
    spi: dw: Add compatible for Intel Mount Evans SoC
    wifi: ath11k: fix memory leak in WMI firmware stats
    net: ethernet: litex: add support for 64 bit stats
    devlink: report devlink_port_type_warn source device
    wifi: wext-core: Fix -Wstringop-overflow warning in ioctl_standard_iw_point()
    wifi: iwlwifi: Add support for new PCI Id
    wifi: iwlwifi: mvm: avoid baid size integer overflow
    wifi: iwlwifi: pcie: add device id 51F1 for killer 1675
    igb: Fix igb_down hung on surprise removal
    net: hns3: fix strncpy() not using dest-buf length as length issue
    ASoC: amd: acp: fix for invalid dai id handling in acp_get_byte_count()
    ASoC: codecs: wcd938x: fix mbhc impedance loglevel
    ASoC: codecs: wcd938x: fix dB range for HPHL and HPHR
    ASoC: qcom: q6apm: do not close GPR port before closing graph
    sched/fair: Use recent_used_cpu to test p->cpus_ptr
    sched/psi: Fix avgs_work re-arm in psi_avgs_work()
    sched/psi: Rearrange polling code in preparation
    sched/psi: Rename existing poll members in preparation
    sched/psi: Extract update_triggers side effect
    sched/psi: Allow unprivileged polling of N*2s period
    sched/psi: use kernfs polling functions for PSI trigger polling
    pinctrl: renesas: rzv2m: Handle non-unique subnode names
    pinctrl: renesas: rzg2l: Handle non-unique subnode names
    spi: bcm63xx: fix max prepend length
    fbdev: imxfb: warn about invalid left/right margin
    fbdev: imxfb: Removed unneeded release_mem_region
    perf build: Fix library not found error when using CSLIBS
    btrfs: be a bit more careful when setting mirror_num_ret in btrfs_map_block
    spi: s3c64xx: clear loopback bit after loopback test
    kallsyms: Improve the performance of kallsyms_lookup_name()
    kallsyms: Correctly sequence symbols when CONFIG_LTO_CLANG=y
    kallsyms: strip LTO-only suffixes from promoted global functions
    dsa: mv88e6xxx: Do a final check before timing out
    net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field()/cpsw_ale_set_field()
    bridge: Add extack warning when enabling STP in netns.
    net: ethernet: mtk_eth_soc: handle probe deferral
    cifs: fix mid leak during reconnection after timeout threshold
    ASoC: SOF: ipc3-dtrace: uninitialized data in dfsentry_trace_filter_write()
    net: sched: cls_matchall: Undo tcf_bind_filter in case of failure after mall_set_parms
    net: sched: cls_u32: Undo tcf_bind_filter if u32_replace_hw_knode
    net: sched: cls_u32: Undo refcount decrement in case update failed
    net: sched: cls_bpf: Undo tcf_bind_filter in case of an error
    net: dsa: microchip: ksz8: Separate static MAC table operations for code reuse
    net: dsa: microchip: ksz8: Make ksz8_r_sta_mac_table() static
    net: dsa: microchip: ksz8_r_sta_mac_table(): Avoid using error code for empty entries
    net: dsa: microchip: correct KSZ8795 static MAC table access
    iavf: Fix use-after-free in free_netdev
    iavf: Fix out-of-bounds when setting channels on remove
    iavf: use internal state to free traffic IRQs
    iavf: Move netdev_update_features() into watchdog task
    iavf: send VLAN offloading caps once after VFR
    iavf: make functions static where possible
    iavf: Wait for reset in callbacks which trigger it
    iavf: fix a deadlock caused by rtnl and driver's lock circular dependencies
    iavf: fix reset task race with iavf_remove()
    security: keys: Modify mismatched function name
    octeontx2-pf: Dont allocate BPIDs for LBK interfaces
    bpf: Fix subprog idx logic in check_max_stack_depth
    bpf: Repeat check_max_stack_depth for async callbacks
    bpf, arm64: Fix BTI type used for freplace attached functions
    igc: Avoid transmit queue timeout for XDP
    igc: Prevent garbled TX queue with XDP ZEROCOPY
    net: ipv4: use consistent txhash in TIME_WAIT and SYN_RECV
    tcp: annotate data-races around tcp_rsk(req)->txhash
    tcp: annotate data-races around tcp_rsk(req)->ts_recent
    net: ipv4: Use kfree_sensitive instead of kfree
    net:ipv6: check return value of pskb_trim()
    Revert "tcp: avoid the lookup process failing to get sk in ehash table"
    fbdev: au1200fb: Fix missing IRQ check in au1200fb_drv_probe
    llc: Don't drop packet from non-root netns.
    ALSA: hda/realtek: Fix generic fixup definition for cs35l41 amp
    netfilter: nf_tables: fix spurious set element insertion failure
    netfilter: nf_tables: can't schedule in nft_chain_validate
    netfilter: nft_set_pipapo: fix improper element removal
    netfilter: nf_tables: skip bound chain in netns release path
    netfilter: nf_tables: skip bound chain on rule flush
    Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
    Bluetooth: hci_event: call disconnect callback before deleting conn
    Bluetooth: ISO: fix iso_conn related locking and validity issues
    Bluetooth: hci_sync: Avoid use-after-free in dbg for hci_remove_adv_monitor()
    tcp: annotate data-races around tp->tcp_tx_delay
    tcp: annotate data-races around tp->tsoffset
    tcp: annotate data-races around tp->keepalive_time
    tcp: annotate data-races around tp->keepalive_intvl
    tcp: annotate data-races around tp->keepalive_probes
    tcp: annotate data-races around icsk->icsk_syn_retries
    tcp: annotate data-races around tp->linger2
    tcp: annotate data-races around rskq_defer_accept
    tcp: annotate data-races around tp->notsent_lowat
    tcp: annotate data-races around icsk->icsk_user_timeout
    tcp: annotate data-races around fastopenq.max_qlen
    net: phy: prevent stale pointer dereference in phy_init()
    jbd2: recheck chechpointing non-dirty buffer
    tracing/histograms: Return an error if we fail to add histogram to hist_vars list
    drm/ttm: fix bulk_move corruption when adding a entry
    spi: dw: Remove misleading comment for Mount Evans SoC
    kallsyms: add kallsyms_seqs_of_names to list of special symbols
    scripts/kallsyms.c Make the comment up-to-date with current implementation
    scripts/kallsyms: update the usage in the comment block
    bpf: allow precision tracking for programs with subprogs
    bpf: stop setting precise in current state
    bpf: aggressively forget precise markings during state checkpointing
    selftests/bpf: make test_align selftest more robust
    selftests/bpf: Workaround verification failure for fexit_bpf2bpf/func_replace_return_code
    selftests/bpf: Fix sk_assign on s390x
    drm/amd/display: use max_dsc_bpp in amdgpu_dm
    drm/amd/display: fix some coding style issues
    drm/dp_mst: Clear MSG_RDY flag before sending new message
    drm/amd/display: force connector state when bpc changes during compliance
    drm/amd/display: Clean up errors & warnings in amdgpu_dm.c
    drm/amd/display: fix linux dp link lost handled only one time
    drm/amd/display: Add polling method to handle MST reply packet
    Revert "drm/amd/display: edp do not add non-edid timings"
    Linux 6.1.42

Change-Id: I6b7257a16f9a025d0c23dfd3eb43317c1c164a93
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -105,6 +105,10 @@ prevent overly frequent polling. Max limit is chosen as a high enough number
after which monitors are most likely not needed and psi averages can be used
instead.

Unprivileged users can also create monitors, with the only limitation that the
window size must be a multiple of 2s, in order to prevent excessive resource
usage.

When activated, psi monitor stays active for at least the duration of one
tracking window to avoid repeated activations/deactivations when system is
bouncing in and out of the stall state.
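For illustration only (not part of this patch), a minimal userspace sketch of the interface described above; it assumes the standard /proc/pressure files and uses a 2 s window, the multiple required of unprivileged callers:

/* Hypothetical example: register a PSI memory trigger as an unprivileged user.
 * "some 150000 2000000" asks for notification once 150 ms of partial memory
 * stall accumulates within a 2,000,000 us (2 s) tracking window.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 2000000";
	struct pollfd fds;
	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);

	if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0) {
		perror("psi trigger");
		return 1;
	}

	fds.fd = fd;
	fds.events = POLLPRI;
	if (poll(&fds, 1, -1) > 0 && (fds.revents & POLLPRI))
		printf("memory pressure threshold crossed\n");

	close(fd);
	return 0;
}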
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 41
SUBLEVEL = 42
EXTRAVERSION =
NAME = Curry Ramen
@@ -803,6 +803,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags)
{
bool free_sme = false;

if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC))
return -EINVAL;
@@ -851,21 +853,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
thread_sm_enabled(&task->thread))
sve_to_fpsimd(task);

if (system_supports_sme() && type == ARM64_VEC_SME) {
task->thread.svcr &= ~(SVCR_SM_MASK |
SVCR_ZA_MASK);
clear_thread_flag(TIF_SME);
if (system_supports_sme()) {
if (type == ARM64_VEC_SME ||
!(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
/*
* We are changing the SME VL or weren't using
* SME anyway, discard the state and force a
* reallocation.
*/
task->thread.svcr &= ~(SVCR_SM_MASK |
SVCR_ZA_MASK);
clear_thread_flag(TIF_SME);
free_sme = true;
}
}

if (task == current)
put_cpu_fpsimd_context();

/*
* Force reallocation of task SVE and SME state to the correct
* size on next use:
* Free the changed states if they are not in use, SME will be
* reallocated to the correct size on next use and we just
* allocate SVE now in case it is needed for use in streaming
* mode.
*/
sve_free(task);
if (system_supports_sme() && type == ARM64_VEC_SME)
if (system_supports_sve()) {
sve_free(task);
sve_alloc(task, true);
}

if (free_sme)
sme_free(task);

task_set_vl(task, type, vl);
@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
*
*/

emit_bti(A64_BTI_C, ctx);
/* bpf function may be invoked by 3 instruction types:
* 1. bl, attached via freplace to bpf prog via short jump
* 2. br, attached via freplace to bpf prog via long jump
* 3. blr, working as a function pointer, used by emit_call.
* So BTI_JC should used here to support both br and blr.
*/
emit_bti(A64_BTI_JC, ctx);

emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
@@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic)
*/
typedef struct {
int pagesize;
unsigned char bitmap[0];
unsigned char bitmap[];
} memmap;

@@ -510,6 +510,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
{
.callback = video_detect_force_native,
/* Dell Studio 1569 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"),
},
},
{
.callback = video_detect_force_native,
/* Acer Aspire 3830TG */
@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
};

static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
.write = regmap_i2c_smbus_i2c_write_reg16,
.read = regmap_i2c_smbus_i2c_read_reg16,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
.max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
.max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
};

static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
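A standalone sketch of the arithmetic behind the new limits (illustrative only, not regmap code, and assuming the register address shares the SMBus block payload with the value bytes): an SMBus block transfer carries at most I2C_SMBUS_BLOCK_MAX (32) bytes, so the value bytes a caller may pass are 32 minus the register width, hence "- 1" for the 8-bit-register bus and "- 2" for the 16-bit one.

/* Illustrative sketch (not driver code): usable value bytes per SMBus
 * block transfer once the register address is accounted for. */
#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32

static int smbus_max_raw(int reg_bytes)
{
	return I2C_SMBUS_BLOCK_MAX - reg_bytes;
}

int main(void)
{
	printf("8-bit registers:  %d value bytes\n", smbus_max_raw(1)); /* 31 */
	printf("16-bit registers: %d value bytes\n", smbus_max_raw(2)); /* 30 */
	return 0;
}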
@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
.max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.free_context = spi_avmm_bridge_ctx_free,
};
@@ -2064,8 +2064,6 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t val_count = val_len / val_bytes;
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
size_t max_data = map->max_raw_write - map->format.reg_bytes -
map->format.pad_bytes;
int ret, i;

if (!val_count)
@@ -2073,8 +2071,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,

if (map->use_single_write)
chunk_regs = 1;
else if (map->max_raw_write && val_len > max_data)
chunk_regs = max_data / val_bytes;
else if (map->max_raw_write && val_len > map->max_raw_write)
chunk_regs = map->max_raw_write / val_bytes;

chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
@@ -566,6 +566,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_for_each_fence_unlocked(&cursor, fence) {

if (dma_resv_iter_is_restarted(&cursor)) {
struct dma_fence **new_fences;
unsigned int count;

while (*num_fences)
@@ -574,13 +575,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
count = cursor.num_fences + 1;

/* Eventually re-allocate the array */
*fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
if (count && !*fences) {
new_fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
if (count && !new_fences) {
kfree(*fences);
*fences = NULL;
*num_fences = 0;
dma_resv_iter_end(&cursor);
return -ENOMEM;
}
*fences = new_fences;
}

(*fences)[(*num_fences)++] = dma_fence_get(fence);
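The generic idiom behind this fix, shown in isolation (a hypothetical helper, not the dma-resv code): krealloc() and krealloc_array() return NULL on failure while leaving the original allocation intact, so the result must land in a temporary; assigning it straight back over the only pointer to the old buffer leaks that buffer.

/* Hypothetical example of the safe krealloc_array() pattern used above. */
#include <linux/errno.h>
#include <linux/slab.h>

static int grow_array(int **arr, size_t new_count)
{
	int *tmp = krealloc_array(*arr, new_count, sizeof(**arr), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *arr is untouched and can still be freed */

	*arr = tmp;
	return 0;
}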
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
DRM_WARN("%s: vblank timer overrun\n", __func__);

ret = drm_crtc_handle_vblank(crtc);
/* Don't queue timer again when vblank is disabled. */
if (!ret)
DRM_ERROR("amdgpu_vkms failure on handling vblank");
return HRTIMER_NORESTART;

return HRTIMER_RESTART;
}
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

hrtimer_cancel(&amdgpu_crtc->vblank_timer);
hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@ -40,6 +40,9 @@
|
||||
#include "dc/dc_stat.h"
|
||||
#include "amdgpu_dm_trace.h"
|
||||
#include "dc/inc/dc_link_ddc.h"
|
||||
#include "dpcd_defs.h"
|
||||
#include "dc/inc/link_dpcd.h"
|
||||
#include "link_service_types.h"
|
||||
|
||||
#include "vid.h"
|
||||
#include "amdgpu.h"
|
||||
@ -211,7 +214,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
|
||||
|
||||
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_dm_connector *amdgpu_dm_connector,
|
||||
uint32_t link_index,
|
||||
u32 link_index,
|
||||
struct amdgpu_encoder *amdgpu_encoder);
|
||||
static int amdgpu_dm_encoder_init(struct drm_device *dev,
|
||||
struct amdgpu_encoder *aencoder,
|
||||
@ -263,7 +266,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
|
||||
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
|
||||
u32 *vbl, u32 *position)
|
||||
{
|
||||
uint32_t v_blank_start, v_blank_end, h_position, v_position;
|
||||
u32 v_blank_start, v_blank_end, h_position, v_position;
|
||||
|
||||
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
|
||||
return -EINVAL;
|
||||
@ -391,7 +394,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||
struct amdgpu_device *adev = irq_params->adev;
|
||||
unsigned long flags;
|
||||
struct drm_pending_vblank_event *e;
|
||||
uint32_t vpos, hpos, v_blank_start, v_blank_end;
|
||||
u32 vpos, hpos, v_blank_start, v_blank_end;
|
||||
bool vrr_active;
|
||||
|
||||
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
|
||||
@ -405,12 +408,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||
|
||||
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||
|
||||
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
|
||||
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
|
||||
amdgpu_crtc->pflip_status,
|
||||
AMDGPU_FLIP_SUBMITTED,
|
||||
amdgpu_crtc->crtc_id,
|
||||
amdgpu_crtc);
|
||||
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
|
||||
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
|
||||
amdgpu_crtc->pflip_status,
|
||||
AMDGPU_FLIP_SUBMITTED,
|
||||
amdgpu_crtc->crtc_id,
|
||||
amdgpu_crtc);
|
||||
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||
return;
|
||||
}
|
||||
@ -678,7 +681,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_list_iter iter;
|
||||
struct dc_link *link;
|
||||
uint8_t link_index = 0;
|
||||
u8 link_index = 0;
|
||||
struct drm_device *dev;
|
||||
|
||||
if (adev == NULL)
|
||||
@ -779,7 +782,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
|
||||
struct amdgpu_device *adev = irq_params->adev;
|
||||
struct amdgpu_display_manager *dm = &adev->dm;
|
||||
struct dmcub_trace_buf_entry entry = { 0 };
|
||||
uint32_t count = 0;
|
||||
u32 count = 0;
|
||||
struct dmub_hpd_work *dmub_hpd_wrk;
|
||||
struct dc_link *plink = NULL;
|
||||
|
||||
@ -858,7 +861,7 @@ static int dm_set_powergating_state(void *handle,
|
||||
}
|
||||
|
||||
/* Prototypes of private functions */
|
||||
static int dm_early_init(void* handle);
|
||||
static int dm_early_init(void *handle);
|
||||
|
||||
/* Allocate memory for FBC compressed data */
|
||||
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
|
||||
@ -1045,7 +1048,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
|
||||
struct dmub_srv_hw_params hw_params;
|
||||
enum dmub_status status;
|
||||
const unsigned char *fw_inst_const, *fw_bss_data;
|
||||
uint32_t i, fw_inst_const_size, fw_bss_data_size;
|
||||
u32 i, fw_inst_const_size, fw_bss_data_size;
|
||||
bool has_hw_support;
|
||||
|
||||
if (!dmub_srv)
|
||||
@ -1206,10 +1209,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
|
||||
|
||||
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
|
||||
{
|
||||
uint64_t pt_base;
|
||||
uint32_t logical_addr_low;
|
||||
uint32_t logical_addr_high;
|
||||
uint32_t agp_base, agp_bot, agp_top;
|
||||
u64 pt_base;
|
||||
u32 logical_addr_low;
|
||||
u32 logical_addr_high;
|
||||
u32 agp_base, agp_bot, agp_top;
|
||||
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
|
||||
|
||||
memset(pa_config, 0, sizeof(*pa_config));
|
||||
@ -1257,7 +1260,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
|
||||
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
|
||||
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
|
||||
|
||||
pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
|
||||
pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
|
||||
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
|
||||
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
|
||||
|
||||
@ -1273,6 +1276,21 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
|
||||
|
||||
}
|
||||
|
||||
static void force_connector_state(
|
||||
struct amdgpu_dm_connector *aconnector,
|
||||
enum drm_connector_force force_state)
|
||||
{
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
|
||||
mutex_lock(&connector->dev->mode_config.mutex);
|
||||
aconnector->base.force = force_state;
|
||||
mutex_unlock(&connector->dev->mode_config.mutex);
|
||||
|
||||
mutex_lock(&aconnector->hpd_lock);
|
||||
drm_kms_helper_connector_hotplug_event(connector);
|
||||
mutex_unlock(&aconnector->hpd_lock);
|
||||
}
|
||||
|
||||
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
|
||||
{
|
||||
struct hpd_rx_irq_offload_work *offload_work;
|
||||
@ -1281,6 +1299,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
|
||||
struct amdgpu_device *adev;
|
||||
enum dc_connection_type new_connection_type = dc_connection_none;
|
||||
unsigned long flags;
|
||||
union test_response test_response;
|
||||
|
||||
memset(&test_response, 0, sizeof(test_response));
|
||||
|
||||
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
|
||||
aconnector = offload_work->offload_wq->aconnector;
|
||||
@ -1304,16 +1325,58 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
|
||||
if (amdgpu_in_reset(adev))
|
||||
goto skip;
|
||||
|
||||
if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
|
||||
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
|
||||
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
|
||||
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
|
||||
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
|
||||
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
|
||||
goto skip;
|
||||
}
|
||||
|
||||
mutex_lock(&adev->dm.dc_lock);
|
||||
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
|
||||
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
|
||||
dc_link_dp_handle_automated_test(dc_link);
|
||||
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
|
||||
|
||||
if (aconnector->timing_changed) {
|
||||
/* force connector disconnect and reconnect */
|
||||
force_connector_state(aconnector, DRM_FORCE_OFF);
|
||||
msleep(100);
|
||||
force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
|
||||
}
|
||||
|
||||
test_response.bits.ACK = 1;
|
||||
|
||||
core_link_write_dpcd(
|
||||
dc_link,
|
||||
DP_TEST_RESPONSE,
|
||||
&test_response.raw,
|
||||
sizeof(test_response));
|
||||
} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
|
||||
hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
|
||||
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
|
||||
dc_link_dp_handle_link_loss(dc_link);
|
||||
/* offload_work->data is from handle_hpd_rx_irq->
|
||||
* schedule_hpd_rx_offload_work.this is defer handle
|
||||
* for hpd short pulse. upon here, link status may be
|
||||
* changed, need get latest link status from dpcd
|
||||
* registers. if link status is good, skip run link
|
||||
* training again.
|
||||
*/
|
||||
union hpd_irq_data irq_data;
|
||||
|
||||
memset(&irq_data, 0, sizeof(irq_data));
|
||||
|
||||
/* before dc_link_dp_handle_link_loss, allow new link lost handle
|
||||
* request be added to work queue if link lost at end of dc_link_
|
||||
* dp_handle_link_loss
|
||||
*/
|
||||
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
|
||||
offload_work->offload_wq->is_handling_link_loss = false;
|
||||
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
|
||||
|
||||
if ((read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
|
||||
hpd_rx_irq_check_link_loss_status(dc_link, &irq_data))
|
||||
dc_link_dp_handle_link_loss(dc_link);
|
||||
}
|
||||
mutex_unlock(&adev->dm.dc_lock);
|
||||
|
||||
@ -1482,7 +1545,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
mutex_init(&adev->dm.audio_lock);
|
||||
spin_lock_init(&adev->dm.vblank_lock);
|
||||
|
||||
if(amdgpu_dm_irq_init(adev)) {
|
||||
if (amdgpu_dm_irq_init(adev)) {
|
||||
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
|
||||
goto error;
|
||||
}
|
||||
@ -1617,9 +1680,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
|
||||
adev->dm.dc->debug.disable_stutter = true;
|
||||
|
||||
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
|
||||
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
|
||||
adev->dm.dc->debug.disable_dsc = true;
|
||||
}
|
||||
|
||||
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
|
||||
adev->dm.dc->debug.disable_clock_gate = true;
|
||||
@ -1840,8 +1902,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
|
||||
mutex_destroy(&adev->dm.audio_lock);
|
||||
mutex_destroy(&adev->dm.dc_lock);
|
||||
mutex_destroy(&adev->dm.dpia_aux_lock);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int load_dmcu_fw(struct amdgpu_device *adev)
|
||||
@ -1850,7 +1910,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
|
||||
int r;
|
||||
const struct dmcu_firmware_header_v1_0 *hdr;
|
||||
|
||||
switch(adev->asic_type) {
|
||||
switch (adev->asic_type) {
|
||||
#if defined(CONFIG_DRM_AMD_DC_SI)
|
||||
case CHIP_TAHITI:
|
||||
case CHIP_PITCAIRN:
|
||||
@ -2536,7 +2596,7 @@ struct amdgpu_dm_connector *
|
||||
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
|
||||
struct drm_crtc *crtc)
|
||||
{
|
||||
uint32_t i;
|
||||
u32 i;
|
||||
struct drm_connector_state *new_con_state;
|
||||
struct drm_connector *connector;
|
||||
struct drm_crtc *crtc_from_state;
|
||||
@ -2642,7 +2702,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
|
||||
struct dc_scaling_info scaling_infos[MAX_SURFACES];
|
||||
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
|
||||
struct dc_stream_update stream_update;
|
||||
} * bundle;
|
||||
} *bundle;
|
||||
int k, m;
|
||||
|
||||
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
|
||||
@ -2672,8 +2732,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
|
||||
|
||||
cleanup:
|
||||
kfree(bundle);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int dm_resume(void *handle)
|
||||
@ -2887,8 +2945,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
|
||||
.set_powergating_state = dm_set_powergating_state,
|
||||
};
|
||||
|
||||
const struct amdgpu_ip_block_version dm_ip_block =
|
||||
{
|
||||
const struct amdgpu_ip_block_version dm_ip_block = {
|
||||
.type = AMD_IP_BLOCK_TYPE_DCE,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
@ -2945,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
|
||||
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
|
||||
caps->aux_support = false;
|
||||
|
||||
if (caps->ext_caps->bits.oled == 1 /*||
|
||||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
|
||||
caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
|
||||
if (caps->ext_caps->bits.oled == 1
|
||||
/*
|
||||
* ||
|
||||
* caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
|
||||
* caps->ext_caps->bits.hdr_aux_backlight_control == 1
|
||||
*/)
|
||||
caps->aux_support = true;
|
||||
|
||||
if (amdgpu_backlight == 0)
|
||||
@ -3076,6 +3136,10 @@ void amdgpu_dm_update_connector_after_detect(
|
||||
aconnector->edid);
|
||||
}
|
||||
|
||||
aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
|
||||
if (!aconnector->timing_requested)
|
||||
dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
|
||||
|
||||
drm_connector_update_edid_property(connector, aconnector->edid);
|
||||
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
|
||||
update_connector_ext_caps(aconnector);
|
||||
@ -3087,6 +3151,8 @@ void amdgpu_dm_update_connector_after_detect(
|
||||
dc_sink_release(aconnector->dc_sink);
|
||||
aconnector->dc_sink = NULL;
|
||||
aconnector->edid = NULL;
|
||||
kfree(aconnector->timing_requested);
|
||||
aconnector->timing_requested = NULL;
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
|
||||
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
|
||||
@ -3131,6 +3197,8 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
||||
if (aconnector->fake_enable)
|
||||
aconnector->fake_enable = false;
|
||||
|
||||
aconnector->timing_changed = false;
|
||||
|
||||
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
|
||||
DRM_ERROR("KMS: Failed to detect connector\n");
|
||||
|
||||
@ -3170,84 +3238,6 @@ static void handle_hpd_irq(void *param)
|
||||
|
||||
}
|
||||
|
||||
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
|
||||
uint8_t dret;
|
||||
bool new_irq_handled = false;
|
||||
int dpcd_addr;
|
||||
int dpcd_bytes_to_read;
|
||||
|
||||
const int max_process_count = 30;
|
||||
int process_count = 0;
|
||||
|
||||
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
|
||||
|
||||
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
|
||||
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
|
||||
/* DPCD 0x200 - 0x201 for downstream IRQ */
|
||||
dpcd_addr = DP_SINK_COUNT;
|
||||
} else {
|
||||
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
|
||||
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
|
||||
dpcd_addr = DP_SINK_COUNT_ESI;
|
||||
}
|
||||
|
||||
dret = drm_dp_dpcd_read(
|
||||
&aconnector->dm_dp_aux.aux,
|
||||
dpcd_addr,
|
||||
esi,
|
||||
dpcd_bytes_to_read);
|
||||
|
||||
while (dret == dpcd_bytes_to_read &&
|
||||
process_count < max_process_count) {
|
||||
uint8_t retry;
|
||||
dret = 0;
|
||||
|
||||
process_count++;
|
||||
|
||||
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
|
||||
/* handle HPD short pulse irq */
|
||||
if (aconnector->mst_mgr.mst_state)
|
||||
drm_dp_mst_hpd_irq(
|
||||
&aconnector->mst_mgr,
|
||||
esi,
|
||||
&new_irq_handled);
|
||||
|
||||
if (new_irq_handled) {
|
||||
/* ACK at DPCD to notify down stream */
|
||||
const int ack_dpcd_bytes_to_write =
|
||||
dpcd_bytes_to_read - 1;
|
||||
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
uint8_t wret;
|
||||
|
||||
wret = drm_dp_dpcd_write(
|
||||
&aconnector->dm_dp_aux.aux,
|
||||
dpcd_addr + 1,
|
||||
&esi[1],
|
||||
ack_dpcd_bytes_to_write);
|
||||
if (wret == ack_dpcd_bytes_to_write)
|
||||
break;
|
||||
}
|
||||
|
||||
/* check if there is new irq to be handled */
|
||||
dret = drm_dp_dpcd_read(
|
||||
&aconnector->dm_dp_aux.aux,
|
||||
dpcd_addr,
|
||||
esi,
|
||||
dpcd_bytes_to_read);
|
||||
|
||||
new_irq_handled = false;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (process_count == max_process_count)
|
||||
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
|
||||
}
|
||||
|
||||
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
|
||||
union hpd_irq_data hpd_irq_data)
|
||||
{
|
||||
@ -3280,7 +3270,7 @@ static void handle_hpd_rx_irq(void *param)
|
||||
union hpd_irq_data hpd_irq_data;
|
||||
bool link_loss = false;
|
||||
bool has_left_work = false;
|
||||
int idx = aconnector->base.index;
|
||||
int idx = dc_link->link_index;
|
||||
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
|
||||
|
||||
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
|
||||
@ -3309,7 +3299,23 @@ static void handle_hpd_rx_irq(void *param)
|
||||
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
|
||||
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
|
||||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
|
||||
dm_handle_mst_sideband_msg(aconnector);
|
||||
bool skip = false;
|
||||
|
||||
/*
|
||||
* DOWN_REP_MSG_RDY is also handled by polling method
|
||||
* mgr->cbs->poll_hpd_irq()
|
||||
*/
|
||||
spin_lock(&offload_wq->offload_lock);
|
||||
skip = offload_wq->is_handling_mst_msg_rdy_event;
|
||||
|
||||
if (!skip)
|
||||
offload_wq->is_handling_mst_msg_rdy_event = true;
|
||||
|
||||
spin_unlock(&offload_wq->offload_lock);
|
||||
|
||||
if (!skip)
|
||||
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -3402,7 +3408,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
|
||||
aconnector = to_amdgpu_dm_connector(connector);
|
||||
dc_link = aconnector->dc_link;
|
||||
|
||||
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
|
||||
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
|
||||
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
|
||||
int_params.irq_source = dc_link->irq_source_hpd;
|
||||
|
||||
@ -3411,7 +3417,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
|
||||
(void *) aconnector);
|
||||
}
|
||||
|
||||
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
|
||||
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
|
||||
|
||||
/* Also register for DP short pulse (hpd_rx). */
|
||||
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
|
||||
@ -3420,11 +3426,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
|
||||
amdgpu_dm_irq_register_interrupt(adev, &int_params,
|
||||
handle_hpd_rx_irq,
|
||||
(void *) aconnector);
|
||||
|
||||
if (adev->dm.hpd_rx_offload_wq)
|
||||
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
|
||||
aconnector;
|
||||
}
|
||||
|
||||
if (adev->dm.hpd_rx_offload_wq)
|
||||
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
|
||||
aconnector;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3437,7 +3443,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
|
||||
struct dc_interrupt_params int_params = {0};
|
||||
int r;
|
||||
int i;
|
||||
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
|
||||
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
|
||||
|
||||
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
|
||||
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
|
||||
@ -3451,11 +3457,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
|
||||
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
|
||||
* coming from DC hardware.
|
||||
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
|
||||
* for acknowledging and handling. */
|
||||
* for acknowledging and handling.
|
||||
*/
|
||||
|
||||
/* Use VBLANK interrupt */
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
|
||||
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to add crtc irq id!\n");
|
||||
return r;
|
||||
@ -3463,7 +3470,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
|
||||
|
||||
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
|
||||
int_params.irq_source =
|
||||
dc_interrupt_to_irq_source(dc, i+1 , 0);
|
||||
dc_interrupt_to_irq_source(dc, i + 1, 0);
|
||||
|
||||
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
|
||||
|
||||
@ -3519,7 +3526,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
|
||||
struct dc_interrupt_params int_params = {0};
|
||||
int r;
|
||||
int i;
|
||||
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
|
||||
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
|
||||
|
||||
if (adev->family >= AMDGPU_FAMILY_AI)
|
||||
client_id = SOC15_IH_CLIENTID_DCE;
|
||||
@ -3536,7 +3543,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
|
||||
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
|
||||
* coming from DC hardware.
|
||||
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
|
||||
* for acknowledging and handling. */
|
||||
* for acknowledging and handling.
|
||||
*/
|
||||
|
||||
/* Use VBLANK interrupt */
|
||||
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
|
||||
@ -3985,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
|
||||
}
|
||||
|
||||
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
|
||||
unsigned *min, unsigned *max)
|
||||
unsigned int *min, unsigned int *max)
|
||||
{
|
||||
if (!caps)
|
||||
return 0;
|
||||
@ -4005,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
|
||||
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
|
||||
uint32_t brightness)
|
||||
{
|
||||
unsigned min, max;
|
||||
unsigned int min, max;
|
||||
|
||||
if (!get_brightness_range(caps, &min, &max))
|
||||
return brightness;
|
||||
@ -4018,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
|
||||
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
|
||||
uint32_t brightness)
|
||||
{
|
||||
unsigned min, max;
|
||||
unsigned int min, max;
|
||||
|
||||
if (!get_brightness_range(caps, &min, &max))
|
||||
return brightness;
|
||||
@ -4236,12 +4244,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
|
||||
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_display_manager *dm = &adev->dm;
|
||||
int32_t i;
|
||||
s32 i;
|
||||
struct amdgpu_dm_connector *aconnector = NULL;
|
||||
struct amdgpu_encoder *aencoder = NULL;
|
||||
struct amdgpu_mode_info *mode_info = &adev->mode_info;
|
||||
uint32_t link_cnt;
|
||||
int32_t primary_planes;
|
||||
u32 link_cnt;
|
||||
s32 primary_planes;
|
||||
enum dc_connection_type new_connection_type = dc_connection_none;
|
||||
const struct dc_plane_cap *plane;
|
||||
bool psr_feature_enabled = false;
|
||||
@ -4499,7 +4507,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
||||
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
|
||||
{
|
||||
drm_atomic_private_obj_fini(&dm->atomic_obj);
|
||||
return;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -4768,7 +4775,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
|
||||
static int
|
||||
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
|
||||
const struct drm_plane_state *plane_state,
|
||||
const uint64_t tiling_flags,
|
||||
const u64 tiling_flags,
|
||||
struct dc_plane_info *plane_info,
|
||||
struct dc_plane_address *address,
|
||||
bool tmz_surface,
|
||||
@ -4977,7 +4984,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
||||
uint32_t num_clips;
|
||||
bool bb_changed;
|
||||
bool fb_changed;
|
||||
uint32_t i = 0;
|
||||
u32 i = 0;
|
||||
|
||||
flip_addrs->dirty_rect_count = 0;
|
||||
|
||||
@ -5111,7 +5118,7 @@ static enum dc_color_depth
|
||||
convert_color_depth_from_display_info(const struct drm_connector *connector,
|
||||
bool is_y420, int requested_bpc)
|
||||
{
|
||||
uint8_t bpc;
|
||||
u8 bpc;
|
||||
|
||||
if (is_y420) {
|
||||
bpc = 8;
|
||||
@ -5225,6 +5232,7 @@ static bool adjust_colour_depth_from_display_info(
|
||||
{
|
||||
enum dc_color_depth depth = timing_out->display_color_depth;
|
||||
int normalized_clk;
|
||||
|
||||
do {
|
||||
normalized_clk = timing_out->pix_clk_100hz / 10;
|
||||
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
|
||||
@ -5440,6 +5448,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
struct dc_sink_init_data sink_init_data = { 0 };
|
||||
struct dc_sink *sink = NULL;
|
||||
|
||||
sink_init_data.link = aconnector->dc_link;
|
||||
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
|
||||
|
||||
@ -5563,7 +5572,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
|
||||
return &aconnector->freesync_vid_base;
|
||||
|
||||
/* Find the preferred mode */
|
||||
list_for_each_entry (m, list_head, head) {
|
||||
list_for_each_entry(m, list_head, head) {
|
||||
if (m->type & DRM_MODE_TYPE_PREFERRED) {
|
||||
m_pref = m;
|
||||
break;
|
||||
@ -5587,7 +5596,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
|
||||
* For some monitors, preferred mode is not the mode with highest
|
||||
* supported refresh rate.
|
||||
*/
|
||||
list_for_each_entry (m, list_head, head) {
|
||||
list_for_each_entry(m, list_head, head) {
|
||||
current_refresh = drm_mode_vrefresh(m);
|
||||
|
||||
if (m->hdisplay == m_pref->hdisplay &&
|
||||
@ -5655,8 +5664,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
|
||||
uint32_t max_dsc_target_bpp_limit_override)
|
||||
{
|
||||
const struct dc_link_settings *verified_link_cap = NULL;
|
||||
uint32_t link_bw_in_kbps;
|
||||
uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
|
||||
u32 link_bw_in_kbps;
|
||||
u32 edp_min_bpp_x16, edp_max_bpp_x16;
|
||||
struct dc *dc = sink->ctx->dc;
|
||||
struct dc_dsc_bw_range bw_range = {0};
|
||||
struct dc_dsc_config dsc_cfg = {0};
|
||||
@ -5713,17 +5722,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
|
||||
struct dsc_dec_dpcd_caps *dsc_caps)
|
||||
{
|
||||
struct drm_connector *drm_connector = &aconnector->base;
|
||||
uint32_t link_bandwidth_kbps;
|
||||
uint32_t max_dsc_target_bpp_limit_override = 0;
|
||||
u32 link_bandwidth_kbps;
|
||||
struct dc *dc = sink->ctx->dc;
|
||||
uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
|
||||
uint32_t dsc_max_supported_bw_in_kbps;
|
||||
u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
|
||||
u32 dsc_max_supported_bw_in_kbps;
|
||||
u32 max_dsc_target_bpp_limit_override =
|
||||
drm_connector->display_info.max_dsc_bpp;
|
||||
|
||||
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
|
||||
dc_link_get_link_cap(aconnector->dc_link));
|
||||
if (stream->link && stream->link->local_sink)
|
||||
max_dsc_target_bpp_limit_override =
|
||||
stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
|
||||
|
||||
/* Set DSC policy according to dsc_clock_en */
|
||||
dc_dsc_policy_set_enable_dsc_when_not_needed(
|
||||
@ -5860,7 +5867,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
||||
* This may not be an error, the use case is when we have no
|
||||
* usermode calls to reset and set mode upon hotplug. In this
|
||||
* case, we call set mode ourselves to restore the previous mode
|
||||
* and the modelist may not be filled in in time.
|
||||
* and the modelist may not be filled in time.
|
||||
*/
|
||||
DRM_DEBUG_DRIVER("No preferred mode found\n");
|
||||
} else {
|
||||
@ -5884,9 +5891,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
||||
drm_mode_set_crtcinfo(&mode, 0);
|
||||
|
||||
/*
|
||||
* If scaling is enabled and refresh rate didn't change
|
||||
* we copy the vic and polarities of the old timings
|
||||
*/
|
||||
* If scaling is enabled and refresh rate didn't change
|
||||
* we copy the vic and polarities of the old timings
|
||||
*/
|
||||
if (!scale || mode_refresh != preferred_refresh)
|
||||
fill_stream_properties_from_drm_display_mode(
|
||||
stream, &mode, &aconnector->base, con_state, NULL,
|
||||
@ -5896,6 +5903,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
||||
stream, &mode, &aconnector->base, con_state, old_stream,
|
||||
requested_bpc);
|
||||
|
||||
if (aconnector->timing_changed) {
|
||||
DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
|
||||
__func__,
|
||||
stream->timing.display_color_depth,
|
||||
aconnector->timing_requested->display_color_depth);
|
||||
stream->timing = *aconnector->timing_requested;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
/* SST DSC determination policy */
|
||||
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
|
||||
@ -6540,6 +6555,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
|
||||
|
||||
if (!state->duplicated) {
|
||||
int max_bpc = conn_state->max_requested_bpc;
|
||||
|
||||
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
|
||||
aconnector->force_yuv420_output;
|
||||
color_depth = convert_color_depth_from_display_info(connector,
|
||||
@ -6860,7 +6876,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
|
||||
{
|
||||
struct drm_display_mode *m;
|
||||
|
||||
list_for_each_entry (m, &aconnector->base.probed_modes, head) {
|
||||
list_for_each_entry(m, &aconnector->base.probed_modes, head) {
|
||||
if (drm_mode_equal(m, mode))
|
||||
return true;
|
||||
}
|
||||
@ -6873,7 +6889,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
|
||||
const struct drm_display_mode *m;
|
||||
struct drm_display_mode *new_mode;
|
||||
uint i;
|
||||
uint32_t new_modes_count = 0;
|
||||
u32 new_modes_count = 0;
|
||||
|
||||
/* Standard FPS values
|
||||
*
|
||||
@ -6887,7 +6903,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
|
||||
* 60 - Commonly used
|
||||
* 48,72,96,120 - Multiples of 24
|
||||
*/
|
||||
static const uint32_t common_rates[] = {
|
||||
static const u32 common_rates[] = {
|
||||
23976, 24000, 25000, 29970, 30000,
|
||||
48000, 50000, 60000, 72000, 96000, 120000
|
||||
};
|
||||
@ -6903,8 +6919,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
|
||||
uint64_t target_vtotal, target_vtotal_diff;
|
||||
uint64_t num, den;
|
||||
u64 target_vtotal, target_vtotal_diff;
|
||||
u64 num, den;
|
||||
|
||||
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
|
||||
continue;
|
||||
@ -6972,13 +6988,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
|
||||
drm_add_modes_noedid(connector, 640, 480);
|
||||
} else {
|
||||
amdgpu_dm_connector_ddc_get_modes(connector, edid);
|
||||
/* most eDP supports only timings from its edid,
|
||||
* usually only detailed timings are available
|
||||
* from eDP edid. timings which are not from edid
|
||||
* may damage eDP
|
||||
*/
|
||||
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
|
||||
amdgpu_dm_connector_add_common_modes(encoder, connector);
|
||||
amdgpu_dm_connector_add_common_modes(encoder, connector);
|
||||
amdgpu_dm_connector_add_freesync_modes(connector, edid);
|
||||
}
|
||||
amdgpu_dm_fbc_init(connector);
|
||||
@ -7010,6 +7020,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
|
||||
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
|
||||
aconnector->audio_inst = -1;
|
||||
mutex_init(&aconnector->hpd_lock);
|
||||
mutex_init(&aconnector->handle_mst_msg_ready);
|
||||
|
||||
/*
|
||||
* configure support HPD hot plug connector_>polled default value is 0
|
||||
@ -7152,7 +7163,7 @@ create_i2c(struct ddc_service *ddc_service,
|
||||
*/
|
||||
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_dm_connector *aconnector,
|
||||
uint32_t link_index,
|
||||
u32 link_index,
|
||||
struct amdgpu_encoder *aencoder)
|
||||
{
|
||||
int res = 0;
|
||||
@ -7163,7 +7174,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|
||||
|
||||
link->priv = aconnector;
|
||||
|
||||
DRM_DEBUG_DRIVER("%s()\n", __func__);
|
||||
|
||||
i2c = create_i2c(link->ddc, link->link_index, &res);
|
||||
if (!i2c) {
|
||||
@ -7643,8 +7653,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
struct drm_crtc *pcrtc,
|
||||
bool wait_for_vblank)
|
||||
{
|
||||
uint32_t i;
|
||||
uint64_t timestamp_ns;
|
||||
u32 i;
|
||||
u64 timestamp_ns;
|
||||
struct drm_plane *plane;
|
||||
struct drm_plane_state *old_plane_state, *new_plane_state;
|
||||
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
|
||||
@ -7655,7 +7665,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
|
||||
int planes_count = 0, vpos, hpos;
|
||||
unsigned long flags;
|
||||
uint32_t target_vblank, last_flip_vblank;
|
||||
u32 target_vblank, last_flip_vblank;
|
||||
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
|
||||
bool cursor_update = false;
|
||||
bool pflip_present = false;
|
||||
@ -7757,7 +7767,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
* Only allow immediate flips for fast updates that don't
|
||||
* change memory domain, FB pitch, DCC state, rotation or
|
||||
* mirroring.
|
||||
*
|
||||
* dm_crtc_helper_atomic_check() only accepts async flips with
|
||||
* fast updates.
|
||||
*/
|
||||
if (crtc->state->async_flip &&
|
||||
acrtc_state->update_type != UPDATE_TYPE_FAST)
|
||||
drm_warn_once(state->dev,
|
||||
"[PLANE:%d:%s] async flip with non-fast update\n",
|
||||
plane->base.id, plane->name);
|
||||
bundle->flip_addrs[planes_count].flip_immediate =
|
||||
crtc->state->async_flip &&
|
||||
acrtc_state->update_type == UPDATE_TYPE_FAST &&
|
||||
@ -7800,8 +7818,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
* DRI3/Present extension with defined target_msc.
|
||||
*/
|
||||
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
/* For variable refresh rate mode only:
|
||||
* Get vblank of last completed flip to avoid > 1 vrr
|
||||
* flips per video frame by use of throttling, but allow
|
||||
@ -8096,7 +8113,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
struct amdgpu_display_manager *dm = &adev->dm;
|
||||
struct dm_atomic_state *dm_state;
|
||||
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
|
||||
uint32_t i, j;
|
||||
u32 i, j;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
||||
unsigned long flags;
|
||||
@ -8128,8 +8145,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
dc_resource_state_copy_construct_current(dm->dc, dc_state);
|
||||
}
|
||||
|
||||
for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
|
||||
@ -8152,9 +8169,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
|
||||
|
||||
drm_dbg_state(state->dev,
|
||||
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
|
||||
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
|
||||
"connectors_changed:%d\n",
|
||||
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
|
||||
acrtc->crtc_id,
|
||||
new_crtc_state->enable,
|
||||
new_crtc_state->active,
|
||||
@ -8639,8 +8654,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
|
||||
&commit->flip_done, 10*HZ);
|
||||
|
||||
if (ret == 0)
|
||||
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
|
||||
"timed out\n", crtc->base.id, crtc->name);
|
||||
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
|
||||
crtc->base.id, crtc->name);
|
||||
|
||||
drm_crtc_commit_put(commit);
|
||||
}
|
||||
@ -8725,8 +8740,9 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
|
||||
return false;
|
||||
}
|
||||
|
||||
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
|
||||
uint64_t num, den, res;
|
||||
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
|
||||
{
|
||||
u64 num, den, res;
|
||||
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
|
||||
|
||||
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
|
||||
@ -8848,9 +8864,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
|
||||
goto skip_modeset;
|
||||
|
||||
drm_dbg_state(state->dev,
|
||||
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
|
||||
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
|
||||
"connectors_changed:%d\n",
|
||||
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
|
||||
acrtc->crtc_id,
|
||||
new_crtc_state->enable,
|
||||
new_crtc_state->active,
|
||||
@ -8879,8 +8893,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
|
||||
old_crtc_state)) {
|
||||
new_crtc_state->mode_changed = false;
|
||||
DRM_DEBUG_DRIVER(
|
||||
"Mode change not required for front porch change, "
|
||||
"setting mode_changed to %d",
|
||||
"Mode change not required for front porch change, setting mode_changed to %d",
|
||||
new_crtc_state->mode_changed);
|
||||
|
||||
set_freesync_fixed_config(dm_new_crtc_state);
|
||||
@ -8892,9 +8905,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
|
||||
struct drm_display_mode *high_mode;
|
||||
|
||||
high_mode = get_highest_refresh_rate_mode(aconnector, false);
|
||||
if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
|
||||
if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
|
||||
set_freesync_fixed_config(dm_new_crtc_state);
|
||||
}
|
||||
}
|
||||
|
||||
ret = dm_atomic_get_state(state, &dm_state);
|
||||
@ -9062,6 +9074,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
|
||||
*/
|
||||
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
|
||||
struct amdgpu_framebuffer *old_afb, *new_afb;
|
||||
|
||||
if (other->type == DRM_PLANE_TYPE_CURSOR)
|
||||
continue;
|
||||
|
||||
@ -9160,11 +9173,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
|
||||
}
|
||||
|
||||
/* Core DRM takes care of checking FB modifiers, so we only need to
|
||||
* check tiling flags when the FB doesn't have a modifier. */
|
||||
* check tiling flags when the FB doesn't have a modifier.
|
||||
*/
|
||||
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
|
||||
if (adev->family < AMDGPU_FAMILY_AI) {
|
||||
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
|
||||
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
|
||||
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
|
||||
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
|
||||
} else {
|
||||
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
|
||||
@ -9377,12 +9391,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
|
||||
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
|
||||
* cursor per pipe but it's going to inherit the scaling and
|
||||
* positioning from the underlying pipe. Check the cursor plane's
|
||||
* blending properties match the underlying planes'. */
|
||||
* blending properties match the underlying planes'.
|
||||
*/
|
||||
|
||||
new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
|
||||
if (!new_cursor_state || !new_cursor_state->fb) {
|
||||
if (!new_cursor_state || !new_cursor_state->fb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
|
||||
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
|
||||
@ -9428,6 +9442,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
|
||||
struct drm_connector_state *conn_state, *old_conn_state;
|
||||
struct amdgpu_dm_connector *aconnector = NULL;
|
||||
int i;
|
||||
|
||||
for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
|
||||
if (!conn_state->crtc)
|
||||
conn_state = old_conn_state;
|
||||
@ -9870,7 +9885,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
}
|
||||
|
||||
/* Store the overall update type for use later in atomic check. */
|
||||
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
|
||||
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
struct dm_crtc_state *dm_new_crtc_state =
|
||||
to_dm_crtc_state(new_crtc_state);
|
||||
|
||||
@ -9892,7 +9907,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
|
||||
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
|
||||
else
|
||||
DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
|
||||
DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
|
||||
|
||||
trace_amdgpu_dm_atomic_check_finish(state, ret);
|
||||
|
||||
@ -9902,7 +9917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
static bool is_dp_capable_without_timing_msa(struct dc *dc,
|
||||
struct amdgpu_dm_connector *amdgpu_dm_connector)
|
||||
{
|
||||
uint8_t dpcd_data;
|
||||
u8 dpcd_data;
|
||||
bool capable = false;
|
||||
|
||||
if (amdgpu_dm_connector->dc_link &&
|
||||
@ -9921,7 +9936,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
|
||||
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
|
||||
unsigned int offset,
|
||||
unsigned int total_length,
|
||||
uint8_t *data,
|
||||
u8 *data,
|
||||
unsigned int length,
|
||||
struct amdgpu_hdmi_vsdb_info *vsdb)
|
||||
{
|
||||
@ -9976,7 +9991,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
|
||||
}
|
||||
|
||||
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
|
||||
uint8_t *edid_ext, int len,
|
||||
u8 *edid_ext, int len,
|
||||
struct amdgpu_hdmi_vsdb_info *vsdb_info)
|
||||
{
|
||||
int i;
|
||||
@ -10017,7 +10032,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
|
||||
}
|
||||
|
||||
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
|
||||
uint8_t *edid_ext, int len,
|
||||
u8 *edid_ext, int len,
|
||||
struct amdgpu_hdmi_vsdb_info *vsdb_info)
|
||||
{
|
||||
int i;
|
||||
@ -10033,7 +10048,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
|
||||
}
|
||||
|
||||
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
|
||||
uint8_t *edid_ext, int len,
|
||||
u8 *edid_ext, int len,
|
||||
struct amdgpu_hdmi_vsdb_info *vsdb_info)
|
||||
{
|
||||
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
|
||||
@ -10047,7 +10062,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
|
||||
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
|
||||
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
|
||||
{
|
||||
uint8_t *edid_ext = NULL;
|
||||
u8 *edid_ext = NULL;
|
||||
int i;
|
||||
bool valid_vsdb_found = false;
|
||||
|
||||
@ -10223,7 +10238,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
|
||||
}
|
||||
|
||||
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
uint32_t value, const char *func_name)
|
||||
u32 value, const char *func_name)
|
||||
{
|
||||
#ifdef DM_CHECK_ADDR_0
|
||||
if (address == 0) {
|
||||
@ -10238,7 +10253,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
const char *func_name)
|
||||
{
|
||||
uint32_t value;
|
||||
u32 value;
|
||||
#ifdef DM_CHECK_ADDR_0
|
||||
if (address == 0) {
|
||||
DC_ERR("invalid register read; address = 0\n");
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include <drm/drm_connector.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_plane.h>
|
||||
#include "link_service_types.h"
|
||||
|
||||
/*
|
||||
* This file contains the definition for amdgpu_display_manager
|
||||
@ -192,6 +193,11 @@ struct hpd_rx_irq_offload_work_queue {
|
||||
* we're handling link loss
|
||||
*/
|
||||
bool is_handling_link_loss;
|
||||
/**
|
||||
* @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
|
||||
* ready event when we're already handling mst message ready event
|
||||
*/
|
||||
bool is_handling_mst_msg_rdy_event;
|
||||
/**
|
||||
* @aconnector: The aconnector that this work queue is attached to
|
||||
*/
|
||||
@ -613,6 +619,8 @@ struct amdgpu_dm_connector {
|
||||
struct drm_dp_mst_port *port;
|
||||
struct amdgpu_dm_connector *mst_port;
|
||||
struct drm_dp_aux *dsc_aux;
|
||||
struct mutex handle_mst_msg_ready;
|
||||
|
||||
/* TODO see if we can merge with ddc_bus or make a dm_connector */
|
||||
struct amdgpu_i2c_adapter *i2c;
|
||||
|
||||
@ -650,6 +658,10 @@ struct amdgpu_dm_connector {
|
||||
|
||||
/* Record progress status of mst*/
|
||||
uint8_t mst_status;
|
||||
|
||||
/* Automated testing */
|
||||
bool timing_changed;
|
||||
struct dc_crtc_timing *timing_requested;
|
||||
};
|
||||
|
||||
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
|
||||
|
@ -406,6 +406,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Only allow async flips for fast updates that don't change the FB
|
||||
* pitch, the DCC state, rotation, etc.
|
||||
*/
|
||||
if (crtc_state->async_flip &&
|
||||
dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
|
||||
drm_dbg_atomic(crtc->dev,
|
||||
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
|
||||
crtc->base.id, crtc->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* In some use cases, like reset, no stream is attached */
|
||||
if (!dm_crtc_state->stream)
|
||||
return 0;
|
||||
|
@ -38,6 +38,9 @@
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
#include "amdgpu_dm_mst_types.h"
|
||||
#include "dpcd_defs.h"
|
||||
#include "dc/inc/core_types.h"
|
||||
#include "dc_link_dp.h"
|
||||
|
||||
#include "dm_helpers.h"
|
||||
#include "ddc_service_types.h"
|
||||
@ -1056,6 +1059,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
|
||||
sizeof(new_downspread));
|
||||
}
|
||||
|
||||
bool dm_helpers_dp_handle_test_pattern_request(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link,
|
||||
union link_test_pattern dpcd_test_pattern,
|
||||
union test_misc dpcd_test_params)
|
||||
{
|
||||
enum dp_test_pattern test_pattern;
|
||||
enum dp_test_pattern_color_space test_pattern_color_space =
|
||||
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
|
||||
enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
|
||||
enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
|
||||
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
|
||||
struct pipe_ctx *pipe_ctx = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = link->priv;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
if (pipes[i].stream == NULL)
|
||||
continue;
|
||||
|
||||
if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
|
||||
!pipes[i].prev_odm_pipe) {
|
||||
pipe_ctx = &pipes[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pipe_ctx == NULL)
|
||||
return false;
|
||||
|
||||
switch (dpcd_test_pattern.bits.PATTERN) {
|
||||
case LINK_TEST_PATTERN_COLOR_RAMP:
|
||||
test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
|
||||
break;
|
||||
case LINK_TEST_PATTERN_VERTICAL_BARS:
|
||||
test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
|
||||
break; /* black and white */
|
||||
case LINK_TEST_PATTERN_COLOR_SQUARES:
|
||||
test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
|
||||
TEST_DYN_RANGE_VESA ?
|
||||
DP_TEST_PATTERN_COLOR_SQUARES :
|
||||
DP_TEST_PATTERN_COLOR_SQUARES_CEA);
|
||||
break;
|
||||
default:
|
||||
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
|
||||
break;
|
||||
}
|
||||
|
||||
if (dpcd_test_params.bits.CLR_FORMAT == 0)
|
||||
test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
|
||||
else
|
||||
test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
|
||||
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
|
||||
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
|
||||
|
||||
switch (dpcd_test_params.bits.BPC) {
|
||||
case 0: // 6 bits
|
||||
requestColorDepth = COLOR_DEPTH_666;
|
||||
break;
|
||||
case 1: // 8 bits
|
||||
requestColorDepth = COLOR_DEPTH_888;
|
||||
break;
|
||||
case 2: // 10 bits
|
||||
requestColorDepth = COLOR_DEPTH_101010;
|
||||
break;
|
||||
case 3: // 12 bits
|
||||
requestColorDepth = COLOR_DEPTH_121212;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (dpcd_test_params.bits.CLR_FORMAT) {
|
||||
case 0:
|
||||
requestPixelEncoding = PIXEL_ENCODING_RGB;
|
||||
break;
|
||||
case 1:
|
||||
requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
|
||||
break;
|
||||
case 2:
|
||||
requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
|
||||
break;
|
||||
default:
|
||||
requestPixelEncoding = PIXEL_ENCODING_RGB;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
|
||||
&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
|
||||
|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
|
||||
&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
|
||||
DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",
|
||||
__func__,
|
||||
pipe_ctx->stream->timing.display_color_depth,
|
||||
pipe_ctx->stream->timing.pixel_encoding,
|
||||
requestColorDepth,
|
||||
requestPixelEncoding);
|
||||
pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
|
||||
pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
|
||||
|
||||
dp_update_dsc_config(pipe_ctx);
|
||||
|
||||
aconnector->timing_changed = true;
|
||||
/* store current timing */
|
||||
if (aconnector->timing_requested)
|
||||
*aconnector->timing_requested = pipe_ctx->stream->timing;
|
||||
else
|
||||
DC_LOG_ERROR("%s: timing storage failed\n", __func__);
|
||||
|
||||
}
|
||||
|
||||
dc_link_dp_set_test_pattern(
|
||||
(struct dc_link *) link,
|
||||
test_pattern,
|
||||
test_pattern_color_space,
|
||||
NULL,
|
||||
NULL,
|
||||
0);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
|
||||
{
|
||||
// TODO
|
||||
|
@ -590,8 +590,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
|
||||
return connector;
|
||||
}
|
||||
|
||||
void dm_handle_mst_sideband_msg_ready_event(
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
enum mst_msg_ready_type msg_rdy_type)
|
||||
{
|
||||
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
|
||||
uint8_t dret;
|
||||
bool new_irq_handled = false;
|
||||
int dpcd_addr;
|
||||
uint8_t dpcd_bytes_to_read;
|
||||
const uint8_t max_process_count = 30;
|
||||
uint8_t process_count = 0;
|
||||
u8 retry;
|
||||
struct amdgpu_dm_connector *aconnector =
|
||||
container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
|
||||
|
||||
|
||||
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
|
||||
|
||||
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
|
||||
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
|
||||
/* DPCD 0x200 - 0x201 for downstream IRQ */
|
||||
dpcd_addr = DP_SINK_COUNT;
|
||||
} else {
|
||||
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
|
||||
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
|
||||
dpcd_addr = DP_SINK_COUNT_ESI;
|
||||
}
|
||||
|
||||
mutex_lock(&aconnector->handle_mst_msg_ready);
|
||||
|
||||
while (process_count < max_process_count) {
|
||||
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
|
||||
|
||||
process_count++;
|
||||
|
||||
dret = drm_dp_dpcd_read(
|
||||
&aconnector->dm_dp_aux.aux,
|
||||
dpcd_addr,
|
||||
esi,
|
||||
dpcd_bytes_to_read);
|
||||
|
||||
if (dret != dpcd_bytes_to_read) {
|
||||
DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
|
||||
break;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
|
||||
|
||||
switch (msg_rdy_type) {
|
||||
case DOWN_REP_MSG_RDY_EVENT:
|
||||
/* Only handle DOWN_REP_MSG_RDY case*/
|
||||
esi[1] &= DP_DOWN_REP_MSG_RDY;
|
||||
break;
|
||||
case UP_REQ_MSG_RDY_EVENT:
|
||||
/* Only handle UP_REQ_MSG_RDY case*/
|
||||
esi[1] &= DP_UP_REQ_MSG_RDY;
|
||||
break;
|
||||
default:
|
||||
/* Handle both cases*/
|
||||
esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!esi[1])
|
||||
break;
|
||||
|
||||
/* handle MST irq */
|
||||
if (aconnector->mst_mgr.mst_state)
|
||||
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
|
||||
esi,
|
||||
ack,
|
||||
&new_irq_handled);
|
||||
|
||||
if (new_irq_handled) {
|
||||
/* ACK at DPCD to notify down stream */
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
ssize_t wret;
|
||||
|
||||
wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
|
||||
dpcd_addr + 1,
|
||||
ack[1]);
|
||||
if (wret == 1)
|
||||
break;
|
||||
}
|
||||
|
||||
if (retry == 3) {
|
||||
DRM_ERROR("Failed to ack MST event.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
|
||||
|
||||
new_irq_handled = false;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&aconnector->handle_mst_msg_ready);
|
||||
|
||||
if (process_count == max_process_count)
|
||||
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
|
||||
}
|
||||
|
||||
static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
|
||||
}
|
||||
|
||||
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
|
||||
.add_connector = dm_dp_add_mst_connector,
|
||||
.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
|
||||
};
|
||||
|
||||
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
|
||||
@ -673,15 +783,18 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
|
||||
int count,
|
||||
int k)
|
||||
{
|
||||
struct drm_connector *drm_connector;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
drm_connector = ¶ms[i].aconnector->base;
|
||||
|
||||
memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
|
||||
if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
|
||||
params[i].sink->ctx->dc->res_pool->dscs[0],
|
||||
¶ms[i].sink->dsc_caps.dsc_dec_caps,
|
||||
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
|
||||
params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
|
||||
drm_connector->display_info.max_dsc_bpp,
|
||||
0,
|
||||
params[i].timing,
|
||||
¶ms[i].timing->dsc_cfg)) {
|
||||
@ -723,12 +836,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
|
||||
struct dc_dsc_config dsc_config;
|
||||
u64 kbps;
|
||||
|
||||
struct drm_connector *drm_connector = ¶m.aconnector->base;
|
||||
uint32_t max_dsc_target_bpp_limit_override =
|
||||
drm_connector->display_info.max_dsc_bpp;
|
||||
|
||||
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
|
||||
dc_dsc_compute_config(
|
||||
param.sink->ctx->dc->res_pool->dscs[0],
|
||||
¶m.sink->dsc_caps.dsc_dec_caps,
|
||||
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
|
||||
param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
|
||||
max_dsc_target_bpp_limit_override,
|
||||
(int) kbps, param.timing, &dsc_config);
|
||||
|
||||
return dsc_config.bits_per_pixel;
|
||||
|
@ -49,6 +49,13 @@
|
||||
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
|
||||
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
|
||||
|
||||
enum mst_msg_ready_type {
|
||||
NONE_MSG_RDY_EVENT = 0,
|
||||
DOWN_REP_MSG_RDY_EVENT = 1,
|
||||
UP_REQ_MSG_RDY_EVENT = 2,
|
||||
DOWN_OR_UP_MSG_RDY_EVENT = 3
|
||||
};
|
||||
|
||||
struct amdgpu_display_manager;
|
||||
struct amdgpu_dm_connector;
|
||||
|
||||
@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
|
||||
void
|
||||
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
|
||||
|
||||
void dm_handle_mst_sideband_msg_ready_event(
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
enum mst_msg_ready_type msg_rdy_type);
|
||||
|
||||
struct dsc_mst_fairness_vars {
|
||||
int pbn;
|
||||
bool dsc_enabled;
|
||||
|
@ -86,6 +86,11 @@ static int dcn31_get_active_display_cnt_wa(
|
||||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
|
||||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
|
||||
tmds_present = true;
|
||||
|
||||
/* Checking stream / link detection ensuring that PHY is active*/
|
||||
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
|
||||
display_count++;
|
||||
|
||||
}
|
||||
|
||||
for (i = 0; i < dc->link_count; i++) {
|
||||
|
@ -3115,7 +3115,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
|
||||
return max_link_cap;
|
||||
}
|
||||
|
||||
static enum dc_status read_hpd_rx_irq_data(
|
||||
enum dc_status read_hpd_rx_irq_data(
|
||||
struct dc_link *link,
|
||||
union hpd_irq_data *irq_data)
|
||||
{
|
||||
@ -4264,124 +4264,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
|
||||
test_pattern_size);
|
||||
}
|
||||
|
||||
static void dp_test_send_link_test_pattern(struct dc_link *link)
|
||||
{
|
||||
union link_test_pattern dpcd_test_pattern;
|
||||
union test_misc dpcd_test_params;
|
||||
enum dp_test_pattern test_pattern;
|
||||
enum dp_test_pattern_color_space test_pattern_color_space =
|
||||
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
|
||||
enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
|
||||
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
|
||||
struct pipe_ctx *pipe_ctx = NULL;
|
||||
int i;
|
||||
|
||||
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
|
||||
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
if (pipes[i].stream == NULL)
|
||||
continue;
|
||||
|
||||
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
|
||||
pipe_ctx = &pipes[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pipe_ctx == NULL)
|
||||
return;
|
||||
|
||||
/* get link test pattern and pattern parameters */
|
||||
core_link_read_dpcd(
|
||||
link,
|
||||
DP_TEST_PATTERN,
|
||||
&dpcd_test_pattern.raw,
|
||||
sizeof(dpcd_test_pattern));
|
||||
core_link_read_dpcd(
|
||||
link,
|
||||
DP_TEST_MISC0,
|
||||
&dpcd_test_params.raw,
|
||||
sizeof(dpcd_test_params));
|
||||
|
||||
switch (dpcd_test_pattern.bits.PATTERN) {
|
||||
case LINK_TEST_PATTERN_COLOR_RAMP:
|
||||
test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
|
||||
break;
|
||||
case LINK_TEST_PATTERN_VERTICAL_BARS:
|
||||
test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
|
||||
break; /* black and white */
|
||||
case LINK_TEST_PATTERN_COLOR_SQUARES:
|
||||
test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
|
||||
TEST_DYN_RANGE_VESA ?
|
||||
DP_TEST_PATTERN_COLOR_SQUARES :
|
||||
DP_TEST_PATTERN_COLOR_SQUARES_CEA);
|
||||
break;
|
||||
default:
|
||||
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
|
||||
break;
|
||||
}
|
||||
|
||||
if (dpcd_test_params.bits.CLR_FORMAT == 0)
|
||||
test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
|
||||
else
|
||||
test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
|
||||
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
|
||||
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
|
||||
|
||||
switch (dpcd_test_params.bits.BPC) {
|
||||
case 0: // 6 bits
|
||||
requestColorDepth = COLOR_DEPTH_666;
|
||||
break;
|
||||
case 1: // 8 bits
|
||||
requestColorDepth = COLOR_DEPTH_888;
|
||||
break;
|
||||
case 2: // 10 bits
|
||||
requestColorDepth = COLOR_DEPTH_101010;
|
||||
break;
|
||||
case 3: // 12 bits
|
||||
requestColorDepth = COLOR_DEPTH_121212;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (dpcd_test_params.bits.CLR_FORMAT) {
|
||||
case 0:
|
||||
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
|
||||
break;
|
||||
case 1:
|
||||
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422;
|
||||
break;
|
||||
case 2:
|
||||
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444;
|
||||
break;
|
||||
default:
|
||||
pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
if (requestColorDepth != COLOR_DEPTH_UNDEFINED
|
||||
&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
|
||||
DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
|
||||
__func__,
|
||||
pipe_ctx->stream->timing.display_color_depth,
|
||||
requestColorDepth);
|
||||
pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
|
||||
}
|
||||
|
||||
dp_update_dsc_config(pipe_ctx);
|
||||
|
||||
dc_link_dp_set_test_pattern(
|
||||
link,
|
||||
test_pattern,
|
||||
test_pattern_color_space,
|
||||
NULL,
|
||||
NULL,
|
||||
0);
|
||||
}
|
||||
|
||||
static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
|
||||
{
|
||||
union audio_test_mode dpcd_test_mode = {0};
|
||||
@ -4494,8 +4376,25 @@ void dc_link_dp_handle_automated_test(struct dc_link *link)
|
||||
test_response.bits.ACK = 0;
|
||||
}
|
||||
if (test_request.bits.LINK_TEST_PATTRN) {
|
||||
dp_test_send_link_test_pattern(link);
|
||||
test_response.bits.ACK = 1;
|
||||
union test_misc dpcd_test_params;
|
||||
union link_test_pattern dpcd_test_pattern;
|
||||
|
||||
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
|
||||
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
|
||||
|
||||
/* get link test pattern and pattern parameters */
|
||||
core_link_read_dpcd(
|
||||
link,
|
||||
DP_TEST_PATTERN,
|
||||
&dpcd_test_pattern.raw,
|
||||
sizeof(dpcd_test_pattern));
|
||||
core_link_read_dpcd(
|
||||
link,
|
||||
DP_TEST_MISC0,
|
||||
&dpcd_test_params.raw,
|
||||
sizeof(dpcd_test_params));
|
||||
test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
|
||||
dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
|
||||
}
|
||||
|
||||
if (test_request.bits.AUDIO_TEST_PATTERN) {
|
||||
|
@ -3293,7 +3293,8 @@ void dcn10_wait_for_mpcc_disconnect(
|
||||
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
|
||||
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
|
||||
|
||||
if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
|
||||
if (pipe_ctx->stream_res.tg &&
|
||||
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
|
||||
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
|
||||
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
|
||||
hubp->funcs->set_blank(hubp, true);
|
||||
|
@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
|
||||
.timing_trace = false,
|
||||
.clock_trace = true,
|
||||
.disable_pplib_clock_request = true,
|
||||
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
|
||||
.pipe_split_policy = MPC_SPLIT_AVOID,
|
||||
.force_single_disp_pipe_split = false,
|
||||
.disable_dcc = DCC_ENABLE,
|
||||
.vsr_support = true,
|
||||
|
@ -156,6 +156,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
|
||||
struct dc_link *link,
|
||||
struct dc_sink *sink);
|
||||
|
||||
bool dm_helpers_dp_handle_test_pattern_request(
|
||||
struct dc_context *ctx,
|
||||
const struct dc_link *link,
|
||||
union link_test_pattern dpcd_test_pattern,
|
||||
union test_misc dpcd_test_params);
|
||||
|
||||
void dm_set_dcn_clocks(
|
||||
struct dc_context *ctx,
|
||||
struct dc_clocks *clks);
|
||||
|
@ -82,6 +82,10 @@ bool perform_link_training_with_retries(
|
||||
enum signal_type signal,
|
||||
bool do_fallback);
|
||||
|
||||
enum dc_status read_hpd_rx_irq_data(
|
||||
struct dc_link *link,
|
||||
union hpd_irq_data *irq_data);
|
||||
|
||||
bool hpd_rx_irq_check_link_loss_status(
|
||||
struct dc_link *link,
|
||||
union hpd_irq_data *hpd_irq_dpcd_data);
|
||||
|
@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
|
||||
*size = 4;
|
||||
break;
|
||||
case AMDGPU_PP_SENSOR_GFX_MCLK:
|
||||
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
|
||||
ret = sienna_cichlid_get_smu_metrics_data(smu,
|
||||
METRICS_CURR_UCLK,
|
||||
(uint32_t *)data);
|
||||
*(uint32_t *)data *= 100;
|
||||
*size = 4;
|
||||
break;
|
||||
case AMDGPU_PP_SENSOR_GFX_SCLK:
|
||||
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
|
||||
ret = sienna_cichlid_get_smu_metrics_data(smu,
|
||||
METRICS_AVERAGE_GFXCLK,
|
||||
(uint32_t *)data);
|
||||
*(uint32_t *)data *= 100;
|
||||
*size = 4;
|
||||
break;
|
||||
|
@ -940,7 +940,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
|
||||
break;
|
||||
case AMDGPU_PP_SENSOR_GFX_MCLK:
|
||||
ret = smu_v13_0_7_get_smu_metrics_data(smu,
|
||||
METRICS_AVERAGE_UCLK,
|
||||
METRICS_CURR_UCLK,
|
||||
(uint32_t *)data);
|
||||
*(uint32_t *)data *= 100;
|
||||
*size = 4;
|
||||
|
@ -4053,17 +4053,28 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
|
||||
* drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
|
||||
* @mgr: manager to notify irq for.
|
||||
* @esi: 4 bytes from SINK_COUNT_ESI
|
||||
* @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
|
||||
* @handled: whether the hpd interrupt was consumed or not
|
||||
*
|
||||
* This should be called from the driver when it detects a short IRQ,
|
||||
* This should be called from the driver when it detects a HPD IRQ,
|
||||
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
|
||||
* topology manager will process the sideband messages received as a result
|
||||
* of this.
|
||||
* topology manager will process the sideband messages received
|
||||
* as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
|
||||
* corresponding flags that Driver has to ack the DP receiver later.
|
||||
*
|
||||
* Note that driver shall also call
|
||||
* drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set
|
||||
* after calling this function, to try to kick off a new request in
|
||||
* the queue if the previous message transaction is completed.
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_mst_hpd_irq_send_new_request()
|
||||
*/
|
||||
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
|
||||
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
|
||||
u8 *ack, bool *handled)
|
||||
{
|
||||
int ret = 0;
|
||||
int sc;
|
||||
@ -4078,18 +4089,47 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
|
||||
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
|
||||
ret = drm_dp_mst_handle_down_rep(mgr);
|
||||
*handled = true;
|
||||
ack[1] |= DP_DOWN_REP_MSG_RDY;
|
||||
}
|
||||
|
||||
if (esi[1] & DP_UP_REQ_MSG_RDY) {
|
||||
ret |= drm_dp_mst_handle_up_req(mgr);
|
||||
*handled = true;
|
||||
ack[1] |= DP_UP_REQ_MSG_RDY;
|
||||
}
|
||||
|
||||
drm_dp_mst_kick_tx(mgr);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
|
||||
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
|
||||
|
||||
/**
|
||||
* drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
|
||||
* @mgr: manager to notify irq for.
|
||||
*
|
||||
* This should be called from the driver when mst irq event is handled
|
||||
* and acked. Note that new down request should only be sent when
|
||||
* previous message transaction is completed. Source is not supposed to generate
|
||||
* interleaved message transactions.
|
||||
*/
|
||||
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
struct drm_dp_sideband_msg_tx *txmsg;
|
||||
bool kick = true;
|
||||
|
||||
mutex_lock(&mgr->qlock);
|
||||
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
|
||||
struct drm_dp_sideband_msg_tx, next);
|
||||
/* If last transaction is not completed yet*/
|
||||
if (!txmsg ||
|
||||
txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
|
||||
txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
|
||||
kick = false;
|
||||
mutex_unlock(&mgr->qlock);
|
||||
|
||||
if (kick)
|
||||
drm_dp_mst_kick_tx(mgr);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
|
||||
/**
|
||||
* drm_dp_mst_detect_port() - get connection status for an MST port
|
||||
* @connector: DRM connector for this port
|
||||
|
@ -315,6 +315,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
|
||||
can_clone = true;
|
||||
dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
|
||||
|
||||
if (!dmt_mode)
|
||||
goto fail;
|
||||
|
||||
for (i = 0; i < connector_count; i++) {
|
||||
if (!enabled[i])
|
||||
continue;
|
||||
@ -330,11 +333,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
|
||||
if (!modes[i])
|
||||
can_clone = false;
|
||||
}
|
||||
kfree(dmt_mode);
|
||||
|
||||
if (can_clone) {
|
||||
DRM_DEBUG_KMS("can clone using 1024x768\n");
|
||||
return true;
|
||||
}
|
||||
fail:
|
||||
DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
|
||||
return false;
|
||||
}
|
||||
@ -866,6 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
|
||||
break;
|
||||
}
|
||||
|
||||
kfree(modeset->mode);
|
||||
modeset->mode = drm_mode_duplicate(dev, mode);
|
||||
drm_connector_get(connector);
|
||||
modeset->connectors[modeset->num_connectors++] = connector;
|
||||
|
@ -3804,9 +3804,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
|
||||
{
|
||||
bool handled = false;
|
||||
|
||||
drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
|
||||
if (handled)
|
||||
ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
|
||||
drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
|
||||
|
||||
if (esi[1] & DP_CP_IRQ) {
|
||||
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
|
||||
@ -3881,6 +3879,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
|
||||
|
||||
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
|
||||
drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
|
||||
|
||||
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
|
||||
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
|
||||
}
|
||||
|
||||
return link_ok;
|
||||
|
@ -1473,22 +1473,26 @@ nv50_mstm_service(struct nouveau_drm *drm,
|
||||
u8 esi[8] = {};
|
||||
|
||||
while (handled) {
|
||||
u8 ack[8] = {};
|
||||
|
||||
rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
|
||||
if (rc != 8) {
|
||||
ret = false;
|
||||
break;
|
||||
}
|
||||
|
||||
drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
|
||||
drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
|
||||
if (!handled)
|
||||
break;
|
||||
|
||||
rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
|
||||
3);
|
||||
if (rc != 3) {
|
||||
rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
|
||||
|
||||
if (rc != 1) {
|
||||
ret = false;
|
||||
break;
|
||||
}
|
||||
|
||||
drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
|
@ -270,7 +270,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
|
||||
{
|
||||
struct drm_radeon_cs *cs = data;
|
||||
uint64_t *chunk_array_ptr;
|
||||
unsigned size, i;
|
||||
u64 size;
|
||||
unsigned i;
|
||||
u32 ring = RADEON_CS_RING_GFX;
|
||||
s32 priority = 0;
|
||||
|
||||
|
@ -85,6 +85,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
|
||||
struct ttm_resource *res)
|
||||
{
|
||||
if (pos->last != res) {
|
||||
if (pos->first == res)
|
||||
pos->first = list_next_entry(res, lru);
|
||||
list_move(&res->lru, &pos->last->lru);
|
||||
pos->last = res;
|
||||
}
|
||||
@ -110,7 +112,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
|
||||
{
|
||||
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
|
||||
|
||||
if (unlikely(pos->first == res && pos->last == res)) {
|
||||
if (unlikely(WARN_ON(!pos->first || !pos->last) ||
|
||||
(pos->first == res && pos->last == res))) {
|
||||
pos->first = NULL;
|
||||
pos->last = NULL;
|
||||
} else if (pos->first == res) {
|
||||
|
@ -614,6 +614,7 @@
|
||||
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
|
||||
|
||||
#define USB_VENDOR_ID_HP 0x03f0
|
||||
#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a
|
||||
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
|
||||
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
|
||||
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
|
||||
|
@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
|
||||
|
@ -227,6 +227,8 @@ static int
|
||||
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
|
||||
const u8 mode_req, bool nowait)
|
||||
{
|
||||
const struct can_bittiming *bt = &priv->can.bittiming;
|
||||
unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US;
|
||||
u32 con = 0, con_reqop, osc = 0;
|
||||
u8 mode;
|
||||
int err;
|
||||
@ -246,12 +248,16 @@ __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
|
||||
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
|
||||
return 0;
|
||||
|
||||
if (bt->bitrate)
|
||||
timeout_us = max_t(unsigned long, timeout_us,
|
||||
MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC /
|
||||
bt->bitrate);
|
||||
|
||||
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
|
||||
!mcp251xfd_reg_invalid(con) &&
|
||||
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
|
||||
con) == mode_req,
|
||||
MCP251XFD_POLL_SLEEP_US,
|
||||
MCP251XFD_POLL_TIMEOUT_US);
|
||||
MCP251XFD_POLL_SLEEP_US, timeout_us);
|
||||
if (err != -ETIMEDOUT && err != -EBADMSG)
|
||||
return err;
|
||||
|
||||
|
@ -387,6 +387,7 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
|
||||
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
|
||||
#define MCP251XFD_POLL_SLEEP_US (10)
|
||||
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
|
||||
#define MCP251XFD_FRAME_LEN_MAX_BITS (736)
|
||||
|
||||
/* Misc */
|
||||
#define MCP251XFD_NAPI_WEIGHT 32
|
||||
|
@ -833,6 +833,7 @@ static int gs_can_open(struct net_device *netdev)
|
||||
.mode = cpu_to_le32(GS_CAN_MODE_START),
|
||||
};
|
||||
struct gs_host_frame *hf;
|
||||
struct urb *urb = NULL;
|
||||
u32 ctrlmode;
|
||||
u32 flags = 0;
|
||||
int rc, i;
|
||||
@ -858,13 +859,14 @@ static int gs_can_open(struct net_device *netdev)
|
||||
|
||||
if (!parent->active_channels) {
|
||||
for (i = 0; i < GS_MAX_RX_URBS; i++) {
|
||||
struct urb *urb;
|
||||
u8 *buf;
|
||||
|
||||
/* alloc rx urb */
|
||||
urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!urb)
|
||||
return -ENOMEM;
|
||||
if (!urb) {
|
||||
rc = -ENOMEM;
|
||||
goto out_usb_kill_anchored_urbs;
|
||||
}
|
||||
|
||||
/* alloc rx buffer */
|
||||
buf = kmalloc(dev->parent->hf_size_rx,
|
||||
@ -872,8 +874,8 @@ static int gs_can_open(struct net_device *netdev)
|
||||
if (!buf) {
|
||||
netdev_err(netdev,
|
||||
"No memory left for USB buffer\n");
|
||||
usb_free_urb(urb);
|
||||
return -ENOMEM;
|
||||
rc = -ENOMEM;
|
||||
goto out_usb_free_urb;
|
||||
}
|
||||
|
||||
/* fill, anchor, and submit rx urb */
|
||||
@ -896,9 +898,7 @@ static int gs_can_open(struct net_device *netdev)
|
||||
netdev_err(netdev,
|
||||
"usb_submit failed (err=%d)\n", rc);
|
||||
|
||||
usb_unanchor_urb(urb);
|
||||
usb_free_urb(urb);
|
||||
break;
|
||||
goto out_usb_unanchor_urb;
|
||||
}
|
||||
|
||||
/* Drop reference,
|
||||
@ -944,7 +944,8 @@ static int gs_can_open(struct net_device *netdev)
|
||||
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
|
||||
gs_usb_timestamp_stop(dev);
|
||||
dev->can.state = CAN_STATE_STOPPED;
|
||||
return rc;
|
||||
|
||||
goto out_usb_kill_anchored_urbs;
|
||||
}
|
||||
|
||||
parent->active_channels++;
|
||||
@ -952,6 +953,18 @@ static int gs_can_open(struct net_device *netdev)
|
||||
netif_start_queue(netdev);
|
||||
|
||||
return 0;
|
||||
|
||||
out_usb_unanchor_urb:
|
||||
usb_unanchor_urb(urb);
|
||||
out_usb_free_urb:
|
||||
usb_free_urb(urb);
|
||||
out_usb_kill_anchored_urbs:
|
||||
if (!parent->active_channels)
|
||||
usb_kill_anchored_urbs(&dev->tx_submitted);
|
||||
|
||||
close_candev(netdev);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int gs_can_close(struct net_device *netdev)
|
||||
|
@ -21,8 +21,6 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
|
||||
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
|
||||
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
|
||||
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
|
||||
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
struct alu_struct *alu);
|
||||
void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
struct alu_struct *alu);
|
||||
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
|
||||
|
@ -406,8 +406,8 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
struct alu_struct *alu)
|
||||
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
struct alu_struct *alu, bool *valid)
|
||||
{
|
||||
u32 data_hi, data_lo;
|
||||
const u8 *shifts;
|
||||
@ -420,28 +420,38 @@ int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
|
||||
data_hi = data >> 32;
|
||||
data_lo = (u32)data;
|
||||
if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
|
||||
masks[STATIC_MAC_TABLE_OVERRIDE])) {
|
||||
alu->mac[5] = (u8)data_lo;
|
||||
alu->mac[4] = (u8)(data_lo >> 8);
|
||||
alu->mac[3] = (u8)(data_lo >> 16);
|
||||
alu->mac[2] = (u8)(data_lo >> 24);
|
||||
alu->mac[1] = (u8)data_hi;
|
||||
alu->mac[0] = (u8)(data_hi >> 8);
|
||||
alu->port_forward =
|
||||
(data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
|
||||
shifts[STATIC_MAC_FWD_PORTS];
|
||||
alu->is_override =
|
||||
(data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
|
||||
data_hi >>= 1;
|
||||
alu->is_static = true;
|
||||
alu->is_use_fid =
|
||||
(data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
|
||||
alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
|
||||
shifts[STATIC_MAC_FID];
|
||||
|
||||
if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
|
||||
masks[STATIC_MAC_TABLE_OVERRIDE]))) {
|
||||
*valid = false;
|
||||
return 0;
|
||||
}
|
||||
return -ENXIO;
|
||||
|
||||
alu->mac[5] = (u8)data_lo;
|
||||
alu->mac[4] = (u8)(data_lo >> 8);
|
||||
alu->mac[3] = (u8)(data_lo >> 16);
|
||||
alu->mac[2] = (u8)(data_lo >> 24);
|
||||
alu->mac[1] = (u8)data_hi;
|
||||
alu->mac[0] = (u8)(data_hi >> 8);
|
||||
alu->port_forward =
|
||||
(data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
|
||||
shifts[STATIC_MAC_FWD_PORTS];
|
||||
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
|
||||
|
||||
/* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
|
||||
* STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
|
||||
* static MAC table compared to doing write.
|
||||
*/
|
||||
if (ksz_is_ksz87xx(dev))
|
||||
data_hi >>= 1;
|
||||
alu->is_static = true;
|
||||
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
|
||||
alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
|
||||
shifts[STATIC_MAC_FID];
|
||||
|
||||
*valid = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
|
||||
@ -926,24 +936,29 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ksz8_mdb_add(struct ksz_device *dev, int port,
|
||||
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
|
||||
static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
|
||||
const unsigned char *addr, u16 vid)
|
||||
{
|
||||
struct alu_struct alu;
|
||||
int index;
|
||||
int index, ret;
|
||||
int empty = 0;
|
||||
|
||||
alu.port_forward = 0;
|
||||
for (index = 0; index < dev->info->num_statics; index++) {
|
||||
if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
|
||||
/* Found one already in static MAC table. */
|
||||
if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
|
||||
alu.fid == mdb->vid)
|
||||
break;
|
||||
/* Remember the first empty entry. */
|
||||
} else if (!empty) {
|
||||
empty = index + 1;
|
||||
bool valid;
|
||||
|
||||
ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!valid) {
|
||||
/* Remember the first empty entry. */
|
||||
if (!empty)
|
||||
empty = index + 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
|
||||
break;
|
||||
}
|
||||
|
||||
/* no available entry */
|
||||
@ -954,34 +969,38 @@ int ksz8_mdb_add(struct ksz_device *dev, int port,
|
||||
if (index == dev->info->num_statics) {
|
||||
index = empty - 1;
|
||||
memset(&alu, 0, sizeof(alu));
|
||||
memcpy(alu.mac, mdb->addr, ETH_ALEN);
|
||||
memcpy(alu.mac, addr, ETH_ALEN);
|
||||
alu.is_static = true;
|
||||
}
|
||||
alu.port_forward |= BIT(port);
|
||||
if (mdb->vid) {
|
||||
if (vid) {
|
||||
alu.is_use_fid = true;
|
||||
|
||||
/* Need a way to map VID to FID. */
|
||||
alu.fid = mdb->vid;
|
||||
alu.fid = vid;
|
||||
}
|
||||
ksz8_w_sta_mac_table(dev, index, &alu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ksz8_mdb_del(struct ksz_device *dev, int port,
|
||||
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
|
||||
static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
|
||||
const unsigned char *addr, u16 vid)
|
||||
{
|
||||
struct alu_struct alu;
|
||||
int index;
|
||||
int index, ret;
|
||||
|
||||
for (index = 0; index < dev->info->num_statics; index++) {
|
||||
if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
|
||||
/* Found one already in static MAC table. */
|
||||
if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
|
||||
alu.fid == mdb->vid)
|
||||
break;
|
||||
}
|
||||
bool valid;
|
||||
|
||||
ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!valid)
|
||||
continue;
|
||||
|
||||
if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
|
||||
break;
|
||||
}
|
||||
|
||||
/* no available entry */
|
||||
@ -998,6 +1017,18 @@ int ksz8_mdb_del(struct ksz_device *dev, int port,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ksz8_mdb_add(struct ksz_device *dev, int port,
|
||||
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
|
||||
{
|
||||
return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
|
||||
}
|
||||
|
||||
int ksz8_mdb_del(struct ksz_device *dev, int port,
|
||||
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
|
||||
{
|
||||
return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
|
||||
}
|
||||
|
||||
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
|
@ -286,13 +286,13 @@ static const u32 ksz8795_masks[] = {
|
||||
[STATIC_MAC_TABLE_VALID] = BIT(21),
|
||||
[STATIC_MAC_TABLE_USE_FID] = BIT(23),
|
||||
[STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
|
||||
[STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
|
||||
[STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
|
||||
[STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
|
||||
[STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
|
||||
[DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
|
||||
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
|
||||
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
|
||||
[DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
|
||||
[DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
|
||||
[DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
|
||||
[DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
|
||||
[DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
|
||||
[DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
|
||||
[P_MII_TX_FLOW_CTRL] = BIT(5),
|
||||
|
@ -512,6 +512,13 @@ static inline void ksz_regmap_unlock(void *__mtx)
|
||||
mutex_unlock(mtx);
|
||||
}
|
||||
|
||||
static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
|
||||
{
|
||||
return dev->chip_id == KSZ8795_CHIP_ID ||
|
||||
dev->chip_id == KSZ8794_CHIP_ID ||
|
||||
dev->chip_id == KSZ8765_CHIP_ID;
|
||||
}
|
||||
|
||||
static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
|
||||
{
|
||||
return dev->chip_id == KSZ8830_CHIP_ID;
|
||||
|
@ -109,6 +109,13 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
err = mv88e6xxx_read(chip, addr, reg, &data);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if ((data & mask) == val)
|
||||
return 0;
|
||||
|
||||
dev_err(chip->dev, "Timeout while waiting for switch\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
@ -435,19 +435,36 @@ static void hns3_dbg_fill_content(char *content, u16 len,
|
||||
const struct hns3_dbg_item *items,
|
||||
const char **result, u16 size)
|
||||
{
|
||||
#define HNS3_DBG_LINE_END_LEN 2
|
||||
char *pos = content;
|
||||
u16 item_len;
|
||||
u16 i;
|
||||
|
||||
memset(content, ' ', len);
|
||||
for (i = 0; i < size; i++) {
|
||||
if (result)
|
||||
strncpy(pos, result[i], strlen(result[i]));
|
||||
else
|
||||
strncpy(pos, items[i].name, strlen(items[i].name));
|
||||
|
||||
pos += strlen(items[i].name) + items[i].interval;
|
||||
if (!len) {
|
||||
return;
|
||||
} else if (len <= HNS3_DBG_LINE_END_LEN) {
|
||||
*pos++ = '\0';
|
||||
return;
|
||||
}
|
||||
|
||||
memset(content, ' ', len);
|
||||
len -= HNS3_DBG_LINE_END_LEN;
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
item_len = strlen(items[i].name) + items[i].interval;
|
||||
if (len < item_len)
|
||||
break;
|
||||
|
||||
if (result) {
|
||||
if (item_len < strlen(result[i]))
|
||||
break;
|
||||
strscpy(pos, result[i], strlen(result[i]));
|
||||
} else {
|
||||
strscpy(pos, items[i].name, strlen(items[i].name));
|
||||
}
|
||||
pos += item_len;
|
||||
len -= item_len;
|
||||
}
|
||||
*pos++ = '\n';
|
||||
*pos++ = '\0';
|
||||
}
|
||||
|
@ -87,16 +87,35 @@ static void hclge_dbg_fill_content(char *content, u16 len,
|
||||
const struct hclge_dbg_item *items,
|
||||
const char **result, u16 size)
|
||||
{
|
||||
#define HCLGE_DBG_LINE_END_LEN 2
|
||||
char *pos = content;
|
||||
u16 item_len;
|
||||
u16 i;
|
||||
|
||||
if (!len) {
|
||||
return;
|
||||
} else if (len <= HCLGE_DBG_LINE_END_LEN) {
|
||||
*pos++ = '\0';
|
||||
return;
|
||||
}
|
||||
|
||||
memset(content, ' ', len);
|
||||
len -= HCLGE_DBG_LINE_END_LEN;
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
if (result)
|
||||
strncpy(pos, result[i], strlen(result[i]));
|
||||
else
|
||||
strncpy(pos, items[i].name, strlen(items[i].name));
|
||||
pos += strlen(items[i].name) + items[i].interval;
|
||||
item_len = strlen(items[i].name) + items[i].interval;
|
||||
if (len < item_len)
|
||||
break;
|
||||
|
||||
if (result) {
|
||||
if (item_len < strlen(result[i]))
|
||||
break;
|
||||
strscpy(pos, result[i], strlen(result[i]));
|
||||
} else {
|
||||
strscpy(pos, items[i].name, strlen(items[i].name));
|
||||
}
|
||||
pos += item_len;
|
||||
len -= item_len;
|
||||
}
|
||||
*pos++ = '\n';
|
||||
*pos++ = '\0';
|
||||
|
@ -255,8 +255,10 @@ struct iavf_adapter {
|
||||
struct workqueue_struct *wq;
|
||||
struct work_struct reset_task;
|
||||
struct work_struct adminq_task;
|
||||
struct work_struct finish_config;
|
||||
struct delayed_work client_task;
|
||||
wait_queue_head_t down_waitqueue;
|
||||
wait_queue_head_t reset_waitqueue;
|
||||
wait_queue_head_t vc_waitqueue;
|
||||
struct iavf_q_vector *q_vectors;
|
||||
struct list_head vlan_filter_list;
|
||||
@ -518,14 +520,12 @@ int iavf_up(struct iavf_adapter *adapter);
|
||||
void iavf_down(struct iavf_adapter *adapter);
|
||||
int iavf_process_config(struct iavf_adapter *adapter);
|
||||
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
|
||||
void iavf_schedule_reset(struct iavf_adapter *adapter);
|
||||
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
|
||||
void iavf_schedule_request_stats(struct iavf_adapter *adapter);
|
||||
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
|
||||
void iavf_reset(struct iavf_adapter *adapter);
|
||||
void iavf_set_ethtool_ops(struct net_device *netdev);
|
||||
void iavf_update_stats(struct iavf_adapter *adapter);
|
||||
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
|
||||
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
|
||||
void iavf_irq_enable_queues(struct iavf_adapter *adapter);
|
||||
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
|
||||
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
|
||||
|
||||
@ -579,17 +579,11 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
|
||||
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
|
||||
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
|
||||
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
|
||||
int iavf_replace_primary_mac(struct iavf_adapter *adapter,
|
||||
const u8 *new_mac);
|
||||
void
|
||||
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
|
||||
netdev_features_t prev_features,
|
||||
netdev_features_t features);
|
||||
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
|
||||
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
|
||||
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
|
||||
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
|
||||
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
|
||||
const u8 *macaddr);
|
||||
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
|
||||
int iavf_wait_for_reset(struct iavf_adapter *adapter);
|
||||
#endif /* _IAVF_H_ */
|
||||
|
@ -484,6 +484,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
|
||||
{
|
||||
struct iavf_adapter *adapter = netdev_priv(netdev);
|
||||
u32 orig_flags, new_flags, changed_flags;
|
||||
int ret = 0;
|
||||
u32 i;
|
||||
|
||||
orig_flags = READ_ONCE(adapter->flags);
|
||||
@ -531,12 +532,14 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
|
||||
/* issue a reset to force legacy-rx change to take effect */
|
||||
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
|
||||
if (netif_running(netdev)) {
|
||||
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
|
||||
ret = iavf_wait_for_reset(adapter);
|
||||
if (ret)
|
||||
netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -627,6 +630,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
|
||||
{
|
||||
struct iavf_adapter *adapter = netdev_priv(netdev);
|
||||
u32 new_rx_count, new_tx_count;
|
||||
int ret = 0;
|
||||
|
||||
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
return -EINVAL;
|
||||
@ -671,11 +675,13 @@ static int iavf_set_ringparam(struct net_device *netdev,
|
||||
}
|
||||
|
||||
if (netif_running(netdev)) {
|
||||
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
|
||||
ret = iavf_wait_for_reset(adapter);
|
||||
if (ret)
|
||||
netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1830,7 +1836,7 @@ static int iavf_set_channels(struct net_device *netdev,
|
||||
{
|
||||
struct iavf_adapter *adapter = netdev_priv(netdev);
|
||||
u32 num_req = ch->combined_count;
|
||||
int i;
|
||||
int ret = 0;
|
||||
|
||||
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
|
||||
adapter->num_tc) {
|
||||
@ -1852,22 +1858,13 @@ static int iavf_set_channels(struct net_device *netdev,
|
||||
|
||||
adapter->num_req_queues = num_req;
|
||||
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
iavf_schedule_reset(adapter);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
|
||||
|
||||
/* wait for the reset is done */
|
||||
for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
|
||||
msleep(IAVF_RESET_WAIT_MS);
|
||||
if (adapter->flags & IAVF_FLAG_RESET_PENDING)
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
|
||||
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
adapter->num_active_queues = num_req;
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
ret = iavf_wait_for_reset(adapter);
|
||||
if (ret)
|
||||
netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -166,6 +166,45 @@ static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
return netdev_priv(pci_get_drvdata(pdev));
}

/**
* iavf_is_reset_in_progress - Check if a reset is in progress
* @adapter: board private structure
*/
static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
{
if (adapter->state == __IAVF_RESETTING ||
adapter->flags & (IAVF_FLAG_RESET_PENDING |
IAVF_FLAG_RESET_NEEDED))
return true;

return false;
}

/**
* iavf_wait_for_reset - Wait for reset to finish.
* @adapter: board private structure
*
* Returns 0 if reset finished successfully, negative on timeout or interrupt.
*/
int iavf_wait_for_reset(struct iavf_adapter *adapter)
{
int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
!iavf_is_reset_in_progress(adapter),
msecs_to_jiffies(5000));

/* If ret < 0 then it means wait was interrupted.
* If ret == 0 then it means we got a timeout while waiting
* for reset to finish.
* If ret > 0 it means reset has finished.
*/
if (ret > 0)
return 0;
else if (ret < 0)
return -EINTR;
else
return -EBUSY;
}

/**
* iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@@ -253,7 +292,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
*
* Returns 0 on success, negative on failure
**/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
unsigned int wait, delay = 10;

@@ -270,12 +309,14 @@ int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
* @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
**/
void iavf_schedule_reset(struct iavf_adapter *adapter)
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
{
if (!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= flags;
queue_work(adapter->wq, &adapter->reset_task);
}
}
@@ -303,7 +344,7 @@ static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct iavf_adapter *adapter = netdev_priv(netdev);

adapter->tx_timeout_count++;
iavf_schedule_reset(adapter);
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
}

/**
@@ -362,7 +403,7 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
* iavf_irq_enable_queues - Enable interrupt for all queues
* @adapter: board private structure
**/
void iavf_irq_enable_queues(struct iavf_adapter *adapter)
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int i;
@ -1003,8 +1044,8 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
|
||||
*
|
||||
* Do not call this with mac_vlan_list_lock!
|
||||
**/
|
||||
int iavf_replace_primary_mac(struct iavf_adapter *adapter,
|
||||
const u8 *new_mac)
|
||||
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
|
||||
const u8 *new_mac)
|
||||
{
|
||||
struct iavf_hw *hw = &adapter->hw;
|
||||
struct iavf_mac_filter *f;
|
||||
@ -1663,10 +1704,10 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
|
||||
adapter->msix_entries[vector].entry = vector;
|
||||
|
||||
err = iavf_acquire_msix_vectors(adapter, v_budget);
|
||||
if (!err)
|
||||
iavf_schedule_finish_config(adapter);
|
||||
|
||||
out:
|
||||
netif_set_real_num_rx_queues(adapter->netdev, pairs);
|
||||
netif_set_real_num_tx_queues(adapter->netdev, pairs);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1840,19 +1881,16 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
|
||||
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
|
||||
{
|
||||
int q_idx, num_q_vectors;
|
||||
int napi_vectors;
|
||||
|
||||
if (!adapter->q_vectors)
|
||||
return;
|
||||
|
||||
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
|
||||
napi_vectors = adapter->num_active_queues;
|
||||
|
||||
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
|
||||
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
|
||||
|
||||
if (q_idx < napi_vectors)
|
||||
netif_napi_del(&q_vector->napi);
|
||||
netif_napi_del(&q_vector->napi);
|
||||
}
|
||||
kfree(adapter->q_vectors);
|
||||
adapter->q_vectors = NULL;
|
||||
@ -1863,7 +1901,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
|
||||
* @adapter: board private structure
|
||||
*
|
||||
**/
|
||||
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
|
||||
static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
|
||||
{
|
||||
if (!adapter->msix_entries)
|
||||
return;
|
||||
@ -1878,7 +1916,7 @@ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
|
||||
* @adapter: board private structure to initialize
|
||||
*
|
||||
**/
|
||||
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
|
||||
static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
|
||||
{
|
||||
int err;
|
||||
|
||||
@ -1889,9 +1927,7 @@ int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
|
||||
goto err_alloc_queues;
|
||||
}
|
||||
|
||||
rtnl_lock();
|
||||
err = iavf_set_interrupt_capability(adapter);
|
||||
rtnl_unlock();
|
||||
if (err) {
|
||||
dev_err(&adapter->pdev->dev,
|
||||
"Unable to setup interrupt capabilities\n");
|
||||
@ -1944,15 +1980,16 @@ static void iavf_free_rss(struct iavf_adapter *adapter)
|
||||
/**
|
||||
* iavf_reinit_interrupt_scheme - Reallocate queues and vectors
|
||||
* @adapter: board private structure
|
||||
* @running: true if adapter->state == __IAVF_RUNNING
|
||||
*
|
||||
* Returns 0 on success, negative on failure
|
||||
**/
|
||||
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
|
||||
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
int err;
|
||||
|
||||
if (netif_running(netdev))
|
||||
if (running)
|
||||
iavf_free_traffic_irqs(adapter);
|
||||
iavf_free_misc_irq(adapter);
|
||||
iavf_reset_interrupt_capability(adapter);
|
||||
@ -1976,6 +2013,78 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
* Do work that needs both RTNL and crit_lock.
**/
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
int pairs, err;

adapter = container_of(work, struct iavf_adapter, finish_config);

/* Always take RTNL first to prevent circular lock dependency */
rtnl_lock();
mutex_lock(&adapter->crit_lock);

if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev_registered &&
!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
netdev_update_features(adapter->netdev);
adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
}

switch (adapter->state) {
case __IAVF_DOWN:
if (!adapter->netdev_registered) {
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);

/* go back and try again.*/
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
goto out;
}
adapter->netdev_registered = true;
}

/* Set the real number of queues when reset occurs while
* state == __IAVF_DOWN
*/
fallthrough;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
netif_set_real_num_tx_queues(adapter->netdev, pairs);
break;

default:
break;
}

out:
mutex_unlock(&adapter->crit_lock);
rtnl_unlock();
}

/**
* iavf_schedule_finish_config - Set the flags and schedule a reset event
* @adapter: board private structure
**/
void iavf_schedule_finish_config(struct iavf_adapter *adapter)
{
if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
queue_work(adapter->wq, &adapter->finish_config);
}
|
||||
/**
|
||||
* iavf_process_aq_command - process aq_required flags
|
||||
* and sends aq command
|
||||
@ -2176,7 +2285,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
|
||||
* the watchdog if any changes are requested to expedite the request via
|
||||
* virtchnl.
|
||||
**/
|
||||
void
|
||||
static void
|
||||
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
|
||||
netdev_features_t prev_features,
|
||||
netdev_features_t features)
|
||||
@ -2383,7 +2492,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
|
||||
adapter->vsi_res->num_queue_pairs);
|
||||
adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
|
||||
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
|
||||
iavf_schedule_reset(adapter);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
@ -2613,22 +2722,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
adapter->link_up = false;
|
||||
|
||||
/* set the semaphore to prevent any callbacks after device registration
|
||||
* up to time when state of driver will be set to __IAVF_DOWN
|
||||
*/
|
||||
rtnl_lock();
|
||||
if (!adapter->netdev_registered) {
|
||||
err = register_netdevice(netdev);
|
||||
if (err) {
|
||||
rtnl_unlock();
|
||||
goto err_register;
|
||||
}
|
||||
}
|
||||
|
||||
adapter->netdev_registered = true;
|
||||
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
|
||||
if (CLIENT_ALLOWED(adapter)) {
|
||||
err = iavf_lan_add_device(adapter);
|
||||
if (err)
|
||||
@ -2641,7 +2736,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
|
||||
|
||||
iavf_change_state(adapter, __IAVF_DOWN);
|
||||
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
|
||||
rtnl_unlock();
|
||||
|
||||
iavf_misc_irq_enable(adapter);
|
||||
wake_up(&adapter->down_waitqueue);
|
||||
@ -2661,10 +2755,11 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
|
||||
/* request initial VLAN offload settings */
|
||||
iavf_set_vlan_offload_features(adapter, 0, netdev->features);
|
||||
|
||||
iavf_schedule_finish_config(adapter);
|
||||
return;
|
||||
|
||||
err_mem:
|
||||
iavf_free_rss(adapter);
|
||||
err_register:
|
||||
iavf_free_misc_irq(adapter);
|
||||
err_sw_init:
|
||||
iavf_reset_interrupt_capability(adapter);
|
||||
@ -2694,14 +2789,6 @@ static void iavf_watchdog_task(struct work_struct *work)
|
||||
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
|
||||
iavf_change_state(adapter, __IAVF_COMM_FAILED);
|
||||
|
||||
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
|
||||
adapter->aq_required = 0;
|
||||
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (adapter->state) {
|
||||
case __IAVF_STARTUP:
|
||||
iavf_startup(adapter);
|
||||
@ -2829,11 +2916,10 @@ static void iavf_watchdog_task(struct work_struct *work)
|
||||
/* check for hw reset */
|
||||
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
|
||||
if (!reg_val) {
|
||||
adapter->flags |= IAVF_FLAG_RESET_PENDING;
|
||||
adapter->aq_required = 0;
|
||||
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
|
||||
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
queue_delayed_work(adapter->wq,
|
||||
&adapter->watchdog_task, HZ * 2);
|
||||
@ -3059,7 +3145,7 @@ static void iavf_reset_task(struct work_struct *work)
|
||||
|
||||
if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
|
||||
(adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
|
||||
err = iavf_reinit_interrupt_scheme(adapter);
|
||||
err = iavf_reinit_interrupt_scheme(adapter, running);
|
||||
if (err)
|
||||
goto reset_err;
|
||||
}
|
||||
@ -3154,6 +3240,7 @@ static void iavf_reset_task(struct work_struct *work)
|
||||
|
||||
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
|
||||
wake_up(&adapter->reset_waitqueue);
|
||||
mutex_unlock(&adapter->client_lock);
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
|
||||
@ -3230,27 +3317,7 @@ static void iavf_adminq_task(struct work_struct *work)
|
||||
} while (pending);
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
|
||||
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
|
||||
if (adapter->netdev_registered ||
|
||||
!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
|
||||
rtnl_lock();
|
||||
netdev_update_features(netdev);
|
||||
rtnl_unlock();
|
||||
/* Request VLAN offload settings */
|
||||
if (VLAN_V2_ALLOWED(adapter))
|
||||
iavf_set_vlan_offload_features
|
||||
(adapter, 0, netdev->features);
|
||||
|
||||
iavf_set_queue_vlan_tag_loc(adapter);
|
||||
}
|
||||
|
||||
adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
|
||||
}
|
||||
if ((adapter->flags &
|
||||
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
|
||||
adapter->state == __IAVF_RESETTING)
|
||||
if (iavf_is_reset_in_progress(adapter))
|
||||
goto freedom;
|
||||
|
||||
/* check for error indications */
|
||||
@ -4336,6 +4403,7 @@ static int iavf_close(struct net_device *netdev)
|
||||
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
{
|
||||
struct iavf_adapter *adapter = netdev_priv(netdev);
|
||||
int ret = 0;
|
||||
|
||||
netdev_dbg(netdev, "changing MTU from %d to %d\n",
|
||||
netdev->mtu, new_mtu);
|
||||
@ -4346,11 +4414,15 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
}
|
||||
|
||||
if (netif_running(netdev)) {
|
||||
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
|
||||
ret = iavf_wait_for_reset(adapter);
|
||||
if (ret < 0)
|
||||
netdev_warn(netdev, "MTU change interrupted waiting for reset");
|
||||
else if (ret)
|
||||
netdev_warn(netdev, "MTU change timed out waiting for reset");
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
|
||||
@ -4945,6 +5017,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
INIT_WORK(&adapter->reset_task, iavf_reset_task);
|
||||
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
|
||||
INIT_WORK(&adapter->finish_config, iavf_finish_config);
|
||||
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
|
||||
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
|
||||
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
|
||||
@ -4953,6 +5026,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
/* Setup the wait queue for indicating transition to down status */
|
||||
init_waitqueue_head(&adapter->down_waitqueue);
|
||||
|
||||
/* Setup the wait queue for indicating transition to running state */
|
||||
init_waitqueue_head(&adapter->reset_waitqueue);
|
||||
|
||||
/* Setup the wait queue for indicating virtchannel events */
|
||||
init_waitqueue_head(&adapter->vc_waitqueue);
|
||||
|
||||
@ -5085,13 +5161,15 @@ static void iavf_remove(struct pci_dev *pdev)
|
||||
usleep_range(500, 1000);
|
||||
}
|
||||
cancel_delayed_work_sync(&adapter->watchdog_task);
|
||||
cancel_work_sync(&adapter->finish_config);
|
||||
|
||||
rtnl_lock();
|
||||
if (adapter->netdev_registered) {
|
||||
rtnl_lock();
|
||||
unregister_netdevice(netdev);
|
||||
adapter->netdev_registered = false;
|
||||
rtnl_unlock();
|
||||
}
|
||||
rtnl_unlock();
|
||||
|
||||
if (CLIENT_ALLOWED(adapter)) {
|
||||
err = iavf_lan_del_device(adapter);
|
||||
if (err)
|
||||
|
@ -54,7 +54,7 @@ static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
|
||||
* iavf_clean_tx_ring - Free any empty Tx buffers
|
||||
* @tx_ring: ring to be cleaned
|
||||
**/
|
||||
void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
|
||||
static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
|
||||
{
|
||||
unsigned long bi_size;
|
||||
u16 i;
|
||||
@ -110,7 +110,7 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring)
|
||||
* Since there is no access to the ring head register
|
||||
* in XL710, we need to use our local copies
|
||||
**/
|
||||
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
|
||||
static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
|
||||
{
|
||||
u32 head, tail;
|
||||
|
||||
@ -127,6 +127,24 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_force_wb - Issue SW Interrupt so HW does a wb
|
||||
* @vsi: the VSI we care about
|
||||
* @q_vector: the vector on which to force writeback
|
||||
**/
|
||||
static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
|
||||
{
|
||||
u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
|
||||
IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
|
||||
IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
|
||||
IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
|
||||
/* allow 00 to be written to the index */;
|
||||
|
||||
wr32(&vsi->back->hw,
|
||||
IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
|
||||
val);
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_detect_recover_hung - Function to detect and recover hung_queues
|
||||
* @vsi: pointer to vsi struct with tx queues
|
||||
@ -352,25 +370,6 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
|
||||
q_vector->arm_wb_state = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_force_wb - Issue SW Interrupt so HW does a wb
|
||||
* @vsi: the VSI we care about
|
||||
* @q_vector: the vector on which to force writeback
|
||||
*
|
||||
**/
|
||||
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
|
||||
{
|
||||
u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
|
||||
IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
|
||||
IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
|
||||
IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
|
||||
/* allow 00 to be written to the index */;
|
||||
|
||||
wr32(&vsi->back->hw,
|
||||
IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
|
||||
val);
|
||||
}
|
||||
|
||||
static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
|
||||
struct iavf_ring_container *rc)
|
||||
{
|
||||
@ -687,7 +686,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
|
||||
* iavf_clean_rx_ring - Free Rx buffers
|
||||
* @rx_ring: ring to be cleaned
|
||||
**/
|
||||
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
|
||||
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
|
||||
{
|
||||
unsigned long bi_size;
|
||||
u16 i;
|
||||
|
@ -442,15 +442,11 @@ static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
|
||||
|
||||
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
|
||||
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
|
||||
void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
|
||||
void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
|
||||
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
|
||||
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
|
||||
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
|
||||
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
|
||||
int iavf_napi_poll(struct napi_struct *napi, int budget);
|
||||
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
|
||||
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
|
||||
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
|
||||
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
|
||||
bool __iavf_chk_linearize(struct sk_buff *skb);
|
||||
|
@ -1961,9 +1961,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
|
||||
case VIRTCHNL_EVENT_RESET_IMPENDING:
|
||||
dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
|
||||
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
|
||||
adapter->flags |= IAVF_FLAG_RESET_PENDING;
|
||||
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
|
||||
queue_work(adapter->wq, &adapter->reset_task);
|
||||
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@ -2237,6 +2236,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
|
||||
|
||||
iavf_process_config(adapter);
|
||||
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
|
||||
iavf_schedule_finish_config(adapter);
|
||||
|
||||
iavf_set_queue_vlan_tag_loc(adapter);
|
||||
|
||||
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
|
||||
adapter->hw.mac.addr);
|
||||
|
||||
@ -2282,6 +2285,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
|
||||
case VIRTCHNL_OP_ENABLE_QUEUES:
|
||||
/* enable transmits */
|
||||
iavf_irq_enable(adapter, true);
|
||||
wake_up(&adapter->reset_waitqueue);
|
||||
adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
|
||||
break;
|
||||
case VIRTCHNL_OP_DISABLE_QUEUES:
|
||||
|
@ -9585,6 +9585,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (state == pci_channel_io_normal) {
|
||||
dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
netif_device_detach(netdev);
|
||||
|
||||
if (state == pci_channel_io_perm_failure)
|
||||
|
@ -2402,6 +2402,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
|
||||
nq = txring_txq(ring);
|
||||
|
||||
__netif_tx_lock(nq, cpu);
|
||||
/* Avoid transmit queue timeout since we share it with the slow path */
|
||||
txq_trans_cond_update(nq);
|
||||
res = igc_xdp_init_tx_descriptor(ring, xdpf);
|
||||
__netif_tx_unlock(nq);
|
||||
return res;
|
||||
@ -2795,15 +2797,18 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
|
||||
struct netdev_queue *nq = txring_txq(ring);
|
||||
union igc_adv_tx_desc *tx_desc = NULL;
|
||||
int cpu = smp_processor_id();
|
||||
u16 ntu = ring->next_to_use;
|
||||
struct xdp_desc xdp_desc;
|
||||
u16 budget;
|
||||
u16 budget, ntu;
|
||||
|
||||
if (!netif_carrier_ok(ring->netdev))
|
||||
return;
|
||||
|
||||
__netif_tx_lock(nq, cpu);
|
||||
|
||||
/* Avoid transmit queue timeout since we share it with the slow path */
|
||||
txq_trans_cond_update(nq);
|
||||
|
||||
ntu = ring->next_to_use;
|
||||
budget = igc_desc_unused(ring);
|
||||
|
||||
while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
|
||||
@ -6297,6 +6302,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
|
||||
|
||||
__netif_tx_lock(nq, cpu);
|
||||
|
||||
/* Avoid transmit queue timeout since we share it with the slow path */
|
||||
txq_trans_cond_update(nq);
|
||||
|
||||
drops = 0;
|
||||
for (i = 0; i < num_frames; i++) {
|
||||
int err;
|
||||
|
@ -78,8 +78,7 @@ static int liteeth_rx(struct net_device *netdev)
|
||||
memcpy_fromio(data, priv->rx_base + rx_slot * priv->slot_size, len);
|
||||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
netdev->stats.rx_packets++;
|
||||
netdev->stats.rx_bytes += len;
|
||||
dev_sw_netstats_rx_add(netdev, len);
|
||||
|
||||
return netif_rx(skb);
|
||||
|
||||
@ -185,8 +184,7 @@ static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
|
||||
litex_write16(priv->base + LITEETH_READER_LENGTH, skb->len);
|
||||
litex_write8(priv->base + LITEETH_READER_START, 1);
|
||||
|
||||
netdev->stats.tx_bytes += skb->len;
|
||||
netdev->stats.tx_packets++;
|
||||
dev_sw_netstats_tx_add(netdev, 1, skb->len);
|
||||
|
||||
priv->tx_slot = (priv->tx_slot + 1) % priv->num_tx_slots;
|
||||
dev_kfree_skb_any(skb);
|
||||
@ -194,9 +192,17 @@ static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static void
|
||||
liteeth_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
|
||||
{
|
||||
netdev_stats_to_stats64(stats, &netdev->stats);
|
||||
dev_fetch_sw_netstats(stats, netdev->tstats);
|
||||
}
|
||||
|
||||
static const struct net_device_ops liteeth_netdev_ops = {
|
||||
.ndo_open = liteeth_open,
|
||||
.ndo_stop = liteeth_stop,
|
||||
.ndo_get_stats64 = liteeth_get_stats64,
|
||||
.ndo_start_xmit = liteeth_start_xmit,
|
||||
};
|
||||
|
||||
@ -242,6 +248,11 @@ static int liteeth_probe(struct platform_device *pdev)
|
||||
priv->netdev = netdev;
|
||||
priv->dev = &pdev->dev;
|
||||
|
||||
netdev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
|
||||
struct pcpu_sw_netstats);
|
||||
if (!netdev->tstats)
|
||||
return -ENOMEM;
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
@ -1452,8 +1452,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
|
||||
if (err)
|
||||
goto err_free_npa_lf;
|
||||
|
||||
/* Enable backpressure */
|
||||
otx2_nix_config_bp(pf, true);
|
||||
/* Enable backpressure for CGX mapped PF/VFs */
|
||||
if (!is_otx2_lbkvf(pf->pdev))
|
||||
otx2_nix_config_bp(pf, true);
|
||||
|
||||
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
|
||||
err = otx2_rq_aura_pool_init(pf);
|
||||
|
@ -3425,23 +3425,6 @@ static int mtk_hw_deinit(struct mtk_eth *eth)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init mtk_init(struct net_device *dev)
|
||||
{
|
||||
struct mtk_mac *mac = netdev_priv(dev);
|
||||
struct mtk_eth *eth = mac->hw;
|
||||
int ret;
|
||||
|
||||
ret = of_get_ethdev_address(mac->of_node, dev);
|
||||
if (ret) {
|
||||
/* If the mac address is invalid, use random mac address */
|
||||
eth_hw_addr_random(dev);
|
||||
dev_err(eth->dev, "generated random MAC address %pM\n",
|
||||
dev->dev_addr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mtk_uninit(struct net_device *dev)
|
||||
{
|
||||
struct mtk_mac *mac = netdev_priv(dev);
|
||||
@ -3789,7 +3772,6 @@ static const struct ethtool_ops mtk_ethtool_ops = {
|
||||
};
|
||||
|
||||
static const struct net_device_ops mtk_netdev_ops = {
|
||||
.ndo_init = mtk_init,
|
||||
.ndo_uninit = mtk_uninit,
|
||||
.ndo_open = mtk_open,
|
||||
.ndo_stop = mtk_stop,
|
||||
@ -3845,6 +3827,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
|
||||
mac->hw = eth;
|
||||
mac->of_node = np;
|
||||
|
||||
err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
|
||||
if (err == -EPROBE_DEFER)
|
||||
return err;
|
||||
|
||||
if (err) {
|
||||
/* If the mac address is invalid, use random mac address */
|
||||
eth_hw_addr_random(eth->netdev[id]);
|
||||
dev_err(eth->dev, "generated random MAC address %pM\n",
|
||||
eth->netdev[id]->dev_addr);
|
||||
}
|
||||
|
||||
memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
|
||||
mac->hwlro_ip_cnt = 0;
|
||||
|
||||
|
@ -106,23 +106,37 @@ struct cpsw_ale_dev_id {
|
||||
|
||||
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
|
||||
{
|
||||
int idx;
|
||||
int idx, idx2;
|
||||
u32 hi_val = 0;
|
||||
|
||||
idx = start / 32;
|
||||
idx2 = (start + bits - 1) / 32;
|
||||
/* Check if bits to be fetched exceed a word */
|
||||
if (idx != idx2) {
|
||||
idx2 = 2 - idx2; /* flip */
|
||||
hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
|
||||
}
|
||||
start -= idx * 32;
|
||||
idx = 2 - idx; /* flip */
|
||||
return (ale_entry[idx] >> start) & BITMASK(bits);
|
||||
return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
|
||||
}
|
||||
|
||||
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
|
||||
u32 value)
|
||||
{
|
||||
int idx;
|
||||
int idx, idx2;
|
||||
|
||||
value &= BITMASK(bits);
|
||||
idx = start / 32;
|
||||
idx = start / 32;
|
||||
idx2 = (start + bits - 1) / 32;
|
||||
/* Check if bits to be set exceed a word */
|
||||
if (idx != idx2) {
|
||||
idx2 = 2 - idx2; /* flip */
|
||||
ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
|
||||
ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
|
||||
}
|
||||
start -= idx * 32;
|
||||
idx = 2 - idx; /* flip */
|
||||
idx = 2 - idx; /* flip */
|
||||
ale_entry[idx] &= ~(BITMASK(bits) << start);
|
||||
ale_entry[idx] |= (value << start);
|
||||
}
|
||||
|
@ -3252,23 +3252,30 @@ static int __init phy_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
|
||||
|
||||
rc = mdio_bus_init();
|
||||
if (rc)
|
||||
return rc;
|
||||
goto err_ethtool_phy_ops;
|
||||
|
||||
ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
|
||||
features_init();
|
||||
|
||||
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
|
||||
if (rc)
|
||||
goto err_c45;
|
||||
goto err_mdio_bus;
|
||||
|
||||
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
|
||||
if (rc) {
|
||||
phy_driver_unregister(&genphy_c45_driver);
|
||||
if (rc)
|
||||
goto err_c45;
|
||||
|
||||
return 0;
|
||||
|
||||
err_c45:
|
||||
mdio_bus_exit();
|
||||
}
|
||||
phy_driver_unregister(&genphy_c45_driver);
|
||||
err_mdio_bus:
|
||||
mdio_bus_exit();
|
||||
err_ethtool_phy_ops:
|
||||
ethtool_set_ethtool_phy_ops(NULL);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -870,7 +870,8 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
|
||||
}
|
||||
|
||||
static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
|
||||
size_t name_len, bool with_variant)
|
||||
size_t name_len, bool with_variant,
|
||||
bool bus_type_mode)
|
||||
{
|
||||
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
|
||||
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
|
||||
@ -881,15 +882,20 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
|
||||
|
||||
switch (ab->id.bdf_search) {
|
||||
case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
|
||||
scnprintf(name, name_len,
|
||||
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
|
||||
ath11k_bus_str(ab->hif.bus),
|
||||
ab->id.vendor, ab->id.device,
|
||||
ab->id.subsystem_vendor,
|
||||
ab->id.subsystem_device,
|
||||
ab->qmi.target.chip_id,
|
||||
ab->qmi.target.board_id,
|
||||
variant);
|
||||
if (bus_type_mode)
|
||||
scnprintf(name, name_len,
|
||||
"bus=%s",
|
||||
ath11k_bus_str(ab->hif.bus));
|
||||
else
|
||||
scnprintf(name, name_len,
|
||||
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
|
||||
ath11k_bus_str(ab->hif.bus),
|
||||
ab->id.vendor, ab->id.device,
|
||||
ab->id.subsystem_vendor,
|
||||
ab->id.subsystem_device,
|
||||
ab->qmi.target.chip_id,
|
||||
ab->qmi.target.board_id,
|
||||
variant);
|
||||
break;
|
||||
default:
|
||||
scnprintf(name, name_len,
|
||||
@ -908,13 +914,19 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
|
||||
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
|
||||
size_t name_len)
|
||||
{
|
||||
return __ath11k_core_create_board_name(ab, name, name_len, true);
|
||||
return __ath11k_core_create_board_name(ab, name, name_len, true, false);
|
||||
}
|
||||
|
||||
static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
|
||||
size_t name_len)
|
||||
{
|
||||
return __ath11k_core_create_board_name(ab, name, name_len, false);
|
||||
return __ath11k_core_create_board_name(ab, name, name_len, false, false);
|
||||
}
|
||||
|
||||
static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name,
|
||||
size_t name_len)
|
||||
{
|
||||
return __ath11k_core_create_board_name(ab, name, name_len, false, true);
|
||||
}
|
||||
|
||||
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
|
||||
@ -1218,7 +1230,7 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
|
||||
|
||||
int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
|
||||
{
|
||||
char boardname[BOARD_NAME_SIZE];
|
||||
char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
|
||||
int ret;
|
||||
|
||||
ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
|
||||
@ -1235,6 +1247,21 @@ int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd
|
||||
if (!ret)
|
||||
goto exit;
|
||||
|
||||
ret = ath11k_core_create_bus_type_board_name(ab, default_boardname,
|
||||
BOARD_NAME_SIZE);
|
||||
if (ret) {
|
||||
ath11k_dbg(ab, ATH11K_DBG_BOOT,
|
||||
"failed to create default board name for regdb: %d", ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname,
|
||||
ATH11K_BD_IE_REGDB,
|
||||
ATH11K_BD_IE_REGDB_NAME,
|
||||
ATH11K_BD_IE_REGDB_DATA);
|
||||
if (!ret)
|
||||
goto exit;
|
||||
|
||||
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
|
||||
if (ret)
|
||||
ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
|
||||
|
@ -8715,7 +8715,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
|
||||
}
|
||||
|
||||
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
|
||||
if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) {
|
||||
if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
|
||||
channels = kmemdup(ath11k_6ghz_channels,
|
||||
sizeof(ath11k_6ghz_channels), GFP_KERNEL);
|
||||
if (!channels) {
|
||||
@ -9279,6 +9279,7 @@ void ath11k_mac_destroy(struct ath11k_base *ab)
|
||||
if (!ar)
|
||||
continue;
|
||||
|
||||
ath11k_fw_stats_free(&ar->fw_stats);
|
||||
ieee80211_free_hw(ar->hw);
|
||||
pdev->ar = NULL;
|
||||
}
|
||||
|
@ -7590,6 +7590,11 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
|
||||
rcu_read_unlock();
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
|
||||
* at this point, no need to free the individual list.
|
||||
*/
|
||||
return;
|
||||
|
||||
free:
|
||||
ath11k_fw_stats_free(&stats);
|
||||
}
|
||||
|
@ -2738,7 +2738,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
}
|
||||
|
||||
if (iwl_mvm_has_new_rx_api(mvm) && start) {
|
||||
u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
|
||||
u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
|
||||
|
||||
/* sparse doesn't like the __align() so don't check */
|
||||
#ifndef __CHECKER__
|
||||
|
@ -495,6 +495,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
|
||||
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
|
||||
{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
|
||||
{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
|
||||
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
|
||||
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
|
||||
|
||||
@ -543,6 +544,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
|
||||
IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
|
||||
IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
|
||||
IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
|
||||
IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
|
||||
IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
|
||||
IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
|
||||
IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
|
||||
@ -681,6 +683,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
|
||||
IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
|
||||
IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
|
||||
IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
|
||||
IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
|
||||
IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
|
||||
IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
|
||||
IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
|
||||
IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
|
||||
|
@ -4,7 +4,7 @@
|
||||
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
|
||||
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
|
||||
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 - 2022 Intel Corporation
|
||||
* Copyright (C) 2018 - 2023 Intel Corporation
|
||||
*/
|
||||
|
||||
/*
|
||||
@ -1753,7 +1753,7 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
|
||||
|
||||
WARN_ON(is_multicast_ether_addr(hdr->addr1));
|
||||
|
||||
if (WARN_ON_ONCE(!sta->valid_links))
|
||||
if (WARN_ON_ONCE(!sta || !sta->valid_links))
|
||||
return &vif->bss_conf;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
|
||||
|
@@ -557,7 +557,7 @@ static int __init of_platform_default_populate_init(void)
if (!of_get_property(node, "linux,opened", NULL) ||
!of_get_property(node, "linux,boot-display", NULL))
continue;
dev = of_platform_device_create(node, "of-display.0", NULL);
dev = of_platform_device_create(node, "of-display", NULL);
of_node_put(node);
if (WARN_ON(!dev))
return -ENOMEM;

@ -246,6 +246,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map,
|
||||
|
||||
static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
struct device_node *np,
|
||||
struct device_node *parent,
|
||||
struct pinctrl_map **map,
|
||||
unsigned int *num_maps,
|
||||
unsigned int *index)
|
||||
@ -263,6 +264,7 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
struct property *prop;
|
||||
int ret, gsel, fsel;
|
||||
const char **pin_fn;
|
||||
const char *name;
|
||||
const char *pin;
|
||||
|
||||
pinmux = of_find_property(np, "pinmux", NULL);
|
||||
@ -346,8 +348,19 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
psel_val[i] = MUX_FUNC(value);
|
||||
}
|
||||
|
||||
if (parent) {
|
||||
name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
|
||||
parent, np);
|
||||
if (!name) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
} else {
|
||||
name = np->name;
|
||||
}
|
||||
|
||||
/* Register a single pin group listing all the pins we read from DT */
|
||||
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
|
||||
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
|
||||
if (gsel < 0) {
|
||||
ret = gsel;
|
||||
goto done;
|
||||
@ -357,17 +370,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
* Register a single group function where the 'data' is an array PSEL
|
||||
* register values read from DT.
|
||||
*/
|
||||
pin_fn[0] = np->name;
|
||||
fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
|
||||
psel_val);
|
||||
pin_fn[0] = name;
|
||||
fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
|
||||
if (fsel < 0) {
|
||||
ret = fsel;
|
||||
goto remove_group;
|
||||
}
|
||||
|
||||
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
|
||||
maps[idx].data.mux.group = np->name;
|
||||
maps[idx].data.mux.function = np->name;
|
||||
maps[idx].data.mux.group = name;
|
||||
maps[idx].data.mux.function = name;
|
||||
idx++;
|
||||
|
||||
dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
|
||||
@ -414,7 +426,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
|
||||
index = 0;
|
||||
|
||||
for_each_child_of_node(np, child) {
|
||||
ret = rzg2l_dt_subnode_to_map(pctldev, child, map,
|
||||
ret = rzg2l_dt_subnode_to_map(pctldev, child, np, map,
|
||||
num_maps, &index);
|
||||
if (ret < 0) {
|
||||
of_node_put(child);
|
||||
@ -423,7 +435,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
|
||||
}
|
||||
|
||||
if (*num_maps == 0) {
|
||||
ret = rzg2l_dt_subnode_to_map(pctldev, np, map,
|
||||
ret = rzg2l_dt_subnode_to_map(pctldev, np, NULL, map,
|
||||
num_maps, &index);
|
||||
if (ret < 0)
|
||||
goto done;
|
||||
|
@ -207,6 +207,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map,
|
||||
|
||||
static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
struct device_node *np,
|
||||
struct device_node *parent,
|
||||
struct pinctrl_map **map,
|
||||
unsigned int *num_maps,
|
||||
unsigned int *index)
|
||||
@ -224,6 +225,7 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
struct property *prop;
|
||||
int ret, gsel, fsel;
|
||||
const char **pin_fn;
|
||||
const char *name;
|
||||
const char *pin;
|
||||
|
||||
pinmux = of_find_property(np, "pinmux", NULL);
|
||||
@ -307,8 +309,19 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
psel_val[i] = MUX_FUNC(value);
|
||||
}
|
||||
|
||||
if (parent) {
|
||||
name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
|
||||
parent, np);
|
||||
if (!name) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
} else {
|
||||
name = np->name;
|
||||
}
|
||||
|
||||
/* Register a single pin group listing all the pins we read from DT */
|
||||
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
|
||||
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
|
||||
if (gsel < 0) {
|
||||
ret = gsel;
|
||||
goto done;
|
||||
@ -318,17 +331,16 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
|
||||
* Register a single group function where the 'data' is an array PSEL
|
||||
* register values read from DT.
|
||||
*/
|
||||
pin_fn[0] = np->name;
|
||||
fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
|
||||
psel_val);
|
||||
pin_fn[0] = name;
|
||||
fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
|
||||
if (fsel < 0) {
|
||||
ret = fsel;
|
||||
goto remove_group;
|
||||
}
|
||||
|
||||
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
|
||||
maps[idx].data.mux.group = np->name;
|
||||
maps[idx].data.mux.function = np->name;
|
||||
maps[idx].data.mux.group = name;
|
||||
maps[idx].data.mux.function = name;
|
||||
idx++;
|
||||
|
||||
dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
|
||||
@ -375,7 +387,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
|
||||
index = 0;
|
||||
|
||||
for_each_child_of_node(np, child) {
|
||||
ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
|
||||
ret = rzv2m_dt_subnode_to_map(pctldev, child, np, map,
|
||||
num_maps, &index);
|
||||
if (ret < 0) {
|
||||
of_node_put(child);
|
||||
@ -384,7 +396,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
|
||||
}
|
||||
|
||||
if (*num_maps == 0) {
|
||||
ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
|
||||
ret = rzv2m_dt_subnode_to_map(pctldev, np, NULL, map,
|
||||
num_maps, &index);
|
||||
if (ret < 0)
|
||||
goto done;
|
||||
|
@@ -126,7 +126,7 @@ enum bcm63xx_regs_spi {
SPI_MSG_DATA_SIZE,
};

#define BCM63XX_SPI_MAX_PREPEND 15
#define BCM63XX_SPI_MAX_PREPEND 7

#define BCM63XX_SPI_MAX_CS 8
#define BCM63XX_SPI_BUS_NUM 0

@ -222,6 +222,24 @@ static int dw_spi_intel_init(struct platform_device *pdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* DMA-based mem ops are not configured for this device and are not tested.
|
||||
*/
|
||||
static int dw_spi_mountevans_imc_init(struct platform_device *pdev,
|
||||
struct dw_spi_mmio *dwsmmio)
|
||||
{
|
||||
/*
|
||||
* The Intel Mount Evans SoC's Integrated Management Complex DW
|
||||
* apb_ssi_v4.02a controller has an errata where a full TX FIFO can
|
||||
* result in data corruption. The suggested workaround is to never
|
||||
* completely fill the FIFO. The TX FIFO has a size of 32 so the
|
||||
* fifo_len is set to 31.
|
||||
*/
|
||||
dwsmmio->dws.fifo_len = 31;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dw_spi_canaan_k210_init(struct platform_device *pdev,
|
||||
struct dw_spi_mmio *dwsmmio)
|
||||
{
|
||||
@ -350,6 +368,10 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
|
||||
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
|
||||
{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
|
||||
{ .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
|
||||
{
|
||||
.compatible = "intel,mountevans-imc-ssi",
|
||||
.data = dw_spi_mountevans_imc_init,
|
||||
},
|
||||
{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
|
||||
{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
|
||||
{ /* end of table */}
|
||||
|
@ -668,6 +668,8 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
|
||||
|
||||
if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
|
||||
val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
|
||||
else
|
||||
val &= ~S3C64XX_SPI_MODE_SELF_LOOPBACK;
|
||||
|
||||
writel(val, regs + S3C64XX_SPI_MODE_CFG);
|
||||
|
||||
|
@ -1732,6 +1732,9 @@ static int au1200fb_drv_probe(struct platform_device *dev)
|
||||
|
||||
/* Now hook interrupt too */
|
||||
irq = platform_get_irq(dev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
ret = request_irq(irq, au1200fb_handle_irq,
|
||||
IRQF_SHARED, "lcd", (void *)dev);
|
||||
if (ret) {
|
||||
|
@ -613,10 +613,10 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
|
||||
if (var->hsync_len < 1 || var->hsync_len > 64)
|
||||
printk(KERN_ERR "%s: invalid hsync_len %d\n",
|
||||
info->fix.id, var->hsync_len);
|
||||
if (var->left_margin > 255)
|
||||
if (var->left_margin < 3 || var->left_margin > 255)
|
||||
printk(KERN_ERR "%s: invalid left_margin %d\n",
|
||||
info->fix.id, var->left_margin);
|
||||
if (var->right_margin > 255)
|
||||
if (var->right_margin < 1 || var->right_margin > 255)
|
||||
printk(KERN_ERR "%s: invalid right_margin %d\n",
|
||||
info->fix.id, var->right_margin);
|
||||
if (var->yres < 1 || var->yres > ymax_mask)
|
||||
@ -1043,7 +1043,6 @@ static int imxfb_probe(struct platform_device *pdev)
|
||||
failed_map:
|
||||
failed_ioremap:
|
||||
failed_getclock:
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
failed_of_parse:
|
||||
kfree(info->pseudo_palette);
|
||||
failed_init:
|
||||
|
@ -1894,6 +1894,7 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
|
||||
|
||||
/* Shouldn't have super stripes in sequential zones */
|
||||
if (zoned && nr) {
|
||||
kfree(logical);
|
||||
btrfs_err(fs_info,
|
||||
"zoned: block group %llu must not contain super block",
|
||||
cache->start);
|
||||
|
@ -4913,9 +4913,6 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ret = set_page_extent_mapped(page);
|
||||
if (ret < 0)
|
||||
goto out_unlock;
|
||||
|
||||
if (!PageUptodate(page)) {
|
||||
ret = btrfs_read_folio(NULL, page_folio(page));
|
||||
@ -4930,6 +4927,17 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We unlock the page after the io is completed and then re-lock it
|
||||
* above. release_folio() could have come in between that and cleared
|
||||
* PagePrivate(), but left the page in the mapping. Set the page mapped
|
||||
* here to make sure it's properly set for the subpage stuff.
|
||||
*/
|
||||
ret = set_page_extent_mapped(page);
|
||||
if (ret < 0)
|
||||
goto out_unlock;
|
||||
|
||||
wait_on_page_writeback(page);
|
||||
|
||||
lock_extent(io_tree, block_start, block_end, &cached_state);
|
||||
|
@@ -4410,4 +4410,5 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
ulist_free(entry->old_roots);
kfree(entry);
}
*root = RB_ROOT;
}

@ -4092,14 +4092,6 @@ static int alloc_profile_is_valid(u64 flags, int extended)
|
||||
return has_single_bit_set(flags);
|
||||
}
|
||||
|
||||
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
|
||||
{
|
||||
/* cancel requested || normal exit path */
|
||||
return atomic_read(&fs_info->balance_cancel_req) ||
|
||||
(atomic_read(&fs_info->balance_pause_req) == 0 &&
|
||||
atomic_read(&fs_info->balance_cancel_req) == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Validate target profile against allowed profiles and return true if it's OK.
|
||||
* Otherwise print the error message and return false.
|
||||
@ -4289,6 +4281,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
||||
u64 num_devices;
|
||||
unsigned seq;
|
||||
bool reducing_redundancy;
|
||||
bool paused = false;
|
||||
int i;
|
||||
|
||||
if (btrfs_fs_closing(fs_info) ||
|
||||
@ -4419,6 +4412,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
||||
if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
|
||||
btrfs_info(fs_info, "balance: paused");
|
||||
btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
|
||||
paused = true;
|
||||
}
|
||||
/*
|
||||
* Balance can be canceled by:
|
||||
@ -4447,8 +4441,8 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
||||
btrfs_update_ioctl_balance_args(fs_info, bargs);
|
||||
}
|
||||
|
||||
if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
|
||||
balance_need_close(fs_info)) {
|
||||
/* We didn't pause, we can clean everything up. */
|
||||
if (!paused) {
|
||||
reset_balance_state(fs_info);
|
||||
btrfs_exclop_finish(fs_info);
|
||||
}
|
||||
@ -6601,11 +6595,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
|
||||
if (patch_the_first_stripe_for_dev_replace) {
|
||||
smap->dev = dev_replace->tgtdev;
|
||||
smap->physical = physical_to_patch_in_first_stripe;
|
||||
*mirror_num_ret = map->num_stripes + 1;
|
||||
if (mirror_num_ret)
|
||||
*mirror_num_ret = map->num_stripes + 1;
|
||||
} else {
|
||||
set_io_stripe(smap, map, stripe_index, stripe_offset,
|
||||
stripe_nr);
|
||||
*mirror_num_ret = mirror_num;
|
||||
if (mirror_num_ret)
|
||||
*mirror_num_ret = mirror_num;
|
||||
}
|
||||
*bioc_ret = NULL;
|
||||
ret = 0;
|
||||
|
@@ -1732,6 +1732,20 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
memmove(here, (void *)here + size,
(void *)last - (void *)here + sizeof(__u32));
memset(last, 0, size);

/*
* Update i_inline_off - moved ibody region might contain
* system.data attribute. Handling a failure here won't
* cause other complications for setting an xattr.
*/
if (!is_block && ext4_has_inline_data(inode)) {
ret = ext4_find_inline_data_nolock(inode);
if (ret) {
ext4_warning_inode(inode,
"unable to update i_inline_off");
goto out;
}
}
} else if (s->not_found) {
/* Insert new name. */
size_t size = EXT4_XATTR_LEN(name_len);

@@ -1303,7 +1303,10 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
process_init_limits(fc, arg);

if (arg->minor >= 6) {
u64 flags = arg->flags | (u64) arg->flags2 << 32;
u64 flags = arg->flags;

if (flags & FUSE_INIT_EXT)
flags |= (u64) arg->flags2 << 32;

ra_pages = arg->max_readahead / PAGE_SIZE;
if (flags & FUSE_ASYNC_READ)

@ -9,14 +9,23 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/fileattr.h>
|
||||
|
||||
static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args)
|
||||
static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
|
||||
struct fuse_ioctl_out *outarg)
|
||||
{
|
||||
ssize_t ret = fuse_simple_request(fm, args);
|
||||
ssize_t ret;
|
||||
|
||||
args->out_args[0].size = sizeof(*outarg);
|
||||
args->out_args[0].value = outarg;
|
||||
|
||||
ret = fuse_simple_request(fm, args);
|
||||
|
||||
/* Translate ENOSYS, which shouldn't be returned from fs */
|
||||
if (ret == -ENOSYS)
|
||||
ret = -ENOTTY;
|
||||
|
||||
if (ret >= 0 && outarg->result == -ENOSYS)
|
||||
outarg->result = -ENOTTY;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -264,13 +273,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
|
||||
}
|
||||
|
||||
ap.args.out_numargs = 2;
|
||||
ap.args.out_args[0].size = sizeof(outarg);
|
||||
ap.args.out_args[0].value = &outarg;
|
||||
ap.args.out_args[1].size = out_size;
|
||||
ap.args.out_pages = true;
|
||||
ap.args.out_argvar = true;
|
||||
|
||||
transferred = fuse_send_ioctl(fm, &ap.args);
|
||||
transferred = fuse_send_ioctl(fm, &ap.args, &outarg);
|
||||
err = transferred;
|
||||
if (transferred < 0)
|
||||
goto out;
|
||||
@ -408,12 +415,10 @@ static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff,
|
||||
args.in_args[1].size = inarg.in_size;
|
||||
args.in_args[1].value = ptr;
|
||||
args.out_numargs = 2;
|
||||
args.out_args[0].size = sizeof(outarg);
|
||||
args.out_args[0].value = &outarg;
|
||||
args.out_args[1].size = inarg.out_size;
|
||||
args.out_args[1].value = ptr;
|
||||
|
||||
err = fuse_send_ioctl(fm, &args);
|
||||
err = fuse_send_ioctl(fm, &args, &outarg);
|
||||
if (!err) {
|
||||
if (outarg.result < 0)
|
||||
err = outarg.result;
|
||||
|
@ -57,28 +57,6 @@ static inline void __buffer_unlink(struct journal_head *jh)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Move a buffer from the checkpoint list to the checkpoint io list
|
||||
*
|
||||
* Called with j_list_lock held
|
||||
*/
|
||||
static inline void __buffer_relink_io(struct journal_head *jh)
|
||||
{
|
||||
transaction_t *transaction = jh->b_cp_transaction;
|
||||
|
||||
__buffer_unlink_first(jh);
|
||||
|
||||
if (!transaction->t_checkpoint_io_list) {
|
||||
jh->b_cpnext = jh->b_cpprev = jh;
|
||||
} else {
|
||||
jh->b_cpnext = transaction->t_checkpoint_io_list;
|
||||
jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
|
||||
jh->b_cpprev->b_cpnext = jh;
|
||||
jh->b_cpnext->b_cpprev = jh;
|
||||
}
|
||||
transaction->t_checkpoint_io_list = jh;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check a checkpoint buffer could be release or not.
|
||||
*
|
||||
@ -183,6 +161,7 @@ __flush_batch(journal_t *journal, int *batch_count)
|
||||
struct buffer_head *bh = journal->j_chkpt_bhs[i];
|
||||
BUFFER_TRACE(bh, "brelse");
|
||||
__brelse(bh);
|
||||
journal->j_chkpt_bhs[i] = NULL;
|
||||
}
|
||||
*batch_count = 0;
|
||||
}
|
||||
@ -242,6 +221,11 @@ int jbd2_log_do_checkpoint(journal_t *journal)
|
||||
jh = transaction->t_checkpoint_list;
|
||||
bh = jh2bh(jh);
|
||||
|
||||
/*
|
||||
* The buffer may be writing back, or flushing out in the
|
||||
* last couple of cycles, or re-adding into a new transaction,
|
||||
* need to check it again until it's unlocked.
|
||||
*/
|
||||
if (buffer_locked(bh)) {
|
||||
get_bh(bh);
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
@ -287,28 +271,32 @@ int jbd2_log_do_checkpoint(journal_t *journal)
|
||||
}
|
||||
if (!buffer_dirty(bh)) {
|
||||
BUFFER_TRACE(bh, "remove from checkpoint");
|
||||
if (__jbd2_journal_remove_checkpoint(jh))
|
||||
/* The transaction was released; we're done */
|
||||
/*
|
||||
* If the transaction was released or the checkpoint
|
||||
* list was empty, we're done.
|
||||
*/
|
||||
if (__jbd2_journal_remove_checkpoint(jh) ||
|
||||
!transaction->t_checkpoint_list)
|
||||
goto out;
|
||||
continue;
|
||||
} else {
|
||||
/*
|
||||
* We are about to write the buffer, it could be
|
||||
* raced by some other transaction shrink or buffer
|
||||
* re-log logic once we release the j_list_lock,
|
||||
* leave it on the checkpoint list and check status
|
||||
* again to make sure it's clean.
|
||||
*/
|
||||
BUFFER_TRACE(bh, "queue");
|
||||
get_bh(bh);
|
||||
J_ASSERT_BH(bh, !buffer_jwrite(bh));
|
||||
journal->j_chkpt_bhs[batch_count++] = bh;
|
||||
transaction->t_chp_stats.cs_written++;
|
||||
transaction->t_checkpoint_list = jh->b_cpnext;
|
||||
}
|
||||
/*
|
||||
* Important: we are about to write the buffer, and
|
||||
* possibly block, while still holding the journal
|
||||
* lock. We cannot afford to let the transaction
|
||||
* logic start messing around with this buffer before
|
||||
* we write it to disk, as that would break
|
||||
* recoverability.
|
||||
*/
|
||||
BUFFER_TRACE(bh, "queue");
|
||||
get_bh(bh);
|
||||
J_ASSERT_BH(bh, !buffer_jwrite(bh));
|
||||
journal->j_chkpt_bhs[batch_count++] = bh;
|
||||
__buffer_relink_io(jh);
|
||||
transaction->t_chp_stats.cs_written++;
|
||||
|
||||
if ((batch_count == JBD2_NR_BATCH) ||
|
||||
need_resched() ||
|
||||
spin_needbreak(&journal->j_list_lock))
|
||||
need_resched() || spin_needbreak(&journal->j_list_lock) ||
|
||||
jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
|
||||
goto unlock_and_flush;
|
||||
}
|
||||
|
||||
@ -322,38 +310,6 @@ int jbd2_log_do_checkpoint(journal_t *journal)
|
||||
goto restart;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now we issued all of the transaction's buffers, let's deal
|
||||
* with the buffers that are out for I/O.
|
||||
*/
|
||||
restart2:
|
||||
/* Did somebody clean up the transaction in the meanwhile? */
|
||||
if (journal->j_checkpoint_transactions != transaction ||
|
||||
transaction->t_tid != this_tid)
|
||||
goto out;
|
||||
|
||||
while (transaction->t_checkpoint_io_list) {
|
||||
jh = transaction->t_checkpoint_io_list;
|
||||
bh = jh2bh(jh);
|
||||
if (buffer_locked(bh)) {
|
||||
get_bh(bh);
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
wait_on_buffer(bh);
|
||||
/* the journal_head may have gone by now */
|
||||
BUFFER_TRACE(bh, "brelse");
|
||||
__brelse(bh);
|
||||
spin_lock(&journal->j_list_lock);
|
||||
goto restart2;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now in whatever state the buffer currently is, we
|
||||
* know that it has been written out and so we can
|
||||
* drop it from the list
|
||||
*/
|
||||
if (__jbd2_journal_remove_checkpoint(jh))
|
||||
break;
|
||||
}
|
||||
out:
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
result = jbd2_cleanup_journal_tail(journal);
|
||||
|
@ -1959,6 +1959,9 @@ dbAllocDmapLev(struct bmap * bmp,
|
||||
if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
|
||||
return -ENOSPC;
|
||||
|
||||
if (leafidx < 0)
|
||||
return -EIO;
|
||||
|
||||
/* determine the block number within the file system corresponding
|
||||
* to the leaf at which free space was found.
|
||||
*/
|
||||
|
@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag)
|
||||
jfs_info("txBegin: flag = 0x%x", flag);
|
||||
log = JFS_SBI(sb)->log;
|
||||
|
||||
if (!log) {
|
||||
jfs_error(sb, "read-only filesystem\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
TXN_LOCK();
|
||||
|
||||
INCREMENT(TxStat.txBegin);
|
||||
|
@ -799,6 +799,11 @@ static int jfs_link(struct dentry *old_dentry,
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
if (isReadOnly(ip)) {
|
||||
jfs_error(ip->i_sb, "read-only filesystem\n");
|
||||
return -EROFS;
|
||||
}
|
||||
|
||||
tid = txBegin(ip->i_sb, 0);
|
||||
|
||||
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
|
||||
|
@@ -33,6 +33,7 @@ struct ovl_sb {
};

struct ovl_layer {
/* ovl_free_fs() relies on @mnt being the first member! */
struct vfsmount *mnt;
/* Trap in ovl inode cache */
struct inode *trap;
@@ -43,6 +44,14 @@ struct ovl_layer {
int fsid;
};

/*
* ovl_free_fs() relies on @mnt being the first member when unmounting
* the private mounts created for each layer. Let's check both the
* offset and type.
*/
static_assert(offsetof(struct ovl_layer, mnt) == 0);
static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));

struct ovl_path {
const struct ovl_layer *layer;
struct dentry *dentry;

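Aside, not part of the merged series: the two static_assert() lines above turn an implicit layout assumption (ovl_free_fs() treats the vfsmount pointer as the first member) into a build-time check. The same guard can be written in plain C with offsetof() and _Static_assert(); the struct below is invented for illustration:

#include <stddef.h>

struct vfsmount;			/* opaque, as in the kernel header */

struct layer {
	struct vfsmount *mnt;		/* must stay first: teardown code relies on it */
	int fsid;
};

/* Build fails if anyone reorders the struct. */
_Static_assert(offsetof(struct layer, mnt) == 0,
	       "mnt must remain the first member of struct layer");

int main(void)
{
	return 0;
}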
@@ -555,7 +555,7 @@ static void invalidate_dquots(struct super_block *sb, int type)
continue;
/* Wait for dquot users */
if (atomic_read(&dquot->dq_count)) {
dqgrab(dquot);
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
/*
* Once dqput() wakes us up, we know it's time to free
@@ -2420,7 +2420,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,

error = add_dquot_ref(sb, type);
if (error)
dquot_disable(sb, type, flags);
dquot_disable(sb, type,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

return error;
out_fmt:

@@ -59,7 +59,7 @@ extern bool disable_legacy_dialects;
#define TLINK_IDLE_EXPIRE (600 * HZ)

/* Drop the connection to not overload the server */
#define NUM_STATUS_IO_TIMEOUT 5
#define MAX_STATUS_IO_TIMEOUT 5

struct mount_ctx {
struct cifs_sb_info *cifs_sb;
@ -1162,6 +1162,7 @@ cifs_demultiplex_thread(void *p)
|
||||
struct mid_q_entry *mids[MAX_COMPOUND];
|
||||
char *bufs[MAX_COMPOUND];
|
||||
unsigned int noreclaim_flag, num_io_timeout = 0;
|
||||
bool pending_reconnect = false;
|
||||
|
||||
noreclaim_flag = memalloc_noreclaim_save();
|
||||
cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
|
||||
@ -1201,6 +1202,8 @@ cifs_demultiplex_thread(void *p)
|
||||
cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
|
||||
if (!is_smb_response(server, buf[0]))
|
||||
continue;
|
||||
|
||||
pending_reconnect = false;
|
||||
next_pdu:
|
||||
server->pdu_size = pdu_length;
|
||||
|
||||
@ -1258,10 +1261,13 @@ cifs_demultiplex_thread(void *p)
|
||||
if (server->ops->is_status_io_timeout &&
|
||||
server->ops->is_status_io_timeout(buf)) {
|
||||
num_io_timeout++;
|
||||
if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
|
||||
cifs_reconnect(server, false);
|
||||
if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
|
||||
cifs_server_dbg(VFS,
|
||||
"Number of request timeouts exceeded %d. Reconnecting",
|
||||
MAX_STATUS_IO_TIMEOUT);
|
||||
|
||||
pending_reconnect = true;
|
||||
num_io_timeout = 0;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1308,6 +1314,11 @@ cifs_demultiplex_thread(void *p)
|
||||
buf = server->smallbuf;
|
||||
goto next_pdu;
|
||||
}
|
||||
|
||||
/* do this reconnect at the very end after processing all MIDs */
|
||||
if (pending_reconnect)
|
||||
cifs_reconnect(server, true);
|
||||
|
||||
} /* end while !EXITING */
|
||||
|
||||
/* buffer usually freed in free_mid - need to free it here on exit */
|
||||
|
@@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb,
}

if (translate) {
if (str_o_len <= 2 && str_o[0] == '.' &&
if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
(str_o_len == 1 || str_o[1] == '.'))
needsCRC = 1;
if (needsCRC) {

@ -815,8 +815,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
|
||||
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
|
||||
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
|
||||
|
||||
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
|
||||
|
||||
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
|
||||
const u8 *esi,
|
||||
u8 *ack,
|
||||
bool *handled);
|
||||
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
|
||||
|
||||
int
|
||||
drm_dp_mst_detect_port(struct drm_connector *connector,
|
||||
|
@ -23,8 +23,9 @@ void psi_memstall_enter(unsigned long *flags);
|
||||
void psi_memstall_leave(unsigned long *flags);
|
||||
|
||||
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
|
||||
struct psi_trigger *psi_trigger_create(struct psi_group *group,
|
||||
char *buf, enum psi_res res);
|
||||
struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
|
||||
enum psi_res res, struct file *file,
|
||||
struct kernfs_open_file *of);
|
||||
void psi_trigger_destroy(struct psi_trigger *t);
|
||||
|
||||
__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
|
||||
|
@ -72,6 +72,9 @@ enum psi_states {
|
||||
/* Use one bit in the state mask to track TSK_ONCPU */
|
||||
#define PSI_ONCPU (1 << NR_PSI_STATES)
|
||||
|
||||
/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
|
||||
#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
|
||||
|
||||
enum psi_aggregators {
|
||||
PSI_AVGS = 0,
|
||||
PSI_POLL,
|
||||
@ -134,6 +137,9 @@ struct psi_trigger {
|
||||
/* Wait queue for polling */
|
||||
wait_queue_head_t event_wait;
|
||||
|
||||
/* Kernfs file for cgroup triggers */
|
||||
struct kernfs_open_file *of;
|
||||
|
||||
/* Pending event flag */
|
||||
int event;
|
||||
|
||||
@ -148,6 +154,9 @@ struct psi_trigger {
|
||||
|
||||
/* Deferred event(s) from previous ratelimit window */
|
||||
bool pending_event;
|
||||
|
||||
/* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
|
||||
enum psi_aggregators aggregator;
|
||||
};
|
||||
|
||||
struct psi_group {
|
||||
@ -168,30 +177,34 @@ struct psi_group {
|
||||
/* Aggregator work control */
|
||||
struct delayed_work avgs_work;
|
||||
|
||||
/* Unprivileged triggers against N*PSI_FREQ windows */
|
||||
struct list_head avg_triggers;
|
||||
u32 avg_nr_triggers[NR_PSI_STATES - 1];
|
||||
|
||||
/* Total stall times and sampled pressure averages */
|
||||
u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
|
||||
unsigned long avg[NR_PSI_STATES - 1][3];
|
||||
|
||||
/* Monitor work control */
|
||||
struct task_struct __rcu *poll_task;
|
||||
struct timer_list poll_timer;
|
||||
wait_queue_head_t poll_wait;
|
||||
atomic_t poll_wakeup;
|
||||
atomic_t poll_scheduled;
|
||||
/* Monitor RT polling work control */
|
||||
struct task_struct __rcu *rtpoll_task;
|
||||
struct timer_list rtpoll_timer;
|
||||
wait_queue_head_t rtpoll_wait;
|
||||
atomic_t rtpoll_wakeup;
|
||||
atomic_t rtpoll_scheduled;
|
||||
|
||||
/* Protects data used by the monitor */
|
||||
struct mutex trigger_lock;
|
||||
struct mutex rtpoll_trigger_lock;
|
||||
|
||||
/* Configured polling triggers */
|
||||
struct list_head triggers;
|
||||
u32 nr_triggers[NR_PSI_STATES - 1];
|
||||
u32 poll_states;
|
||||
u64 poll_min_period;
|
||||
/* Configured RT polling triggers */
|
||||
struct list_head rtpoll_triggers;
|
||||
u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
|
||||
u32 rtpoll_states;
|
||||
u64 rtpoll_min_period;
|
||||
|
||||
/* Total stall times at the start of monitor activation */
|
||||
u64 polling_total[NR_PSI_STATES - 1];
|
||||
u64 polling_next_update;
|
||||
u64 polling_until;
|
||||
/* Total stall times at the start of RT polling monitor activation */
|
||||
u64 rtpoll_total[NR_PSI_STATES - 1];
|
||||
u64 rtpoll_next_update;
|
||||
u64 rtpoll_until;
|
||||
};
|
||||
|
||||
#else /* CONFIG_PSI */
|
||||
|
@@ -513,7 +513,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);

queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}

static inline void tcp_move_syn(struct tcp_sock *tp,

@ -820,6 +820,7 @@ struct hci_conn_params {
|
||||
|
||||
struct hci_conn *conn;
|
||||
bool explicit_connect;
|
||||
/* Accessed without hdev->lock: */
|
||||
hci_conn_flags_t flags;
|
||||
u8 privacy_mode;
|
||||
|
||||
@ -1551,7 +1552,11 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
|
||||
bdaddr_t *addr, u8 addr_type);
|
||||
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
|
||||
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
|
||||
void hci_conn_params_free(struct hci_conn_params *param);
|
||||
|
||||
void hci_pend_le_list_del_init(struct hci_conn_params *param);
|
||||
void hci_pend_le_list_add(struct hci_conn_params *param,
|
||||
struct list_head *list);
|
||||
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
|
||||
bdaddr_t *addr,
|
||||
u8 addr_type);
|
||||
|
@ -284,7 +284,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
|
||||
const struct ip_options *sopt,
|
||||
__be32 daddr, __be32 saddr,
|
||||
const struct ip_reply_arg *arg,
|
||||
unsigned int len, u64 transmit_time);
|
||||
unsigned int len, u64 transmit_time, u32 txhash);
|
||||
|
||||
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
|
||||
#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
|
||||
|
@ -1511,25 +1511,38 @@ void tcp_leave_memory_pressure(struct sock *sk);
|
||||
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
|
||||
{
|
||||
struct net *net = sock_net((struct sock *)tp);
|
||||
int val;
|
||||
|
||||
return tp->keepalive_intvl ? :
|
||||
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
|
||||
/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
|
||||
* and do_tcp_setsockopt().
|
||||
*/
|
||||
val = READ_ONCE(tp->keepalive_intvl);
|
||||
|
||||
return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
|
||||
}
|
||||
|
||||
static inline int keepalive_time_when(const struct tcp_sock *tp)
|
||||
{
|
||||
struct net *net = sock_net((struct sock *)tp);
|
||||
int val;
|
||||
|
||||
return tp->keepalive_time ? :
|
||||
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
|
||||
/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
|
||||
val = READ_ONCE(tp->keepalive_time);
|
||||
|
||||
return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
|
||||
}
|
||||
|
||||
static inline int keepalive_probes(const struct tcp_sock *tp)
|
||||
{
|
||||
struct net *net = sock_net((struct sock *)tp);
|
||||
int val;
|
||||
|
||||
return tp->keepalive_probes ? :
|
||||
READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
|
||||
/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
|
||||
* and do_tcp_setsockopt().
|
||||
*/
|
||||
val = READ_ONCE(tp->keepalive_probes);
|
||||
|
||||
return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
|
||||
}
|
||||
|
||||
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
|
||||
@ -2046,7 +2059,11 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
|
||||
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
|
||||
{
|
||||
struct net *net = sock_net((struct sock *)tp);
|
||||
return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
|
||||
u32 val;
|
||||
|
||||
val = READ_ONCE(tp->notsent_lowat);
|
||||
|
||||
return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
|
||||
}
|
||||
|
||||
bool tcp_stream_memory_free(const struct sock *sk, int wake);
|
||||
|
@@ -1803,6 +1803,14 @@ void io_wq_submit_work(struct io_wq_work *work)
ret = io_issue_sqe(req, issue_flags);
if (ret != -EAGAIN)
break;

/*
* If REQ_F_NOWAIT is set, then don't wait or retry with
* poll. -EAGAIN is final for that case.
*/
if (req->flags & REQ_F_NOWAIT)
break;

/*
* We can get EAGAIN for iopolled IO even though we're
* forcing a sync submission from here, since we can't

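Aside, not part of the merged series: the hunk above makes io-wq treat -EAGAIN as the final answer when the request carries REQ_F_NOWAIT instead of retrying via poll. A rough userspace analogue of that policy, with invented names and error handling for poll() omitted, is a read helper that only waits and retries when the caller did not ask for nonblocking behaviour:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len, bool nowait)
{
	for (;;) {
		ssize_t ret = read(fd, buf, len);

		if (ret >= 0 || errno != EAGAIN)
			return ret;
		if (nowait)
			return -1;	/* EAGAIN is final, like REQ_F_NOWAIT */

		/* otherwise wait until the fd is readable and try again */
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		poll(&pfd, 1, -1);
	}
}

int main(void)
{
	char buf[64];

	/* make stdin nonblocking so the nowait path is exercised */
	fcntl(STDIN_FILENO, F_SETFL, fcntl(STDIN_FILENO, F_GETFL, 0) | O_NONBLOCK);

	if (read_retry(STDIN_FILENO, buf, sizeof(buf), true) < 0 && errno == EAGAIN)
		puts("no data yet: EAGAIN reported as final, not retried");
	return 0;
}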
@@ -41,7 +41,12 @@ static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
return node->ref;
return READ_ONCE(node->ref);
}

static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
{
WRITE_ONCE(node->ref, 0);
}

static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
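Aside, not part of the merged series: these bpf_lru hunks convert the intentionally lockless accesses to node->ref into READ_ONCE()/WRITE_ONCE(), so the data race is annotated and the compiler cannot tear or refetch the accesses. READ_ONCE()/WRITE_ONCE() are kernel macros; the closest portable stand-in is a relaxed C11 atomic, as in this standalone sketch with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct lru_node {
	/* approximate access-frequency hint; racy by design, so every
	 * access is marked instead of being locked */
	atomic_bool ref;
};

static bool node_is_ref(struct lru_node *node)
{
	/* analogue of READ_ONCE(node->ref) */
	return atomic_load_explicit(&node->ref, memory_order_relaxed);
}

static void node_clear_ref(struct lru_node *node)
{
	/* analogue of WRITE_ONCE(node->ref, 0) */
	atomic_store_explicit(&node->ref, false, memory_order_relaxed);
}

static void node_set_ref(struct lru_node *node)
{
	/* test first to avoid dirtying the cache line on every hit */
	if (!node_is_ref(node))
		atomic_store_explicit(&node->ref, true, memory_order_relaxed);
}

int main(void)
{
	struct lru_node n = { .ref = false };

	node_set_ref(&n);
	printf("referenced: %d\n", node_is_ref(&n));
	node_clear_ref(&n);
	printf("referenced: %d\n", node_is_ref(&n));
	return 0;
}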
@ -89,7 +94,7 @@ static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
|
||||
|
||||
bpf_lru_list_count_inc(l, tgt_type);
|
||||
node->type = tgt_type;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
list_move(&node->list, &l->lists[tgt_type]);
|
||||
}
|
||||
|
||||
@ -110,7 +115,7 @@ static void __bpf_lru_node_move(struct bpf_lru_list *l,
|
||||
bpf_lru_list_count_inc(l, tgt_type);
|
||||
node->type = tgt_type;
|
||||
}
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
|
||||
/* If the moving node is the next_inactive_rotation candidate,
|
||||
* move the next_inactive_rotation pointer also.
|
||||
@ -353,7 +358,7 @@ static void __local_list_add_pending(struct bpf_lru *lru,
|
||||
*(u32 *)((void *)node + lru->hash_offset) = hash;
|
||||
node->cpu = cpu;
|
||||
node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
list_add(&node->list, local_pending_list(loc_l));
|
||||
}
|
||||
|
||||
@ -419,7 +424,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
|
||||
if (!list_empty(free_list)) {
|
||||
node = list_first_entry(free_list, struct bpf_lru_node, list);
|
||||
*(u32 *)((void *)node + lru->hash_offset) = hash;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
|
||||
}
|
||||
|
||||
@ -522,7 +527,7 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
|
||||
}
|
||||
|
||||
node->type = BPF_LRU_LOCAL_LIST_T_FREE;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
list_move(&node->list, local_free_list(loc_l));
|
||||
|
||||
raw_spin_unlock_irqrestore(&loc_l->lock, flags);
|
||||
@ -568,7 +573,7 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
|
||||
|
||||
node = (struct bpf_lru_node *)(buf + node_offset);
|
||||
node->type = BPF_LRU_LIST_T_FREE;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
|
||||
buf += elem_size;
|
||||
}
|
||||
@ -594,7 +599,7 @@ static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
|
||||
node = (struct bpf_lru_node *)(buf + node_offset);
|
||||
node->cpu = cpu;
|
||||
node->type = BPF_LRU_LIST_T_FREE;
|
||||
node->ref = 0;
|
||||
bpf_lru_node_clear_ref(node);
|
||||
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
|
||||
i++;
|
||||
buf += elem_size;
|
||||
|
@@ -64,11 +64,8 @@ struct bpf_lru {

static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
{
/* ref is an approximation on access frequency. It does not
* have to be very accurate. Hence, no protection is used.
*/
if (!node->ref)
node->ref = 1;
if (!READ_ONCE(node->ref))
WRITE_ONCE(node->ref, 1);
}

int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,

@@ -5293,7 +5293,8 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
*(int *)table->data = unpriv_enable;
}

unpriv_ebpf_notify(unpriv_enable);
if (write)
unpriv_ebpf_notify(unpriv_enable);

return ret;
}

@ -511,6 +511,15 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
|
||||
return func_id == BPF_FUNC_dynptr_data;
|
||||
}
|
||||
|
||||
static bool is_callback_calling_function(enum bpf_func_id func_id)
|
||||
{
|
||||
return func_id == BPF_FUNC_for_each_map_elem ||
|
||||
func_id == BPF_FUNC_timer_set_callback ||
|
||||
func_id == BPF_FUNC_find_vma ||
|
||||
func_id == BPF_FUNC_loop ||
|
||||
func_id == BPF_FUNC_user_ringbuf_drain;
|
||||
}
|
||||
|
||||
static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
|
||||
const struct bpf_map *map)
|
||||
{
|
||||
@ -1693,7 +1702,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
|
||||
reg->type = SCALAR_VALUE;
|
||||
reg->var_off = tnum_unknown;
|
||||
reg->frameno = 0;
|
||||
reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
|
||||
reg->precise = !env->bpf_capable;
|
||||
__mark_reg_unbounded(reg);
|
||||
}
|
||||
|
||||
@ -2670,6 +2679,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
|
||||
*/
|
||||
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
|
||||
return -ENOTSUPP;
|
||||
/* BPF helpers that invoke callback subprogs are
|
||||
* equivalent to BPF_PSEUDO_CALL above
|
||||
*/
|
||||
if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
|
||||
return -ENOTSUPP;
|
||||
/* regular helper call sets R0 */
|
||||
*reg_mask &= ~1;
|
||||
if (*reg_mask & 0x3f) {
|
||||
@ -2774,8 +2788,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
|
||||
|
||||
/* big hammer: mark all scalars precise in this path.
|
||||
* pop_stack may still get !precise scalars.
|
||||
* We also skip current state and go straight to first parent state,
|
||||
* because precision markings in current non-checkpointed state are
|
||||
* not needed. See why in the comment in __mark_chain_precision below.
|
||||
*/
|
||||
for (; st; st = st->parent)
|
||||
for (st = st->parent; st; st = st->parent) {
|
||||
for (i = 0; i <= st->curframe; i++) {
|
||||
func = st->frame[i];
|
||||
for (j = 0; j < BPF_REG_FP; j++) {
|
||||
@ -2793,8 +2810,121 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
|
||||
reg->precise = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
|
||||
{
|
||||
struct bpf_func_state *func;
|
||||
struct bpf_reg_state *reg;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i <= st->curframe; i++) {
|
||||
func = st->frame[i];
|
||||
for (j = 0; j < BPF_REG_FP; j++) {
|
||||
reg = &func->regs[j];
|
||||
if (reg->type != SCALAR_VALUE)
|
||||
continue;
|
||||
reg->precise = false;
|
||||
}
|
||||
for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
|
||||
if (!is_spilled_reg(&func->stack[j]))
|
||||
continue;
|
||||
reg = &func->stack[j].spilled_ptr;
|
||||
if (reg->type != SCALAR_VALUE)
|
||||
continue;
|
||||
reg->precise = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* __mark_chain_precision() backtracks BPF program instruction sequence and
|
||||
* chain of verifier states making sure that register *regno* (if regno >= 0)
|
||||
* and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
|
||||
* SCALARS, as well as any other registers and slots that contribute to
|
||||
* a tracked state of given registers/stack slots, depending on specific BPF
|
||||
* assembly instructions (see backtrack_insns() for exact instruction handling
|
||||
* logic). This backtracking relies on recorded jmp_history and is able to
|
||||
* traverse entire chain of parent states. This process ends only when all the
|
||||
* necessary registers/slots and their transitive dependencies are marked as
|
||||
* precise.
|
||||
*
|
||||
* One important and subtle aspect is that precise marks *do not matter* in
|
||||
* the currently verified state (current state). It is important to understand
|
||||
* why this is the case.
|
||||
*
|
||||
* First, note that current state is the state that is not yet "checkpointed",
|
||||
* i.e., it is not yet put into env->explored_states, and it has no children
|
||||
* states as well. It's ephemeral, and can end up either a) being discarded if
|
||||
* compatible explored state is found at some point or BPF_EXIT instruction is
|
||||
* reached or b) checkpointed and put into env->explored_states, branching out
|
||||
* into one or more children states.
|
||||
*
|
||||
* In the former case, precise markings in current state are completely
|
||||
* ignored by state comparison code (see regsafe() for details). Only
|
||||
* checkpointed ("old") state precise markings are important, and if old
|
||||
* state's register/slot is precise, regsafe() assumes current state's
|
||||
* register/slot as precise and checks value ranges exactly and precisely. If
|
||||
* states turn out to be compatible, current state's necessary precise
|
||||
* markings and any required parent states' precise markings are enforced
|
||||
* after the fact with propagate_precision() logic, after the fact. But it's
|
||||
* important to realize that in this case, even after marking current state
|
||||
* registers/slots as precise, we immediately discard current state. So what
|
||||
* actually matters is any of the precise markings propagated into current
|
||||
* state's parent states, which are always checkpointed (due to b) case above).
|
||||
* As such, for scenario a) it doesn't matter if current state has precise
|
||||
* markings set or not.
|
||||
*
|
||||
* Now, for the scenario b), checkpointing and forking into child(ren)
|
||||
* state(s). Note that before current state gets to checkpointing step, any
|
||||
* processed instruction always assumes precise SCALAR register/slot
|
||||
* knowledge: if precise value or range is useful to prune jump branch, BPF
|
||||
* verifier takes this opportunity enthusiastically. Similarly, when
|
||||
* register's value is used to calculate offset or memory address, exact
|
||||
* knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
|
||||
* what we mentioned above about state comparison ignoring precise markings
|
||||
* during state comparison, BPF verifier ignores and also assumes precise
|
||||
* markings *at will* during instruction verification process. But as verifier
|
||||
* assumes precision, it also propagates any precision dependencies across
|
||||
* parent states, which are not yet finalized, so can be further restricted
|
||||
* based on new knowledge gained from restrictions enforced by their children
|
||||
* states. This is so that once those parent states are finalized, i.e., when
|
||||
* they have no more active children state, state comparison logic in
|
||||
* is_state_visited() would enforce strict and precise SCALAR ranges, if
|
||||
* required for correctness.
|
||||
*
|
||||
* To build a bit more intuition, note also that once a state is checkpointed,
|
||||
* the path we took to get to that state is not important. This is crucial
|
||||
* property for state pruning. When state is checkpointed and finalized at
|
||||
* some instruction index, it can be correctly and safely used to "short
|
||||
* circuit" any *compatible* state that reaches exactly the same instruction
|
||||
* index. I.e., if we jumped to that instruction from a completely different
|
||||
* code path than original finalized state was derived from, it doesn't
|
||||
* matter, current state can be discarded because from that instruction
|
||||
* forward having a compatible state will ensure we will safely reach the
|
||||
* exit. States describe preconditions for further exploration, but completely
|
||||
* forget the history of how we got here.
|
||||
*
|
||||
* This also means that even if we needed precise SCALAR range to get to
|
||||
* finalized state, but from that point forward *that same* SCALAR register is
|
||||
* never used in a precise context (i.e., it's precise value is not needed for
|
||||
* correctness), it's correct and safe to mark such register as "imprecise"
|
||||
* (i.e., precise marking set to false). This is what we rely on when we do
|
||||
* not set precise marking in current state. If no child state requires
|
||||
* precision for any given SCALAR register, it's safe to dictate that it can
|
||||
* be imprecise. If any child state does require this register to be precise,
|
||||
* we'll mark it precise later retroactively during precise markings
|
||||
* propagation from child state to parent states.
|
||||
*
|
||||
* Skipping precise marking setting in current state is a mild version of
|
||||
* relying on the above observation. But we can utilize this property even
|
||||
* more aggressively by proactively forgetting any precise marking in the
|
||||
* current state (which we inherited from the parent state), right before we
|
||||
* checkpoint it and branch off into new child state. This is done by
|
||||
* mark_all_scalars_imprecise() to hopefully get more permissive and generic
|
||||
* finalized states which help in short circuiting more future states.
|
||||
*/
|
||||
static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
|
||||
int spi)
|
||||
{
|
||||
@ -2812,6 +2942,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
|
||||
if (!env->bpf_capable)
|
||||
return 0;
|
||||
|
||||
/* Do sanity checks against current state of register and/or stack
|
||||
* slot, but don't set precise flag in current state, as precision
|
||||
* tracking in the current state is unnecessary.
|
||||
*/
|
||||
func = st->frame[frame];
|
||||
if (regno >= 0) {
|
||||
reg = &func->regs[regno];
|
||||
@ -2819,11 +2953,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
|
||||
WARN_ONCE(1, "backtracing misuse");
|
||||
return -EFAULT;
|
||||
}
|
||||
if (!reg->precise)
|
||||
new_marks = true;
|
||||
else
|
||||
reg_mask = 0;
|
||||
reg->precise = true;
|
||||
new_marks = true;
|
||||
}
|
||||
|
||||
while (spi >= 0) {
|
||||
@ -2836,11 +2966,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
|
||||
stack_mask = 0;
|
||||
break;
|
||||
}
|
||||
if (!reg->precise)
|
||||
new_marks = true;
|
||||
else
|
||||
stack_mask = 0;
|
||||
reg->precise = true;
|
||||
new_marks = true;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -2848,12 +2974,42 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
|
||||
return 0;
|
||||
if (!reg_mask && !stack_mask)
|
||||
return 0;
|
||||
|
||||
for (;;) {
|
||||
DECLARE_BITMAP(mask, 64);
|
||||
u32 history = st->jmp_history_cnt;
|
||||
|
||||
if (env->log.level & BPF_LOG_LEVEL2)
|
||||
verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
|
||||
|
||||
if (last_idx < 0) {
|
||||
/* we are at the entry into subprog, which
|
||||
* is expected for global funcs, but only if
|
||||
* requested precise registers are R1-R5
|
||||
* (which are global func's input arguments)
|
||||
*/
|
||||
if (st->curframe == 0 &&
|
||||
st->frame[0]->subprogno > 0 &&
|
||||
st->frame[0]->callsite == BPF_MAIN_FUNC &&
|
||||
stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
|
||||
bitmap_from_u64(mask, reg_mask);
|
||||
for_each_set_bit(i, mask, 32) {
|
||||
reg = &st->frame[0]->regs[i];
|
||||
if (reg->type != SCALAR_VALUE) {
|
||||
reg_mask &= ~(1u << i);
|
||||
continue;
|
||||
}
|
||||
reg->precise = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
|
||||
st->frame[0]->subprogno, reg_mask, stack_mask);
|
||||
WARN_ONCE(1, "verifier backtracking bug");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = last_idx;;) {
|
||||
if (skip_first) {
|
||||
err = 0;
|
||||
@ -4288,16 +4444,17 @@ static int update_stack_depth(struct bpf_verifier_env *env,
|
||||
* Since recursion is prevented by check_cfg() this algorithm
|
||||
* only needs a local stack of MAX_CALL_FRAMES to remember callsites
|
||||
*/
|
||||
static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
|
||||
{
|
||||
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
|
||||
struct bpf_subprog_info *subprog = env->subprog_info;
|
||||
struct bpf_insn *insn = env->prog->insnsi;
|
||||
int depth = 0, frame = 0, i, subprog_end;
|
||||
bool tail_call_reachable = false;
|
||||
int ret_insn[MAX_CALL_FRAMES];
|
||||
int ret_prog[MAX_CALL_FRAMES];
|
||||
int j;
|
||||
|
||||
i = subprog[idx].start;
|
||||
process_func:
|
||||
/* protect against potential stack overflow that might happen when
|
||||
* bpf2bpf calls get combined with tailcalls. Limit the caller's stack
|
||||
@ -4336,7 +4493,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
continue_func:
|
||||
subprog_end = subprog[idx + 1].start;
|
||||
for (; i < subprog_end; i++) {
|
||||
int next_insn;
|
||||
int next_insn, sidx;
|
||||
|
||||
if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
|
||||
continue;
|
||||
@ -4346,14 +4503,14 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
|
||||
/* find the callee */
|
||||
next_insn = i + insn[i].imm + 1;
|
||||
idx = find_subprog(env, next_insn);
|
||||
if (idx < 0) {
|
||||
sidx = find_subprog(env, next_insn);
|
||||
if (sidx < 0) {
|
||||
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
|
||||
next_insn);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (subprog[idx].is_async_cb) {
|
||||
if (subprog[idx].has_tail_call) {
|
||||
if (subprog[sidx].is_async_cb) {
|
||||
if (subprog[sidx].has_tail_call) {
|
||||
verbose(env, "verifier bug. subprog has tail_call and async cb\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
@ -4362,6 +4519,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
continue;
|
||||
}
|
||||
i = next_insn;
|
||||
idx = sidx;
|
||||
|
||||
if (subprog[idx].has_tail_call)
|
||||
tail_call_reachable = true;
|
||||
@ -4397,6 +4555,22 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
goto continue_func;
|
||||
}
|
||||
|
||||
static int check_max_stack_depth(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_subprog_info *si = env->subprog_info;
|
||||
int ret;
|
||||
|
||||
for (int i = 0; i < env->subprog_cnt; i++) {
|
||||
if (!i || si[i].is_async_cb) {
|
||||
ret = check_max_stack_depth_subprog(env, i);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
|
||||
static int get_callee_stack_depth(struct bpf_verifier_env *env,
|
||||
const struct bpf_insn *insn, int idx)
|
||||
@ -6714,6 +6888,10 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *callee,
|
||||
int insn_idx);
|
||||
|
||||
static int set_callee_state(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee, int insn_idx);
|
||||
|
||||
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx, int subprog,
|
||||
set_callee_state_fn set_callee_state_cb)
|
||||
@ -6764,6 +6942,16 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
}
|
||||
}
|
||||
|
||||
/* set_callee_state is used for direct subprog calls, but we are
|
||||
* interested in validating only BPF helpers that can call subprogs as
|
||||
* callbacks
|
||||
*/
|
||||
if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
|
||||
verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (insn->code == (BPF_JMP | BPF_CALL) &&
|
||||
insn->src_reg == 0 &&
|
||||
insn->imm == BPF_FUNC_timer_set_callback) {
|
||||
@ -11592,7 +11780,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
|
||||
if (env->explore_alu_limits)
|
||||
return false;
|
||||
if (rcur->type == SCALAR_VALUE) {
|
||||
if (!rold->precise && !rcur->precise)
|
||||
if (!rold->precise)
|
||||
return true;
|
||||
/* new val must satisfy old val knowledge */
|
||||
return range_within(rold, rcur) &&
|
||||
@ -12141,6 +12329,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
||||
env->prev_jmps_processed = env->jmps_processed;
|
||||
env->prev_insn_processed = env->insn_processed;
|
||||
|
||||
/* forget precise markings we inherited, see __mark_chain_precision */
|
||||
if (env->bpf_capable)
|
||||
mark_all_scalars_imprecise(env, cur);
|
||||
|
||||
/* add new state to the head of linked list */
|
||||
new = &new_sl->state;
|
||||
err = copy_verifier_state(new, cur);
|
||||
@ -14695,6 +14887,8 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
|
||||
BPF_MAIN_FUNC /* callsite */,
|
||||
0 /* frameno */,
|
||||
subprog);
|
||||
state->first_insn_idx = env->subprog_info[subprog].start;
|
||||
state->last_insn_idx = -1;
|
||||
|
||||
regs = state->frame[state->curframe]->regs;
|
||||
if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
|
||||
|
@ -3777,7 +3777,7 @@ static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
|
||||
}
|
||||
|
||||
psi = cgroup_psi(cgrp);
|
||||
new = psi_trigger_create(psi, buf, res);
|
||||
new = psi_trigger_create(psi, buf, res, of->file, of);
|
||||
if (IS_ERR(new)) {
|
||||
cgroup_put(cgrp);
|
||||
return PTR_ERR(new);
|
||||
|
@ -174,11 +174,10 @@ static bool cleanup_symbol_name(char *s)
|
||||
* LLVM appends various suffixes for local functions and variables that
|
||||
* must be promoted to global scope as part of LTO. This can break
|
||||
* hooking of static functions with kprobes. '.' is not a valid
|
||||
* character in an identifier in C. Suffixes observed:
|
||||
* character in an identifier in C. Suffixes only in LLVM LTO observed:
|
||||
* - foo.llvm.[0-9a-f]+
|
||||
* - foo.[0-9a-f]+
|
||||
*/
|
||||
res = strchr(s, '.');
|
||||
res = strstr(s, ".llvm.");
|
||||
if (res) {
|
||||
*res = '\0';
|
||||
return true;
|
||||
@ -187,26 +186,90 @@ static bool cleanup_symbol_name(char *s)
|
||||
return false;
|
||||
}
|
||||
|
||||
static int compare_symbol_name(const char *name, char *namebuf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = strcmp(name, namebuf);
|
||||
if (!ret)
|
||||
return ret;
|
||||
|
||||
if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int kallsyms_lookup_names(const char *name,
|
||||
unsigned int *start,
|
||||
unsigned int *end)
|
||||
{
|
||||
int ret;
|
||||
int low, mid, high;
|
||||
unsigned int seq, off;
|
||||
char namebuf[KSYM_NAME_LEN];
|
||||
|
||||
low = 0;
|
||||
high = kallsyms_num_syms - 1;
|
||||
|
||||
while (low <= high) {
|
||||
mid = low + (high - low) / 2;
|
||||
seq = kallsyms_seqs_of_names[mid];
|
||||
off = get_symbol_offset(seq);
|
||||
kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
|
||||
ret = compare_symbol_name(name, namebuf);
|
||||
if (ret > 0)
|
||||
low = mid + 1;
|
||||
else if (ret < 0)
|
||||
high = mid - 1;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (low > high)
|
||||
return -ESRCH;
|
||||
|
||||
low = mid;
|
||||
while (low) {
|
||||
seq = kallsyms_seqs_of_names[low - 1];
|
||||
off = get_symbol_offset(seq);
|
||||
kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
|
||||
if (compare_symbol_name(name, namebuf))
|
||||
break;
|
||||
low--;
|
||||
}
|
||||
*start = low;
|
||||
|
||||
if (end) {
|
||||
high = mid;
|
||||
while (high < kallsyms_num_syms - 1) {
|
||||
seq = kallsyms_seqs_of_names[high + 1];
|
||||
off = get_symbol_offset(seq);
|
||||
kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
|
||||
if (compare_symbol_name(name, namebuf))
|
||||
break;
|
||||
high++;
|
||||
}
|
||||
*end = high;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
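Aside, not part of the merged series: kallsyms_lookup_names() above binary-searches the name-sorted index and then widens the hit to the left and right so *start/*end cover every entry carrying the same (possibly suffix-cleaned) name. The same expand-around-a-hit idea on an ordinary sorted string array, without the compressed-name expansion or suffix handling and with invented names, looks like this standalone sketch:

#include <stdio.h>
#include <string.h>

/* Find the first and last index matching @name in a name-sorted table.
 * Returns 0 on success, -1 if the name is absent. */
static int lookup_range(const char **tab, int n, const char *name,
			int *start, int *end)
{
	int low = 0, high = n - 1, mid = 0, ret = -1;

	while (low <= high) {
		mid = low + (high - low) / 2;
		ret = strcmp(name, tab[mid]);
		if (ret > 0)
			low = mid + 1;
		else if (ret < 0)
			high = mid - 1;
		else
			break;
	}
	if (low > high)
		return -1;

	/* walk outwards from the hit to cover duplicate names */
	low = mid;
	while (low > 0 && !strcmp(name, tab[low - 1]))
		low--;
	*start = low;

	high = mid;
	while (high < n - 1 && !strcmp(name, tab[high + 1]))
		high++;
	*end = high;

	return 0;
}

int main(void)
{
	const char *syms[] = { "alpha", "beta", "beta", "beta", "gamma" };
	int s, e;

	if (!lookup_range(syms, 5, "beta", &s, &e))
		printf("beta occupies [%d, %d]\n", s, e);	/* [1, 3] */
	return 0;
}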
|
||||
|
||||
/* Lookup the address for this symbol. Returns 0 if not found. */
|
||||
unsigned long kallsyms_lookup_name(const char *name)
|
||||
{
|
||||
char namebuf[KSYM_NAME_LEN];
|
||||
unsigned long i;
|
||||
unsigned int off;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
/* Skip the search for empty string. */
|
||||
if (!*name)
|
||||
return 0;
|
||||
|
||||
for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
|
||||
off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
|
||||
ret = kallsyms_lookup_names(name, &i, NULL);
|
||||
if (!ret)
|
||||
return kallsyms_sym_address(kallsyms_seqs_of_names[i]);
|
||||
|
||||
if (strcmp(namebuf, name) == 0)
|
||||
return kallsyms_sym_address(i);
|
||||
|
||||
if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0)
|
||||
return kallsyms_sym_address(i);
|
||||
}
|
||||
return module_kallsyms_lookup_name(name);
|
||||
}
|
||||
|
||||
|
@@ -26,5 +26,6 @@ extern const char kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;

extern const unsigned int kallsyms_markers[] __weak;
extern const unsigned int kallsyms_seqs_of_names[] __weak;

#endif // LINUX_KALLSYMS_INTERNAL_H_

Some files were not shown because too many files have changed in this diff.