Merge ddee5b4b6a ("mptcp: pm: avoid possible UaF when selecting endp") into android12-5.10-lts

Steps on the way to 5.10.226

Change-Id: I4ee8b8e793b6ecb98c7078125d994b46b1165c2e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit b22678f8ef by Greg Kroah-Hartman, 2024-09-13 05:14:07 +00:00
92 changed files with 865 additions and 500 deletions


@ -85,6 +85,17 @@ is already free).
Should be called from a process context (might sleep).
::
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
After verifying the owner of the hwspinlock, release a previously acquired
hwspinlock; returns 0 on success, or an appropriate error code on failure
(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
hwspinlock).
Should be called from a process context (might sleep).
::
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
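For orientation, a minimal caller of the hwspin_lock_bust() interface documented above could look like the sketch below. The lock handle, the owner id value and the surrounding driver context are assumptions made for illustration; they are not taken from this series.

/*
 * Illustrative sketch only: recover a hwspinlock left held by a crashed
 * remote processor. REMOTE_OWNER_ID is a hypothetical owner id.
 */
#include <linux/hwspinlock.h>

#define REMOTE_OWNER_ID	1	/* assumed id of the remote lock holder */

static int recover_stuck_lock(struct hwspinlock *hwlock)
{
	int ret;

	/* process context only; hwspin_lock_bust() might sleep */
	ret = hwspin_lock_bust(hwlock, REMOTE_OWNER_ID);
	if (ret == -EOPNOTSUPP)
		pr_warn("bust operation not supported by this controller\n");

	return ret;
}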


@ -216,6 +216,7 @@ bool bio_integrity_prep(struct bio *bio)
unsigned int bytes, offset, i;
unsigned int intervals;
blk_status_t status;
gfp_t gfp = GFP_NOIO;
if (!bi)
return true;
@ -238,12 +239,20 @@ bool bio_integrity_prep(struct bio *bio)
if (!bi->profile->generate_fn ||
!(bi->flags & BLK_INTEGRITY_GENERATE))
return true;
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk. For PI this only affects the app tag, but
* for non-integrity metadata it affects the entire metadata
* buffer.
*/
gfp |= __GFP_ZERO;
}
intervals = bio_integrity_intervals(bi, bio_sectors(bio));
/* Allocate kernel buffer for protection data */
len = intervals * bi->tuple_size;
buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
buf = kmalloc(len, gfp | q->bounce_gfp);
status = BLK_STS_RESOURCE;
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");


@ -431,8 +431,6 @@ void blk_integrity_unregister(struct gendisk *disk)
if (!bi->profile)
return;
/* ensure all bios are off the integrity workqueue */
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(bi, 0, sizeof(*bi));
}


@ -5429,8 +5429,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
}
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
if (!dr) {
kfree(host);
goto err_out;
}
devres_add(dev, dr);
dev_set_drvdata(dev, host);


@ -38,7 +38,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20
@ -1321,8 +1321,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
return regmap_update_bits(regmap, PLL_USER_CTL(pll),
PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
val << PLL_POST_DIV_SHIFT);
PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
val << pll->post_div_shift);
}
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {


@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
res.clock = clock;
return res;
}


@ -1626,6 +1626,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
(u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
if (i == 0)
continue;
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
}


@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
struct amdgpu_firmware_info *ucode;
id = fw_type_convert(cgs_device, type);
if (id >= AMDGPU_UCODE_ID_MAXIMUM)
return -EINVAL;
ucode = &adev->firmware.ucode[id];
if (ucode->fw == NULL)
return -EINVAL;


@ -260,7 +260,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
if (!ring->no_scheduler) {
if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
hw_ip = ring->funcs->type;
num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
@ -368,8 +368,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
loff_t i;
int r;
if (*pos & 3 || size & 3)
return -EINVAL;


@ -404,6 +404,8 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
uint64_t retired_page;
uint32_t bp_idx, bp_cnt;
memset(&bp, 0, sizeof(bp));
if (bp_block_size) {
bp_cnt = bp_block_size / sizeof(uint64_t);
for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
@ -550,7 +552,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
vf2pf_info, sizeof(*vf2pf_info), 0, 0);
return 0;
}


@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
int fb_channel_number;
fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
fb_channel_number = 0;
return df_v1_7_channel_number[fb_channel_number];
}


@ -313,7 +313,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
RAS_CNTLR_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
if (!ras->disable_ras_err_cnt_harvest) {
if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
/*
* clear error status after ras_controller_intr
* according to hw team and count ue number


@ -42,8 +42,6 @@
#define CRAT_OEMTABLEID_LENGTH 8
#define CRAT_RESERVED_LENGTH 6
#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
/* Compute Unit flags */
#define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
#define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */


@ -906,8 +906,7 @@ static void kfd_update_system_properties(void)
dev = list_last_entry(&topology_device_list,
struct kfd_topology_device, list);
if (dev) {
sys_props.platform_id =
(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
sys_props.platform_id = dev->oem_id64;
sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
sys_props.platform_rev = dev->oem_revision;
}


@ -182,7 +182,10 @@ struct kfd_topology_device {
struct attribute attr_gpuid;
struct attribute attr_name;
struct attribute attr_props;
uint8_t oem_id[CRAT_OEMID_LENGTH];
union {
uint8_t oem_id[CRAT_OEMID_LENGTH];
uint64_t oem_id64;
};
uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
uint32_t oem_revision;
};


@ -3341,7 +3341,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
if (primary_planes > AMDGPU_MAX_PLANES) {
DRM_ERROR("DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
/*
* Initialize primary planes, implicit planes for legacy IOCTLS.


@ -455,7 +455,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* Modify previous watermark range to cover up to max */
ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
if (num_valid_sets > 0)
ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
}
num_valid_sets++;
}


@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
int pair;
uint16_t odd_coef, even_coef;
if (!filter)
return;
for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
for (pair = 0; pair < tap_pairs; pair++) {
even_coef = filter[phase * taps + 2 * pair];


@ -58,7 +58,7 @@ struct gpio_service *dal_gpio_service_create(
struct dc_context *ctx)
{
struct gpio_service *service;
uint32_t index_of_id;
int32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@ -114,7 +114,7 @@ struct gpio_service *dal_gpio_service_create(
return service;
failure_2:
while (index_of_id) {
while (index_of_id > 0) {
--index_of_id;
kfree(service->busyness[index_of_id]);
}
@ -241,6 +241,9 @@ static bool is_pin_busy(
enum gpio_id id,
uint32_t en)
{
if (id == GPIO_ID_UNKNOWN)
return false;
return service->busyness[id][en];
}
@ -249,6 +252,9 @@ static void set_pin_busy(
enum gpio_id id,
uint32_t en)
{
if (id == GPIO_ID_UNKNOWN)
return;
service->busyness[id][en] = true;
}
@ -257,6 +263,9 @@ static void set_pin_free(
enum gpio_id id,
uint32_t en)
{
if (id == GPIO_ID_UNKNOWN)
return;
service->busyness[id][en] = false;
}
@ -265,7 +274,7 @@ enum gpio_result dal_gpio_service_lock(
enum gpio_id id,
uint32_t en)
{
if (!service->busyness[id]) {
if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_OPEN_FAILED;
}
@ -279,7 +288,7 @@ enum gpio_result dal_gpio_service_unlock(
enum gpio_id id,
uint32_t en)
{
if (!service->busyness[id]) {
if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
ASSERT_CRITICAL(false);
return GPIO_RESULT_OPEN_FAILED;
}


@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
struct i2c_command i2c_command;
uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
uint8_t offset;
struct i2c_payload i2c_payloads[] = {
{ true, 0, 1, &offset },
{ true, 0, 1, 0 },
/* actual hdcp payload, will be filled later, zeroed for now*/
{ 0 }
};
if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
return false;
}
offset = hdcp_i2c_offsets[message_info->msg_id];
i2c_payloads[0].data = &offset;
switch (message_info->link) {
case HDCP_LINK_SECONDARY:
i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
struct dc_link *link,
struct hdcp_protection_message *message_info)
{
if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
return false;
}
return dpcd_access_helper(
link,
message_info->length,


@ -156,11 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
return MOD_HDCP_STATUS_DDC_FAILURE;
}
if (is_dp_hdcp(hdcp)) {
int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
sizeof(hdcp_dpcd_addrs[0]);
if (msg_id >= num_dpcd_addrs)
return MOD_HDCP_STATUS_DDC_FAILURE;
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
@ -175,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
data_offset += cur_size;
}
} else {
int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
sizeof(hdcp_i2c_offsets[0]);
if (msg_id >= num_i2c_offsets)
return MOD_HDCP_STATUS_DDC_FAILURE;
success = hdcp->config.ddc.funcs.read_i2c(
hdcp->config.ddc.handle,
HDCP_I2C_ADDR,
@ -219,11 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
uint32_t cur_size = 0;
uint32_t data_offset = 0;
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
return MOD_HDCP_STATUS_DDC_FAILURE;
}
if (is_dp_hdcp(hdcp)) {
int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
sizeof(hdcp_dpcd_addrs[0]);
if (msg_id >= num_dpcd_addrs)
return MOD_HDCP_STATUS_DDC_FAILURE;
while (buf_len > 0) {
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
success = hdcp->config.ddc.funcs.write_dpcd(
@ -239,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
data_offset += cur_size;
}
} else {
int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
sizeof(hdcp_i2c_offsets[0]);
if (msg_id >= num_i2c_offsets)
return MOD_HDCP_STATUS_DDC_FAILURE;
hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
memmove(&hdcp->buf[1], buf, buf_len);
success = hdcp->config.ddc.funcs.write_i2c(


@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
{
int result;
unsigned int i;
unsigned int table_entries;
struct pp_power_state *state;
int size;
int size, table_entries;
if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
return 0;
@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
if (hwmgr->hwmgr_func->get_power_state_size == NULL)
return 0;
hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
sizeof(struct pp_power_state);
if (table_entries == 0 || size == 0) {
if (table_entries <= 0 || size == 0) {
pr_warn("Please check whether power state management is supported on this asic\n");
hwmgr->num_ps = 0;
hwmgr->ps_size = 0;
return 0;
}
hwmgr->num_ps = table_entries;
hwmgr->ps_size = size;
hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
if (hwmgr->ps == NULL)


@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
j++;
} else if ((table->mc_reg_address[i].uc_pre_reg_data &
LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
table->mc_reg_table_entry[num_ranges].mc_data[i] =
table->mc_reg_table_entry[num_ranges].mc_data[i-1];
if (i)
table->mc_reg_table_entry[num_ranges].mc_data[i] =
table->mc_reg_table_entry[num_ranges].mc_data[i-1];
}
}
num_ranges++;


@ -5190,7 +5190,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
if (size < 8 && size != 0)
if (size != 8 && size != 0)
return -EINVAL;
/* If only CUSTOM is passed in, use the saved values. Check
* that we actually have a CUSTOM profile by ensuring that


@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
int ret;
if (NULL == table || table->count <= 0)
return -EINVAL;
@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
data->uvd_dpm.soft_min_clk = 0;
data->uvd_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
if (ret)
return ret;
if (level < table->count)
clock = table->entries[level].vclk;
@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
int ret;
if (NULL == table || table->count <= 0)
return -EINVAL;
@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
data->vce_dpm.soft_min_clk = 0;
data->vce_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
if (ret)
return ret;
if (level < table->count)
clock = table->entries[level].ecclk;
@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
int ret;
if (NULL == table || table->count <= 0)
return -EINVAL;
@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
data->acp_dpm.soft_min_clk = 0;
data->acp_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
if (ret)
return ret;
if (level < table->count)
clock = table->entries[level].acpclk;


@ -355,13 +355,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
return 0;
}
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
int i;
uint32_t sub_vendor_id, hw_revision;
uint32_t top32, bottom32;
struct amdgpu_device *adev = hwmgr->adev;
int ret, i;
vega10_initialize_power_tune_defaults(hwmgr);
@ -486,9 +486,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
smum_send_msg_to_smc(hwmgr,
ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetSmuVersion,
&hwmgr->smu_version);
if (ret)
return ret;
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@ -506,10 +509,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_PCC_LIMIT].supported = true;
/* Get the SN to turn into a Unique ID */
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
if (ret)
return ret;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
if (ret)
return ret;
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
return 0;
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@ -883,7 +892,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
vega10_set_features_platform_caps(hwmgr);
vega10_init_dpm_defaults(hwmgr);
result = vega10_init_dpm_defaults(hwmgr);
if (result)
return result;
#ifdef PPLIB_VEGA10_EVV_SUPPORT
/* Get leakage voltage based on leakage ID. */
@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t agc_btc_response;
int ret;
if (data->smu_features[GNLD_ACG].supported) {
if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
if (ret)
return ret;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
if (ret)
agc_btc_response = 0;
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
@ -2572,8 +2588,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
}
}
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
PP_ASSERT_WITH_CODE(!result,
"Failed to get voltage table!",
return result);
pp_table->MaxVidStep = voltage_table.max_vid_step;
pp_table->GfxDpmVoltageMode =
@ -3391,13 +3410,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
uint32_t sclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock;
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
uint32_t mclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock;
uint32_t sclk, mclk;
uint32_t i;
if (vega10_ps == NULL)
return -EINVAL;
sclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock;
mclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock;
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@ -3704,6 +3727,9 @@ static int vega10_generate_dpm_level_enable_mask(
cast_const_phw_vega10_power_state(states->pnew_state);
int i;
if (vega10_ps == NULL)
return -EINVAL;
PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
"Attempt to Trim DPM States Failed!",
return -1);
@ -3876,11 +3902,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
uint32_t *query)
{
uint32_t value;
int ret;
if (!query)
return -EINVAL;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
if (ret)
return ret;
/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
*query = value << 8;
@ -4633,14 +4662,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
int i, now, size = 0, count = 0;
int i, ret, now, size = 0, count = 0;
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (ret)
break;
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@ -4656,7 +4687,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
if (ret)
break;
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@ -4667,7 +4700,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.socclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
if (ret)
break;
for (i = 0; i < soc_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@ -4678,8 +4713,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.dcefclk_dpm_key_disabled)
break;
smum_send_msg_to_smc_with_parameter(hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
if (ret)
break;
for (i = 0; i < dcef_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@ -4828,6 +4865,9 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
psa = cast_const_phw_vega10_power_state(pstate1);
psb = cast_const_phw_vega10_power_state(pstate2);
if (psa == NULL || psb == NULL)
return -EINVAL;
/* If the two states don't even have the same number of performance levels they cannot be the same state. */
if (psa->performance_level_count != psb->performance_level_count) {
*equal = false;
@ -4953,6 +4993,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (vega10_ps == NULL)
return -EINVAL;
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock =
@ -5004,6 +5046,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (vega10_ps == NULL)
return -EINVAL;
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock =
@ -5239,6 +5283,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
return;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (vega10_ps == NULL)
return;
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
@ -5261,6 +5308,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (vega10_ps == NULL)
return;
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
@ -5451,6 +5501,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return -EINVAL;
ps = cast_const_phw_vega10_power_state(state);
if (ps == NULL)
return -EINVAL;
i = index > ps->performance_level_count - 1 ?
ps->performance_level_count - 1 : index;


@ -4095,9 +4095,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
if (size == 0 && !data->is_custom_profile_set)
if (size != 10 && size != 0)
return -EINVAL;
if (size < 10 && size != 0)
if (size == 0 && !data->is_custom_profile_set)
return -EINVAL;
result = vega20_get_activity_monitor_coeff(hwmgr,
@ -4159,6 +4161,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
activity_monitor.Fclk_PD_Data_error_coeff = input[8];
activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
break;
default:
return -EINVAL;
}
result = vega20_set_activity_monitor_coeff(hwmgr,


@ -131,13 +131,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
uint64_t *features_enabled)
{
uint32_t enabled_features;
int ret;
if (features_enabled == NULL)
return -EINVAL;
smum_send_msg_to_smc(hwmgr,
ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeatures,
&enabled_features);
if (ret)
return ret;
*features_enabled = enabled_features;
return 0;


@ -318,6 +318,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),


@ -529,6 +529,7 @@ int meson_plane_create(struct meson_drm *priv)
struct meson_plane *meson_plane;
struct drm_plane *plane;
const uint64_t *format_modifiers = format_modifiers_default;
int ret;
meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
GFP_KERNEL);
@ -543,12 +544,16 @@ int meson_plane_create(struct meson_drm *priv)
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
format_modifiers = format_modifiers_afbc_g12a;
drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
format_modifiers,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
format_modifiers,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
if (ret) {
devm_kfree(priv->drm->dev, meson_plane);
return ret;
}
drm_plane_helper_add(plane, &meson_plane_helper_funcs);


@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
/**
* hwspin_lock_bust() - bust a specific hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to bust
* @id: identifier of the remote lock holder, if applicable
*
* This function will bust a hwspinlock that was previously acquired as
* long as the current owner of the lock matches the id given by the caller.
*
* Context: Process context.
*
* Returns: 0 on success, or -EINVAL if the hwspinlock does not exist, or
* the bust operation fails, and -EOPNOTSUPP if the bust operation is not
* defined for the hwspinlock.
*/
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
if (WARN_ON(!hwlock))
return -EINVAL;
if (!hwlock->bank->ops->bust) {
pr_err("bust operation not defined\n");
return -EOPNOTSUPP;
}
return hwlock->bank->ops->bust(hwlock, id);
}
EXPORT_SYMBOL_GPL(hwspin_lock_bust);
/**
* of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
* @bank: the hwspinlock device bank


@ -21,6 +21,8 @@ struct hwspinlock_device;
* @trylock: make a single attempt to take the lock. returns 0 on
* failure and true on success. may _not_ sleep.
* @unlock: release the lock. always succeed. may _not_ sleep.
* @bust: optional, platform-specific bust handler, called by hwspinlock
* core to bust a specific lock.
* @relax: optional, platform-specific relax handler, called by hwspinlock
* core while spinning on a lock, between two successive
* invocations of @trylock. may _not_ sleep.
@ -28,6 +30,7 @@ struct hwspinlock_device;
struct hwspinlock_ops {
int (*trylock)(struct hwspinlock *lock);
void (*unlock)(struct hwspinlock *lock);
int (*bust)(struct hwspinlock *lock, unsigned int id);
void (*relax)(struct hwspinlock *lock);
};
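To show where the new @bust op plugs in, here is a hedged sketch of a platform driver providing a bust handler. The MMIO register layout, the owner-id encoding and the trylock/unlock helpers referenced in the ops table are hypothetical and only stand in for the driver's existing code.

/* Hypothetical MMIO lock: the register reads 0 when free, the owner id when held. */
static int example_hwspinlock_bust(struct hwspinlock *lock, unsigned int id)
{
	void __iomem *reg = lock->priv;

	/* release the lock only if it is still held by the given owner */
	if (readl(reg) != id)
		return -EINVAL;

	writel(0, reg);
	return 0;
}

static const struct hwspinlock_ops example_hwspinlock_ops = {
	.trylock	= example_hwspinlock_trylock,	/* driver's existing op (assumed) */
	.unlock		= example_hwspinlock_unlock,	/* driver's existing op (assumed) */
	.bust		= example_hwspinlock_bust,
};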


@ -442,12 +442,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
&res, 0);
if (ret) {
of_node_put(child);
if (ret)
break;
}
}
if (ret && child)
of_node_put(child);
if (!ret)
ret = gicv2m_allocate_domains(parent);
if (ret)


@ -948,16 +948,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
goto error;
}
size = nformats * sizeof(*format) + nframes * sizeof(*frame)
/*
* Allocate memory for the formats, the frames and the intervals,
* plus any required padding to guarantee that everything has the
* correct alignment.
*/
size = nformats * sizeof(*format);
size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
size = ALIGN(size, __alignof__(*interval))
+ nintervals * sizeof(*interval);
format = kzalloc(size, GFP_KERNEL);
if (format == NULL) {
if (!format) {
ret = -ENOMEM;
goto error;
}
frame = (struct uvc_frame *)&format[nformats];
interval = (u32 *)&frame[nframes];
frame = (void *)format + nformats * sizeof(*format);
frame = PTR_ALIGN(frame, __alignof__(*frame));
interval = (void *)frame + nframes * sizeof(*frame);
interval = PTR_ALIGN(interval, __alignof__(*interval));
streaming->format = format;
streaming->nformats = nformats;
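The comment added above sizes one allocation so that the frame and interval arrays following the format array each start at a correctly aligned offset. Below is a stand-alone sketch of the same ALIGN-then-PTR_ALIGN pattern with two hypothetical arrays (the patch packs three); the types and names are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* round x up to a power-of-two alignment, as the kernel's ALIGN() does */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct fmt { char fourcc[4]; uint32_t nframes; };
struct frm { uint16_t width, height; };

int main(void)
{
	size_t nfmt = 3, nfrm = 5, size;
	struct fmt *fmts;
	struct frm *frms;
	void *buf;

	/* total size: format array, padding, then frame array */
	size = nfmt * sizeof(*fmts);
	size = ALIGN_UP(size, _Alignof(struct frm)) + nfrm * sizeof(*frms);

	buf = calloc(1, size);
	if (!buf)
		return 1;

	/* carve both arrays out of the single buffer */
	fmts = buf;
	frms = (void *)ALIGN_UP((uintptr_t)(fmts + nfmt), _Alignof(struct frm));

	printf("%zu bytes total, frames start at offset %td\n",
	       size, (char *)frms - (char *)buf);
	free(buf);
	return 0;
}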


@ -2826,8 +2826,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
mmc->max_seg_size = mmc->max_req_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;


@ -236,6 +236,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
{ .compatible = "aspeed,ast2600-sdhci", },
{ }
};
MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
static struct platform_driver aspeed_sdhci_driver = {
.driver = {


@ -755,7 +755,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
disable_irq(spi->irq);
disable_irq_nosync(spi->irq);
mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
/* Wait for oscillator startup timer after wake up */


@ -210,7 +210,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
name = dev_name(dev);
snprintf(intr->name, sizeof(intr->name),
"%s-%s-%s", IONIC_DRV_NAME, name, q->name);
"%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
return devm_request_irq(dev, intr->vector, ionic_isr,
0, intr->name, &qcq->napi);


@ -1365,6 +1365,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
{QMI_FIXED_INTF(0x2dee, 0x4d22, 5)}, /* MeiG Smart SRM825L */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -1497,7 +1497,7 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
return false;
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int index = vq2rxq(rq->vq);
@ -1508,7 +1508,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
free_old_xmit_skbs(sq, true);
free_old_xmit_skbs(sq, !!budget);
__netif_tx_unlock(txq);
}
@ -1525,7 +1525,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
unsigned int received;
unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
virtnet_poll_cleantx(rq, budget);
received = virtnet_receive(rq, budget, &xdp_xmit);
@ -1598,7 +1598,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
free_old_xmit_skbs(sq, !!budget);
opaque = virtqueue_enable_cb_prepare(sq->vq);


@ -275,8 +275,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
.data = { NULL, },
};
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx))
if (!iwl_trans_fw_running(fwrt->trans))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)


@ -72,7 +72,6 @@
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
bool (*d3_debug_enable)(void *ctx);
};


@ -577,11 +577,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
return iwl_mvm_firmware_running(ctx);
}
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
@ -602,7 +597,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
.fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};


@ -250,18 +250,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
static void al_pcie_config_prepare(struct al_pcie *pcie)
static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct pcie_port *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
struct resource_entry *ft;
u32 cfg_control_offset;
struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
if (!ft)
return -ENODEV;
bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
@ -295,6 +301,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
return 0;
}
static int al_pcie_host_init(struct pcie_port *pp)
@ -313,7 +321,9 @@ static int al_pcie_host_init(struct pcie_port *pp)
if (rc)
return rc;
al_pcie_config_prepare(pcie);
rc = al_pcie_config_prepare(pcie);
if (rc)
return rc;
return 0;
}


@ -391,7 +391,7 @@ ucsi_register_displayport(struct ucsi_connector *con,
bool override, int offset,
struct typec_altmode_desc *desc)
{
return NULL;
return typec_port_register_altmode(con->port, desc);
}
static inline void


@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
return 0;
return err;
}
static int tweak_reset_device_cmd(struct urb *urb)
{
struct stub_priv *priv = (struct stub_priv *) urb->context;
struct stub_device *sdev = priv->sdev;
int err;
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
err = usb_lock_device_for_reset(sdev->udev, NULL);
if (err < 0) {
dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
return 0;
return err;
}
usb_reset_device(sdev->udev);
err = usb_reset_device(sdev->udev);
usb_unlock_device(sdev->udev);
return 0;
return err;
}
/*
* clear_halt, set_interface, and set_configuration require special tricks.
* Returns 1 if request was tweaked, 0 otherwise.
*/
static void tweak_special_requests(struct urb *urb)
static int tweak_special_requests(struct urb *urb)
{
int err;
if (!urb || !urb->setup_packet)
return;
return 0;
if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
return;
return 0;
if (is_clear_halt_cmd(urb))
/* tweak clear_halt */
tweak_clear_halt_cmd(urb);
err = tweak_clear_halt_cmd(urb);
else if (is_set_interface_cmd(urb))
/* tweak set_interface */
tweak_set_interface_cmd(urb);
err = tweak_set_interface_cmd(urb);
else if (is_set_configuration_cmd(urb))
/* tweak set_configuration */
tweak_set_configuration_cmd(urb);
err = tweak_set_configuration_cmd(urb);
else if (is_reset_device_cmd(urb))
tweak_reset_device_cmd(urb);
else
err = tweak_reset_device_cmd(urb);
else {
usbip_dbg_stub_rx("no need to tweak\n");
return 0;
}
return !err;
}
/*
@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
int support_sg = 1;
int np = 0;
int ret, i;
int is_tweaked;
if (pipe == -1)
return;
@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
priv->urbs[i]->pipe = pipe;
priv->urbs[i]->complete = stub_complete;
/* no need to submit an intercepted request, but harmless? */
tweak_special_requests(priv->urbs[i]);
/*
* all URBs belong to a single PDU, so a global is_tweaked flag is
* enough
*/
is_tweaked = tweak_special_requests(priv->urbs[i]);
masking_bogus_flags(priv->urbs[i]);
}
@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
/* urb is now ready to submit */
for (i = 0; i < priv->num_urbs; i++) {
ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
if (!is_tweaked) {
ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
if (ret == 0)
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
dev_err(&udev->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urbs[i]);
if (ret == 0)
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
dev_err(&udev->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urbs[i]);
/*
* Pessimistic.
* This connection will be discarded.
*/
usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
break;
}
} else {
/*
* Pessimistic.
* This connection will be discarded.
* An identical URB was already submitted in
* tweak_special_requests(). Skip submitting this URB to not
* duplicate the request.
*/
usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
break;
priv->urbs[i]->status = 0;
stub_complete(priv->urbs[i]);
}
}


@ -853,10 +853,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto fail;
}
spin_lock(&fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
spin_unlock(&fs_info->trans_lock);
trans->pending_snapshot = pending_snapshot;
ret = btrfs_commit_transaction(trans);
if (ret)


@ -2075,6 +2075,27 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
}
}
/*
* Add a pending snapshot associated with the given transaction handle to the
* respective handle. This must be called after the transaction commit started
* and while holding fs_info->trans_lock.
* This serves to guarantee a caller of btrfs_commit_transaction() that it can
* safely free the pending snapshot pointer in case btrfs_commit_transaction()
* returns an error.
*/
static void add_pending_snapshot(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
if (!trans->pending_snapshot)
return;
lockdep_assert_held(&trans->fs_info->trans_lock);
ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@ -2161,6 +2182,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
spin_lock(&fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
add_pending_snapshot(trans);
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans);
@ -2243,6 +2266,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&fs_info->trans_lock);
add_pending_snapshot(trans);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,


@ -122,6 +122,8 @@ struct btrfs_trans_handle {
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
/* Set by a task that wants to create a snapshot. */
struct btrfs_pending_snapshot *pending_snapshot;
refcount_t use_count;
unsigned int type;
/*


@ -493,6 +493,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* A hole? We can safely clear the dirty bit */
if (!buffer_mapped(bh))
clear_buffer_dirty(bh);
/*
* Keeping dirty some buffer we cannot write? Make
* sure to redirty the page. This happens e.g. when
* doing writeout for transaction commit.
*/
if (buffer_dirty(bh) && !PageDirty(page))
redirty_page_for_writepage(wbc, page);
if (io->io_bio)
ext4_io_submit(io);
continue;
@ -500,6 +507,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
if (buffer_new(bh))
clear_buffer_new(bh);
set_buffer_async_write(bh);
clear_buffer_dirty(bh);
nr_to_submit++;
} while ((bh = bh->b_this_page) != head);
@ -542,7 +550,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
redirty_page_for_writepage(wbc, page);
do {
clear_buffer_async_write(bh);
if (buffer_async_write(bh)) {
clear_buffer_async_write(bh);
set_buffer_dirty(bh);
}
bh = bh->b_this_page;
} while (bh != head);
goto unlock;
@ -555,7 +566,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
continue;
io_submit_add_bh(io, inode, page, bounce_page, bh);
nr_submitted++;
clear_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
unlock:


@ -1698,10 +1698,16 @@ __acquires(fi->lock)
fuse_writepage_finish(fm, wpa);
spin_unlock(&fi->lock);
/* After fuse_writepage_finish() aux request list is private */
/* After rb_erase() aux request list is private */
for (aux = wpa->next; aux; aux = next) {
struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
next = aux->next;
aux->next = NULL;
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(aux);
}


@ -79,7 +79,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
if (ret == -ENOSYS) {
fm->fc->no_getxattr = 1;
ret = -EOPNOTSUPP;
@ -141,7 +141,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
}
ret = fuse_simple_request(fm, &args);
if (!ret && !size)
ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
if (ret > 0 && size)
ret = fuse_verify_xattr_list(list, ret);
if (ret == -ENOSYS) {


@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
brelse(bh);
}
/**
* nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery
* @nilfs: nilfs object
*/
static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
{
struct nilfs_inode_info *ii, *n;
LIST_HEAD(head);
/* Abandon inodes that have read recovery data */
spin_lock(&nilfs->ns_inode_lock);
list_splice_init(&nilfs->ns_dirty_files, &head);
spin_unlock(&nilfs->ns_inode_lock);
if (list_empty(&head))
return;
set_nilfs_purging(nilfs);
list_for_each_entry_safe(ii, n, &head, i_dirty) {
spin_lock(&nilfs->ns_inode_lock);
list_del_init(&ii->i_dirty);
spin_unlock(&nilfs->ns_inode_lock);
iput(&ii->vfs_inode);
}
clear_nilfs_purging(nilfs);
}
/**
* nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
* @nilfs: nilfs object
@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
if (unlikely(err)) {
nilfs_err(sb, "error %d writing segment for recovery",
err);
goto failed;
goto put_root;
}
nilfs_finish_roll_forward(nilfs, ri);
}
failed:
put_root:
nilfs_put_root(root);
return err;
failed:
nilfs_abort_roll_forward(nilfs);
goto put_root;
}
/**


@ -1833,6 +1833,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
nilfs_abort_logs(&logs, ret ? : err);
list_splice_tail_init(&sci->sc_segbufs, &logs);
if (list_empty(&logs))
return; /* if the first segment buffer preparation failed */
nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
nilfs_free_incomplete_logs(&logs, nilfs);
@ -2077,7 +2080,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
err = nilfs_segctor_begin_construction(sci, nilfs);
if (unlikely(err))
goto out;
goto failed;
/* Update time stamp */
sci->sc_seg_ctime = ktime_get_real_seconds();
@ -2140,10 +2143,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
return err;
failed_to_write:
if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
nilfs_redirty_inodes(&sci->sc_dirty_files);
failed:
if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
nilfs_redirty_inodes(&sci->sc_dirty_files);
if (nilfs_doing_gc())
nilfs_redirty_inodes(&sci->sc_gc_inodes);
nilfs_segctor_abort_construction(sci, nilfs, err);


@ -105,17 +105,13 @@ void fsnotify_sb_delete(struct super_block *sb)
* parent cares. Thus when an event happens on a child it can quickly tell if
* if there is a need to find a parent and send the event to the parent.
*/
void __fsnotify_update_child_dentry_flags(struct inode *inode)
void fsnotify_set_children_dentry_flags(struct inode *inode)
{
struct dentry *alias;
int watched;
if (!S_ISDIR(inode->i_mode))
return;
/* determine if the children should tell inode about their events */
watched = fsnotify_inode_watches_children(inode);
spin_lock(&inode->i_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
@ -131,10 +127,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
continue;
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
if (watched)
child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
spin_unlock(&alias->d_lock);
@ -142,6 +135,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
spin_unlock(&inode->i_lock);
}
/*
* Lazily clear false positive PARENT_WATCHED flag for child whose parent had
* stopped watching children.
*/
static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
/*
* d_lock is a sufficient barrier to prevent observing a non-watched
* parent state from before the fsnotify_set_children_dentry_flags()
* or fsnotify_update_flags() call that had set PARENT_WATCHED.
*/
if (!fsnotify_inode_watches_children(pinode))
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
spin_unlock(&dentry->d_lock);
}
/* Are inode/sb/mount interested in parent and name info with this event? */
static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt,
__u32 mask)
@ -210,7 +221,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
p_inode = parent->d_inode;
p_mask = fsnotify_inode_watches_children(p_inode);
if (unlikely(parent_watched && !p_mask))
__fsnotify_update_child_dentry_flags(p_inode);
fsnotify_clear_child_dentry_flag(p_inode, dentry);
/*
* Include parent/name in notification either if some notification


@ -59,7 +59,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
*/
extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
extern void fsnotify_set_children_dentry_flags(struct inode *inode);
/* allocate and destroy and event holder to attach events to notification/access queues */
extern struct fsnotify_event_holder *fsnotify_alloc_event_holder(void);


@ -132,6 +132,24 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
*fsnotify_conn_mask_p(conn) = new_mask;
}
static bool fsnotify_conn_watches_children(
struct fsnotify_mark_connector *conn)
{
if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
return false;
return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
}
static void fsnotify_conn_set_children_dentry_flags(
struct fsnotify_mark_connector *conn)
{
if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
return;
fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
}
/*
* Calculate mask of events for a list of marks. The caller must make sure
* connector and connector->obj cannot disappear under us. Callers achieve
@ -140,15 +158,23 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
*/
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
bool update_children;
if (!conn)
return;
spin_lock(&conn->lock);
update_children = !fsnotify_conn_watches_children(conn);
__fsnotify_recalc_mask(conn);
update_children &= fsnotify_conn_watches_children(conn);
spin_unlock(&conn->lock);
if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
__fsnotify_update_child_dentry_flags(
fsnotify_conn_inode(conn));
/*
* Set children's PARENT_WATCHED flags only if parent started watching.
* When parent stops watching, we clear false positive PARENT_WATCHED
* flags lazily in __fsnotify_parent().
*/
if (update_children)
fsnotify_conn_set_children_dentry_flags(conn);
}
/* Free all connectors queued for freeing once SRCU period ends */


@ -86,6 +86,13 @@ enum {
#define UDF_MAX_LVID_NESTING 1000
enum { UDF_MAX_LINKS = 0xffff };
/*
* We limit filesize to 4TB. This is arbitrary as the on-disk format supports
* more but because the file space is described by a linked list of extents,
* each of which can have at most 1GB, the creation and handling of extents
* gets unusably slow beyond certain point...
*/
#define UDF_MAX_FILESIZE (1ULL << 42)
/* These are the "meat" - everything else is stuffing */
static int udf_fill_super(struct super_block *, void *, int);
@ -2301,7 +2308,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
ret = -ENOMEM;
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_maxbytes = UDF_MAX_FILESIZE;
sb->s_max_links = UDF_MAX_LINKS;
return 0;
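As a quick check of the constant introduced above: 1ULL << 42 is 4 TiB, and with extents capped at roughly 1 GiB each, a file at that limit already needs on the order of 4096 extents in the linked list, which is where the slowdown mentioned in the comment comes from. A throwaway snippet confirming the arithmetic (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long long max_size = 1ULL << 42;	/* UDF_MAX_FILESIZE */
	unsigned long long max_extent = 1ULL << 30;	/* ~1 GiB per extent */

	printf("limit: %llu bytes = %llu TiB\n", max_size, max_size >> 40);
	printf("minimum extent count at the limit: %llu\n",
	       max_size / max_extent);
	return 0;
}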


@ -771,107 +771,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
* On legacy hierarchies, net_prio and net_cls controllers directly set
* attributes on each sock which can then be tested by the network layer.
* On the default hierarchy, each sock is associated with the cgroup it was
* created in and the networking layer can match the cgroup directly.
*
* To avoid carrying all three cgroup related fields separately in sock,
* sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
* On boot, sock_cgroup_data records the cgroup that the sock was created
* in so that cgroup2 matches can be made; however, once either net_prio or
* net_cls starts being used, the area is overriden to carry prioidx and/or
* classid. The two modes are distinguished by whether the lowest bit is
* set. Clear bit indicates cgroup pointer while set bit prioidx and
* classid.
*
* While userland may start using net_prio or net_cls at any time, once
* either is used, cgroup2 matching no longer works. There is no reason to
* mix the two and this is in line with how legacy and v2 compatibility is
* handled. On mode switch, cgroup references which are already being
* pointed to by socks may be leaked. While this can be remedied by adding
* synchronization around sock_cgroup_data, given that the number of leaked
* cgroups is bound and highly unlikely to be high, this seems to be the
* better trade-off.
* On legacy hierarchies, net_prio and net_cls controllers directly
* set attributes on each sock which can then be tested by the network
* layer. On the default hierarchy, each sock is associated with the
* cgroup it was created in and the networking layer can match the
* cgroup directly.
*/
struct sock_cgroup_data {
union {
#ifdef __LITTLE_ENDIAN
struct {
u8 is_data : 1;
u8 no_refcnt : 1;
u8 unused : 6;
u8 padding;
u16 prioidx;
u32 classid;
} __packed;
#else
struct {
u32 classid;
u16 prioidx;
u8 padding;
u8 unused : 6;
u8 no_refcnt : 1;
u8 is_data : 1;
} __packed;
struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
u16 prioidx; /* v1 */
#endif
u64 val;
};
};
/*
* There's a theoretical window where the following accessors race with
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
#ifdef CONFIG_CGROUP_NET_PRIO
return READ_ONCE(skcd->prioidx);
#else
return 1;
#endif
}
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
#else
return 0;
#endif
}
/*
* If invoked concurrently, the updaters may clobber each other. The
* caller is responsible for synchronization.
*/
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
return;
if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}
skcd_buf.prioidx = prioidx;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_PRIO
WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
if (sock_cgroup_classid(&skcd_buf) == classid)
return;
if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}
skcd_buf.classid = classid;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
#endif
}
#else /* CONFIG_SOCK_CGROUP_DATA */


@ -827,33 +827,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif
void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
unsigned long v;
/*
* @skcd->val is 64bit but the following is safe on 32bit too as we
* just need the lower ulong to be written and read atomically.
*/
v = READ_ONCE(skcd->val);
if (v & 3)
return &cgrp_dfl_root.cgrp;
return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
return (struct cgroup *)(unsigned long)skcd->val;
#endif
return skcd->cgroup;
}
#else /* CONFIG_CGROUP_DATA */


@ -437,12 +437,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
__u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
/* FS_EVENT_ON_CHILD is set if the inode may care */
if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
@ -456,7 +458,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
* d_lock, the following __fsnotify_update_child_dentry_flags call will
* d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/

View File

@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
return 0;
}
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
return 0;

View File

@ -991,7 +991,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
struct acpi_resource;
struct acpi_resource_i2c_serialbus;
#if IS_ENABLED(CONFIG_ACPI)
#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
u32 i2c_acpi_find_bus_speed(struct device *dev);

View File

@ -175,7 +175,6 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
u8 link_type;
u8 type;
u8 val[16];
};
@ -185,7 +184,6 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@ -200,7 +198,6 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
u8 link_type;
u8 val[16];
};
@ -208,8 +205,6 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;

View File

@ -6626,74 +6626,51 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;
void cgroup_sk_alloc_disable(void)
{
if (cgroup_sk_alloc_disabled)
return;
pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
cgroup_sk_alloc_disabled = true;
}
#else
#define cgroup_sk_alloc_disabled false
#endif
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
if (cgroup_sk_alloc_disabled) {
skcd->no_refcnt = 1;
return;
}
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt())
return;
struct cgroup *cgroup;
rcu_read_lock();
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt()) {
cgroup = &cgrp_dfl_root.cgrp;
cgroup_get(cgroup);
goto out;
}
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
skcd->val = (unsigned long)cset->dfl_cgrp;
cgroup_bpf_get(cset->dfl_cgrp);
cgroup = cset->dfl_cgrp;
break;
}
cpu_relax();
}
out:
skcd->cgroup = cgroup;
cgroup_bpf_get(cgroup);
rcu_read_unlock();
}
void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
if (skcd->val) {
if (skcd->no_refcnt)
return;
/*
* We might be cloning a socket which is left in an empty
* cgroup and the cgroup might have already been rmdir'd.
* Don't use cgroup_get_live().
*/
cgroup_get(sock_cgroup_ptr(skcd));
cgroup_bpf_get(sock_cgroup_ptr(skcd));
}
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
/*
* We might be cloning a socket which is left in an empty
* cgroup and the cgroup might have already been rmdir'd.
* Don't use cgroup_get_live().
*/
cgroup_get(cgrp);
cgroup_bpf_get(cgrp);
}
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
if (skcd->no_refcnt)
return;
cgroup_bpf_put(cgrp);
cgroup_put(cgrp);
}

View File

@ -447,8 +447,11 @@ void debug_dma_dump_mappings(struct device *dev)
* dma_active_cacheline entry to track per event. dma_map_sg(), on the
* other hand, consumes a single dma_debug_entry, but inserts 'nents'
* entries into the tree.
*
* Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
* up right back in the DMA debugging code, leading to a deadlock.
*/
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

View File

@ -1240,7 +1240,7 @@ static void show_rcu_tasks_trace_gp_kthread(void)
{
char buf[64];
sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
data_race(n_heavy_reader_ofl_updates),
data_race(n_heavy_reader_updates),
data_race(n_heavy_reader_attempts));
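The change above swaps an unbounded sprintf() into the 64-byte stack buffer for a length-checked snprintf(); the counters being formatted are runtime values, so the resulting string is not guaranteed to fit. A minimal illustration of the difference (the numbers are made up for the example):

char buf[64];

/* snprintf() writes at most sizeof(buf) - 1 characters and always
 * NUL-terminates, so even worst-case counter values cannot overrun buf[],
 * whereas sprintf() would have written past the end of the buffer. */
snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu",
	 12, 4294967295UL, 4294967295UL, 4294967295UL);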

View File

@ -3686,6 +3686,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
break;
entries++;
ring_buffer_iter_advance(buf_iter);
/* This could be a big loop */
cond_resched();
}
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;

View File

@ -2370,16 +2370,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
key_count);
for (i = 0; i < key_count; i++) {
struct mgmt_link_key_info *key = &cp->keys[i];
/* Considering SMP over BREDR/LE, there is no need to check addr_type */
if (key->type > 0x08)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_LOAD_LINK_KEYS,
MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@ -2404,6 +2394,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
if (key->addr.type != BDADDR_BREDR) {
bt_dev_warn(hdev,
"Invalid link address type %u for %pMR",
key->addr.type, &key->addr.bdaddr);
continue;
}
if (key->type > 0x08) {
bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
key->type, &key->addr.bdaddr);
continue;
}
/* Always ignore debug keys and require a new pairing if
* the user wants to use them.
*/
@ -5919,7 +5922,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
u8 addr_type = le_addr_type(irk->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_IRK,
@ -5929,12 +5931,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
continue;
}
/* When using SMP over BR/EDR, the addr type should be set to BREDR */
if (irk->addr.type == BDADDR_BREDR)
addr_type = BDADDR_BREDR;
hci_add_irk(hdev, &irk->addr.bdaddr,
addr_type, irk->val,
le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY);
}
@ -5999,15 +5997,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "key_count %u", key_count);
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
if (!ltk_is_valid(key))
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_LOAD_LONG_TERM_KEYS,
MGMT_STATUS_INVALID_PARAMS);
}
hci_dev_lock(hdev);
hci_smp_ltks_clear(hdev);
@ -6015,7 +6004,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, authenticated;
u8 addr_type = le_addr_type(key->addr.type);
if (hci_is_blocked_key(hdev,
HCI_BLOCKED_KEY_TYPE_LTK,
@ -6025,6 +6013,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
if (!ltk_is_valid(key)) {
bt_dev_warn(hdev, "Invalid LTK for %pMR",
&key->addr.bdaddr);
continue;
}
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
@ -6050,12 +6044,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}
/* When using SMP over BR/EDR, the addr type should be set to BREDR */
if (key->addr.type == BDADDR_BREDR)
addr_type = BDADDR_BREDR;
hci_add_ltk(hdev, &key->addr.bdaddr,
addr_type, type, authenticated,
le_addr_type(key->addr.type), type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}
@ -8058,7 +8048,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
@ -8109,7 +8099,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@ -8138,7 +8128,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
memcpy(ev.irk.val, irk->val, sizeof(irk->val));
mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
@ -8167,7 +8157,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
ev.key.type = csrk->type;
memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

View File

@ -1060,7 +1060,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->remote_irk) {
smp->remote_irk->link_type = hcon->type;
mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the
@ -1075,28 +1074,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
if (smp->csrk) {
smp->csrk->link_type = hcon->type;
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->csrk, persistent);
}
if (smp->responder_csrk) {
smp->responder_csrk->link_type = hcon->type;
smp->responder_csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
}
if (smp->ltk) {
smp->ltk->link_type = hcon->type;
smp->ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->ltk, persistent);
}
if (smp->responder_ltk) {
smp->responder_ltk->link_type = hcon->type;
smp->responder_ltk->bdaddr_type = hcon->dst_type;
bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
@ -1116,8 +1111,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
smp->link_key, type, 0, &persistent);
if (key) {
key->link_type = hcon->type;
key->bdaddr_type = hcon->dst_type;
mgmt_new_link_key(hdev, key, persistent);
/* Don't keep debug keys around if the relevant

View File

@ -72,11 +72,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file, &err);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
if (sock)
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
spin_unlock(&cgroup_sk_update_lock);
}
if (--ctx->batch == 0) {
ctx->batch = UPDATE_CLASSID_BATCH;
return n + 1;
@ -122,8 +119,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
struct css_task_iter it;
struct task_struct *p;
cgroup_sk_alloc_disable();
cs->classid = (u32)value;
css_task_iter_start(css, 0, &it);

View File

@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
if (!dev)
return -ENODEV;
cgroup_sk_alloc_disable();
rtnl_lock();
ret = netprio_set_prio(of_css(of), dev, prio);
@ -222,12 +220,10 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
{
int err;
struct socket *sock = sock_from_file(file, &err);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
if (sock)
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
(unsigned long)v);
spin_unlock(&cgroup_sk_update_lock);
}
return 0;
}
@ -236,8 +232,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct task_struct *p;
struct cgroup_subsys_state *css;
cgroup_sk_alloc_disable();
cgroup_taskset_for_each(p, css, tset) {
void *v = (void *)(unsigned long)css->id;

View File

@ -653,6 +653,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
if (err)
goto unlock;
}
sock_set_flag(sk, SOCK_RCU_FREE);
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
@ -660,7 +661,6 @@ int __inet_hash(struct sock *sk, struct sock *osk)
__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
inet_hash2(hashinfo, sk);
ilb->count++;
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
spin_unlock(&ilb->lock);

View File

@ -108,6 +108,7 @@ int ila_lwt_init(void);
void ila_lwt_fini(void);
int ila_xlat_init_net(struct net *net);
void ila_xlat_pre_exit_net(struct net *net);
void ila_xlat_exit_net(struct net *net);
int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);

View File

@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net)
return err;
}
static __net_exit void ila_pre_exit_net(struct net *net)
{
ila_xlat_pre_exit_net(net);
}
static __net_exit void ila_exit_net(struct net *net)
{
ila_xlat_exit_net(net);
@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
static struct pernet_operations ila_net_ops = {
.init = ila_init_net,
.pre_exit = ila_pre_exit_net,
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),

View File

@ -616,6 +616,15 @@ int ila_xlat_init_net(struct net *net)
return 0;
}
void ila_xlat_pre_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
if (ilan->xlat.hooks_registered)
nf_unregister_net_hooks(net, ila_nf_hook_ops,
ARRAY_SIZE(ila_nf_hook_ops));
}
void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@ -623,10 +632,6 @@ void ila_xlat_exit_net(struct net *net)
rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
free_bucket_spinlocks(ilan->xlat.locks);
if (ilan->xlat.hooks_registered)
nf_unregister_net_hooks(net, ila_nf_hook_ops,
ARRAY_SIZE(ila_nf_hook_ops));
}
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
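Moving nf_unregister_net_hooks() from the exit handler into the new pre_exit handler matters because of the pernet teardown ordering: as I understand cleanup_net(), all pre_exit methods run first, an RCU grace period is waited out, and only then do the exit methods run, so no packet can still be inside the ILA hooks when the translation table is freed. A rough sketch of the ordering this relies on (illustration only, not actual teardown code):

/* Simplified view of the teardown sequence assumed by the change above. */
static void example_ila_teardown_order(struct net *net)
{
	ila_xlat_pre_exit_net(net);	/* netfilter hooks gone: no new packets enter    */
	synchronize_rcu();		/* packets already inside the hooks drain out    */
	ila_xlat_exit_net(net);		/* now safe to free the rhashtable and the locks */
}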

View File

@ -96,7 +96,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
}
pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d\n",
version, flags, opsize, mp_opt->sndr_key,
mp_opt->rcvr_key, mp_opt->data_len);
break;
@ -110,7 +110,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->token, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
@ -120,20 +120,20 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 8;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->thmac, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
ptr += 2;
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
pr_debug("MP_JOIN hmac");
pr_debug("MP_JOIN hmac\n");
} else {
mp_opt->mp_join = 0;
}
break;
case MPTCPOPT_DSS:
pr_debug("DSS");
pr_debug("DSS\n");
ptr++;
/* we must clear 'mpc_map' be able to detect MP_CAPABLE
@ -148,7 +148,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
mp_opt->data_fin, mp_opt->dsn64,
mp_opt->use_map, mp_opt->ack64,
mp_opt->use_ack);
@ -189,7 +189,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 4;
}
pr_debug("data_ack=%llu", mp_opt->data_ack);
pr_debug("data_ack=%llu\n", mp_opt->data_ack);
}
if (mp_opt->use_map) {
@ -207,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
pr_debug("data_seq=%llu subflow_seq=%u data_len=%u\n",
mp_opt->data_seq, mp_opt->subflow_seq,
mp_opt->data_len);
}
@ -242,7 +242,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->add_addr = 1;
mp_opt->addr_id = *ptr++;
pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
pr_debug("ADD_ADDR: id=%d, echo=%d\n", mp_opt->addr_id, mp_opt->echo);
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
ptr += 4;
@ -277,7 +277,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->rm_addr = 1;
mp_opt->rm_id = *ptr++;
pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
pr_debug("RM_ADDR: id=%d\n", mp_opt->rm_id);
break;
default:
@ -344,7 +344,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
subflow->local_nonce);
opts->suboptions = OPTION_MPTCP_MPJ_SYN;
opts->join_id = subflow->local_id;
@ -436,7 +436,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
else
*size = TCPOLEN_MPTCP_MPC_ACK;
pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
subflow, subflow->local_key, subflow->remote_key,
data_len);
@ -445,7 +445,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
opts->suboptions = OPTION_MPTCP_MPJ_ACK;
memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
*size = TCPOLEN_MPTCP_MPJ_ACK;
pr_debug("subflow=%p", subflow);
pr_debug("subflow=%p\n", subflow);
schedule_3rdack_retransmission(sk);
return true;
@ -619,7 +619,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk,
}
}
#endif
pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);
pr_debug("addr_id=%d, ahmac=%llu, echo=%d\n", opts->addr_id, opts->ahmac, echo);
return true;
}
@ -644,7 +644,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
opts->suboptions |= OPTION_MPTCP_RM_ADDR;
opts->rm_id = rm_id;
pr_debug("rm_id=%d", opts->rm_id);
pr_debug("rm_id=%d\n", opts->rm_id);
return true;
}
@ -703,7 +703,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
opts->sndr_key = subflow_req->local_key;
*size = TCPOLEN_MPTCP_MPC_SYNACK;
pr_debug("subflow_req=%p, local_key=%llu",
pr_debug("subflow_req=%p, local_key=%llu\n",
subflow_req, subflow_req->local_key);
return true;
} else if (subflow_req->mp_join) {
@ -712,7 +712,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
subflow_req, opts->backup, opts->join_id,
opts->thmac, opts->nonce);
*size = TCPOLEN_MPTCP_MPJ_SYNACK;

View File

@ -16,7 +16,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo)
{
pr_debug("msk=%p, local_id=%d", msk, addr->id);
pr_debug("msk=%p, local_id=%d\n", msk, addr->id);
msk->pm.local = *addr;
WRITE_ONCE(msk->pm.add_addr_echo, echo);
@ -26,7 +26,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
pr_debug("msk=%p, local_id=%d", msk, local_id);
pr_debug("msk=%p, local_id=%d\n", msk, local_id);
msk->pm.rm_id = local_id;
WRITE_ONCE(msk->pm.rm_addr_signal, true);
@ -35,7 +35,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
pr_debug("msk=%p, local_id=%d", msk, local_id);
pr_debug("msk=%p, local_id=%d\n", msk, local_id);
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, local_id);
@ -49,7 +49,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
WRITE_ONCE(pm->server_side, server_side);
}
@ -59,7 +59,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
struct mptcp_pm_data *pm = &msk->pm;
int ret = 0;
pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
pm->subflows_max, READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
@ -83,7 +83,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
enum mptcp_pm_status new_status)
{
pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
BIT(new_status));
if (msk->pm.status & BIT(new_status))
return false;
@ -98,7 +98,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
/* try to avoid acquiring the lock below */
if (!READ_ONCE(pm->work_pending))
@ -114,7 +114,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
}
void mptcp_pm_subflow_established(struct mptcp_sock *msk,
@ -122,7 +122,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
if (!READ_ONCE(pm->work_pending))
return;
@ -137,7 +137,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
}
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
@ -145,7 +145,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
READ_ONCE(pm->accept_addr));
spin_lock_bh(&pm->lock);
@ -162,7 +162,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
struct mptcp_pm_data *pm = &msk->pm;
pr_debug("msk=%p remote_id=%d", msk, rm_id);
pr_debug("msk=%p remote_id=%d\n", msk, rm_id);
spin_lock_bh(&pm->lock);
mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);

View File

@ -127,11 +127,13 @@ static bool lookup_subflow_by_saddr(const struct list_head *list,
return false;
}
static struct mptcp_pm_addr_entry *
static bool
select_local_address(const struct pm_nl_pernet *pernet,
struct mptcp_sock *msk)
struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *new_entry)
{
struct mptcp_pm_addr_entry *entry, *ret = NULL;
struct mptcp_pm_addr_entry *entry;
bool found = false;
rcu_read_lock();
spin_lock_bh(&msk->join_list_lock);
@ -145,19 +147,23 @@ select_local_address(const struct pm_nl_pernet *pernet,
if (entry->addr.family == ((struct sock *)msk)->sk_family &&
!lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
!lookup_subflow_by_saddr(&msk->join_list, &entry->addr)) {
ret = entry;
*new_entry = *entry;
found = true;
break;
}
}
spin_unlock_bh(&msk->join_list_lock);
rcu_read_unlock();
return ret;
return found;
}
static struct mptcp_pm_addr_entry *
select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
static bool
select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos,
struct mptcp_pm_addr_entry *new_entry)
{
struct mptcp_pm_addr_entry *entry, *ret = NULL;
struct mptcp_pm_addr_entry *entry;
bool found = false;
int i = 0;
rcu_read_lock();
@ -170,12 +176,14 @@ select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
if (i++ == pos) {
ret = entry;
*new_entry = *entry;
found = true;
break;
}
}
rcu_read_unlock();
return ret;
return found;
}
static void check_work_pending(struct mptcp_sock *msk)
@ -206,7 +214,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
if (!msk)
return;
@ -225,7 +233,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal(msk)) {
pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
mptcp_pm_announce_addr(msk, &entry->addr, false);
entry->retrans_times++;
}
@ -289,7 +297,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
struct sock *sk = (struct sock *)msk;
LIST_HEAD(free_list);
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
spin_lock_bh(&msk->pm.lock);
list_splice_init(&msk->pm.anno_list, &free_list);
@ -305,7 +313,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct mptcp_addr_info remote = { 0 };
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *local;
struct mptcp_pm_addr_entry local;
struct pm_nl_pernet *pernet;
pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
@ -317,13 +325,11 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check first for announce */
if (msk->pm.add_addr_signaled < msk->pm.add_addr_signal_max) {
local = select_signal_address(pernet,
msk->pm.add_addr_signaled);
if (local) {
if (mptcp_pm_alloc_anno_list(msk, local)) {
if (select_signal_address(pernet, msk->pm.add_addr_signaled,
&local)) {
if (mptcp_pm_alloc_anno_list(msk, &local)) {
msk->pm.add_addr_signaled++;
mptcp_pm_announce_addr(msk, &local->addr, false);
mptcp_pm_announce_addr(msk, &local.addr, false);
}
} else {
/* pick failed, avoid fourther attempts later */
@ -338,13 +344,12 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
msk->pm.subflows < msk->pm.subflows_max) {
remote_address((struct sock_common *)sk, &remote);
local = select_local_address(pernet, msk);
if (local) {
if (select_local_address(pernet, msk, &local)) {
msk->pm.local_addr_used++;
msk->pm.subflows++;
check_work_pending(msk);
spin_unlock_bh(&msk->pm.lock);
__mptcp_subflow_connect(sk, &local->addr, &remote);
__mptcp_subflow_connect(sk, &local.addr, &remote);
spin_lock_bh(&msk->pm.lock);
return;
}
@ -372,7 +377,7 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
struct mptcp_addr_info local;
int err;
pr_debug("accepted %d:%d remote family %d",
pr_debug("accepted %d:%d remote family %d\n",
msk->pm.add_addr_accepted, msk->pm.add_addr_accept_max,
msk->pm.remote.family);
msk->pm.subflows++;
@ -405,7 +410,7 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
struct mptcp_subflow_context *subflow, *tmp;
struct sock *sk = (struct sock *)msk;
pr_debug("address rm_id %d", msk->pm.rm_id);
pr_debug("address rm_id %d\n", msk->pm.rm_id);
if (!msk->pm.rm_id)
return;
@ -441,7 +446,7 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
struct mptcp_subflow_context *subflow, *tmp;
struct sock *sk = (struct sock *)msk;
pr_debug("subflow rm_id %d", rm_id);
pr_debug("subflow rm_id %d\n", rm_id);
if (!rm_id)
return;
@ -791,7 +796,7 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
struct mptcp_sock *msk;
long s_slot = 0, s_num = 0;
pr_debug("remove_id=%d", addr->id);
pr_debug("remove_id=%d\n", addr->id);
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
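The select_local_address()/select_signal_address() rework earlier in this file closes a potential use-after-free: instead of returning a pointer into the RCU-protected pernet endpoint list and dereferencing it after rcu_read_unlock(), the chosen entry is now copied into a caller-supplied mptcp_pm_addr_entry while it is still guaranteed to be alive. A generic sketch of that copy-under-RCU pattern (the names below are illustrative, not from the patch):

#include <linux/rculist.h>

struct example_entry {
	struct list_head	list;
	int			payload;	/* stand-in for the endpoint fields */
};

/* Snapshot the matching element while rcu_read_lock() guarantees it cannot
 * be freed; the caller then works on its private copy, so a concurrent
 * removal of the list entry can no longer cause a use-after-free. */
static bool example_lookup_copy(struct list_head *head, int key,
				struct example_entry *out)
{
	struct example_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		if (e->payload == key) {
			out->payload = e->payload;	/* copy the data, not the list linkage */
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}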

View File

@ -128,7 +128,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
@ -164,7 +164,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
space = tcp_space(sk);
max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
RB_EMPTY_ROOT(&msk->out_of_order_queue));
if (after64(seq, max_seq)) {
/* out of window */
@ -469,7 +469,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
u32 old_copied_seq;
bool done = false;
pr_debug("msk=%p ssk=%p", msk, ssk);
pr_debug("msk=%p ssk=%p\n", msk, ssk);
tp = tcp_sk(ssk);
old_copied_seq = tp->copied_seq;
do {
@ -552,7 +552,7 @@ static bool mptcp_ofo_queue(struct mptcp_sock *msk)
u64 end_seq;
p = rb_first(&msk->out_of_order_queue);
pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
while (p) {
skb = rb_to_skb(p);
if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
@ -574,7 +574,7 @@ static bool mptcp_ofo_queue(struct mptcp_sock *msk)
int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
/* skip overlapping data, if any */
pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
delta);
MPTCP_SKB_CB(skb)->offset += delta;
@ -956,12 +956,12 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
psize = min_t(size_t, pfrag->size - offset, avail_size);
/* Copy to page */
pr_debug("left=%zu", msg_data_left(msg));
pr_debug("left=%zu\n", msg_data_left(msg));
psize = copy_page_from_iter(pfrag->page, offset,
min_t(size_t, msg_data_left(msg),
psize),
&msg->msg_iter);
pr_debug("left=%zu", msg_data_left(msg));
pr_debug("left=%zu\n", msg_data_left(msg));
if (!psize)
return -EINVAL;
@ -1031,7 +1031,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
mpext->use_map = 1;
mpext->dsn64 = 1;
pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
mpext->data_seq, mpext->subflow_seq, mpext->data_len,
mpext->dsn64);
@ -1147,7 +1147,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
}
}
pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld\n",
msk, nr_active, send_info[0].ssk, send_info[0].ratio,
send_info[1].ssk, send_info[1].ratio);
@ -1240,7 +1240,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sndbuf > READ_ONCE(sk->sk_sndbuf))
WRITE_ONCE(sk->sk_sndbuf, sndbuf);
pr_debug("conn_list->subflow=%p", ssk);
pr_debug("conn_list->subflow=%p\n", ssk);
lock_sock(ssk);
tx_ok = msg_data_left(msg);
@ -1577,7 +1577,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
pr_debug("block timeout %ld", timeo);
pr_debug("block timeout %ld\n", timeo);
mptcp_wait_data(sk, &timeo);
}
@ -1595,7 +1595,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
set_bit(MPTCP_DATA_READY, &msk->flags);
}
out_err:
pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d\n",
msk, test_bit(MPTCP_DATA_READY, &msk->flags),
skb_queue_empty(&sk->sk_receive_queue), copied);
mptcp_rcv_space_adjust(msk, copied);
@ -1712,7 +1712,7 @@ static void pm_work(struct mptcp_sock *msk)
spin_lock_bh(&msk->pm.lock);
pr_debug("msk=%p status=%x", msk, pm->status);
pr_debug("msk=%p status=%x\n", msk, pm->status);
if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
@ -1913,11 +1913,11 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
break;
default:
if (__mptcp_check_fallback(mptcp_sk(sk))) {
pr_debug("Fallback");
pr_debug("Fallback\n");
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
} else {
pr_debug("Sending DATA_FIN on subflow %p", ssk);
pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
mptcp_set_timeout(sk, ssk);
tcp_send_ack(ssk);
}
@ -1973,7 +1973,7 @@ static void mptcp_close(struct sock *sk, long timeout)
if (__mptcp_check_fallback(msk)) {
goto update_state;
} else if (mptcp_close_state(sk)) {
pr_debug("Sending DATA_FIN sk=%p", sk);
pr_debug("Sending DATA_FIN sk=%p\n", sk);
WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
WRITE_ONCE(msk->snd_data_fin_enable, 1);
@ -2181,12 +2181,12 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
return NULL;
}
pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
pr_debug("msk=%p, listener=%p\n", msk, mptcp_subflow_ctx(listener->sk));
newsk = inet_csk_accept(listener->sk, flags, err, kern);
if (!newsk)
return NULL;
pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
pr_debug("msk=%p, subflow is mptcp=%d\n", msk, sk_is_mptcp(newsk));
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
@ -2351,7 +2351,7 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
if (mptcp_unsupported(level, optname))
return -ENOPROTOOPT;
@ -2383,7 +2383,7 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
/* @@ the meaning of setsockopt() when the socket is connected and
* there are multiple subflows is not yet defined. It is up to the
@ -2454,7 +2454,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
struct socket *ssock;
ssock = __mptcp_nmpc_socket(msk);
pr_debug("msk=%p, subflow=%p", msk, ssock);
pr_debug("msk=%p, subflow=%p\n", msk, ssock);
if (WARN_ON_ONCE(!ssock))
return -EINVAL;
@ -2472,7 +2472,7 @@ void mptcp_finish_connect(struct sock *ssk)
sk = subflow->conn;
msk = mptcp_sk(sk);
pr_debug("msk=%p, token=%u", sk, subflow->token);
pr_debug("msk=%p, token=%u\n", sk, subflow->token);
mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
ack_seq++;
@ -2511,7 +2511,7 @@ bool mptcp_finish_join(struct sock *sk)
struct socket *parent_sock;
bool ret;
pr_debug("msk=%p, subflow=%p", msk, subflow);
pr_debug("msk=%p, subflow=%p\n", msk, subflow);
/* mptcp socket already closing? */
if (!mptcp_is_fully_established(parent))
@ -2673,7 +2673,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
struct socket *ssock;
int err;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
lock_sock(sock->sk);
ssock = __mptcp_nmpc_socket(msk);
@ -2703,7 +2703,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct socket *ssock;
int err;
pr_debug("msk=%p", msk);
pr_debug("msk=%p\n", msk);
lock_sock(sock->sk);
if (sock->sk->sk_state != TCP_LISTEN)
@ -2762,7 +2762,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
if (state == TCP_LISTEN)
return mptcp_check_readable(msk);
@ -2783,7 +2783,7 @@ static int mptcp_shutdown(struct socket *sock, int how)
struct mptcp_subflow_context *subflow;
int ret = 0;
pr_debug("sk=%p, how=%d", msk, how);
pr_debug("sk=%p, how=%d\n", msk, how);
lock_sock(sock->sk);

View File

@ -523,7 +523,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
pr_debug("TCP fallback already done (msk=%p)", msk);
pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
@ -537,7 +537,7 @@ static inline void mptcp_do_fallback(struct sock *sk)
__mptcp_do_fallback(msk);
}
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
static inline bool subflow_simultaneous_connect(struct sock *sk)
{

View File

@ -34,7 +34,7 @@ static void subflow_req_destructor(struct request_sock *req)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
pr_debug("subflow_req=%p", subflow_req);
pr_debug("subflow_req=%p\n", subflow_req);
if (subflow_req->msk)
sock_put((struct sock *)subflow_req->msk);
@ -121,7 +121,7 @@ static void subflow_init_req(struct request_sock *req,
struct mptcp_options_received mp_opt;
int ret;
pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
ret = __subflow_init_req(req, sk_listener);
if (ret)
@ -183,7 +183,7 @@ static void subflow_init_req(struct request_sock *req,
subflow_init_req_cookie_join_save(subflow_req, skb);
}
pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
subflow_req->remote_nonce, subflow_req->msk);
}
}
@ -306,7 +306,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
@ -321,7 +321,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_capable = 1;
subflow->can_ack = 1;
subflow->remote_key = mp_opt.sndr_key;
pr_debug("subflow=%p, remote_key=%llu", subflow,
pr_debug("subflow=%p, remote_key=%llu\n", subflow,
subflow->remote_key);
mptcp_finish_connect(sk);
} else if (subflow->request_join) {
@ -332,7 +332,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u\n", subflow,
subflow->thmac, subflow->remote_nonce);
if (!subflow_thmac_valid(subflow)) {
@ -371,7 +371,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
pr_debug("subflow=%p", subflow);
pr_debug("subflow=%p\n", subflow);
/* Never answer to SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@ -401,7 +401,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
pr_debug("subflow=%p", subflow);
pr_debug("subflow=%p\n", subflow);
if (skb->protocol == htons(ETH_P_IP))
return subflow_v4_conn_request(sk, skb);
@ -543,7 +543,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct sock *new_msk = NULL;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
/* After child creation we must look for 'mp_capable' even when options
* are not parsed
@ -692,7 +692,7 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
@ -768,7 +768,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
goto validate_seq;
}
pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d\n",
mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
mpext->data_len, mpext->data_fin);
@ -782,7 +782,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (data_len == 1) {
bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
mpext->dsn64);
pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
if (subflow->map_valid) {
/* A DATA_FIN might arrive in a DSS
* option before the previous mapping
@ -807,7 +807,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_fin_seq &= GENMASK_ULL(31, 0);
mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
data_fin_seq, mpext->dsn64);
}
@ -818,7 +818,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (!mpext->dsn64) {
map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
mpext->data_seq);
pr_debug("expanded seq=%llu", subflow->map_seq);
pr_debug("expanded seq=%llu\n", subflow->map_seq);
} else {
map_seq = mpext->data_seq;
}
@ -850,7 +850,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
subflow->map_data_len = data_len;
subflow->map_valid = 1;
subflow->mpc_map = mpext->mpc_map;
pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
pr_debug("new map seq=%llu subflow_seq=%u data_len=%u\n",
subflow->map_seq, subflow->map_subflow_seq,
subflow->map_data_len);
@ -880,7 +880,7 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
avail_len = skb->len - offset;
incr = limit >= avail_len ? avail_len + fin : limit;
pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
@ -901,7 +901,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
struct mptcp_sock *msk;
struct sk_buff *skb;
pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
pr_debug("msk=%p ssk=%p data_avail=%d skb=%p\n", subflow->conn, ssk,
subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
if (!skb_peek(&ssk->sk_receive_queue))
subflow->data_avail = 0;
@ -914,7 +914,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
u64 old_ack;
status = get_mapping_status(ssk, msk);
pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
pr_debug("msk=%p ssk=%p status=%d\n", msk, ssk, status);
if (status == MAPPING_INVALID) {
ssk->sk_err = EBADMSG;
goto fatal;
@ -953,7 +953,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
old_ack = READ_ONCE(msk->ack_seq);
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
ack_seq);
if (ack_seq == old_ack) {
subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
@ -991,7 +991,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
subflow->map_valid = 0;
subflow->data_avail = 0;
pr_debug("Done with mapping: seq=%u data_len=%u",
pr_debug("Done with mapping: seq=%u data_len=%u\n",
subflow->map_subflow_seq,
subflow->map_data_len);
}
@ -1079,7 +1079,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
if (likely(icsk->icsk_af_ops == target))
@ -1162,7 +1162,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
goto failed;
mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
subflow->local_id = local_id;
@ -1233,7 +1233,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
subflow = mptcp_subflow_ctx(sf->sk);
pr_debug("subflow=%p", subflow);
pr_debug("subflow=%p\n", subflow);
*new_sock = sf;
sock_hold(sk);
@ -1255,7 +1255,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
INIT_LIST_HEAD(&ctx->node);
pr_debug("subflow=%p", ctx);
pr_debug("subflow=%p\n", ctx);
ctx->tcp_sock = sk;
@ -1332,7 +1332,7 @@ static int subflow_ulp_init(struct sock *sk)
goto out;
}
pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
tp->is_mptcp = 1;
ctx->icsk_af_ops = icsk->icsk_af_ops;

View File

@ -785,12 +785,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
}
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
if (allocate_src)
q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
if (allocate_dst)
q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
}
found:
/* reserve queue for future packets in same flow */
reduced_hash = outer_hash + k;

View File

@ -733,11 +733,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
if (err != NET_XMIT_SUCCESS &&
net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch);
qdisc_tree_reduce_backlog(sch, 1,
pkt_len);
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}

View File

@ -1510,7 +1510,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_get_bss);
static void rb_insert_bss(struct cfg80211_registered_device *rdev,
static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *bss)
{
struct rb_node **p = &rdev->bss_tree.rb_node;
@ -1526,7 +1526,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
if (WARN_ON(!cmp)) {
/* will sort of leak this BSS */
return;
return false;
}
if (cmp < 0)
@ -1537,6 +1537,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
rb_link_node(&bss->rbn, parent, p);
rb_insert_color(&bss->rbn, &rdev->bss_tree);
return true;
}
static struct cfg80211_internal_bss *
@ -1563,6 +1564,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
return NULL;
}
static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *bss)
{
lockdep_assert_held(&rdev->bss_lock);
if (!rb_insert_bss(rdev, bss))
return;
list_add_tail(&bss->list, &rdev->bss_list);
rdev->bss_entries++;
}
static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *bss)
{
lockdep_assert_held(&rdev->bss_lock);
rb_erase(&bss->rbn, &rdev->bss_tree);
if (!rb_insert_bss(rdev, bss)) {
list_del(&bss->list);
if (!list_empty(&bss->hidden_list))
list_del_init(&bss->hidden_list);
if (!list_empty(&bss->pub.nontrans_list))
list_del_init(&bss->pub.nontrans_list);
rdev->bss_entries--;
}
rdev->bss_generation++;
}
static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *new)
{
@ -1838,9 +1867,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
bss_ref_get(rdev, pbss);
}
list_add_tail(&new->list, &rdev->bss_list);
rdev->bss_entries++;
rb_insert_bss(rdev, new);
cfg80211_insert_bss(rdev, new);
found = new;
}
@ -2702,10 +2729,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
rdev->bss_generation++;
}
rb_erase(&cbss->rbn, &rdev->bss_tree);
rb_insert_bss(rdev, cbss);
rdev->bss_generation++;
cfg80211_rehash_bss(rdev, cbss);
list_for_each_entry_safe(nontrans_bss, tmp,
&cbss->pub.nontrans_list,
@ -2713,9 +2737,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
bss = container_of(nontrans_bss,
struct cfg80211_internal_bss, pub);
bss->pub.channel = chan;
rb_erase(&bss->rbn, &rdev->bss_tree);
rb_insert_bss(rdev, bss);
rdev->bss_generation++;
cfg80211_rehash_bss(rdev, bss);
}
done:

View File

@ -1679,6 +1679,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
struct aa_profile *p;
p = aa_deref_parent(profile);
dent = prof_dir(p);
if (!dent) {
error = -ENOENT;
goto fail2;
}
/* adding to parent that previously didn't have children */
dent = aafs_create_dir("profiles", dent);
if (IS_ERR(dent))

View File

@ -4229,7 +4229,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
rcu_read_unlock();
if (hskp == NULL)
rc = netlbl_req_setattr(req, &skp->smk_netlabel);
rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
else
netlbl_req_delattr(req);

View File

@ -5067,6 +5067,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
/* forcibly mute the speaker output without caching; return true if updated */
static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
{
if (!nid)
return false;
if (!nid_has_mute(codec, nid, HDA_OUTPUT))
return false; /* no mute, skip */
if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
HDA_AMP_MUTE)
return false; /* both channels already muted, skip */
/* direct amp update without caching */
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
return true;
}
/**
* snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
* @codec: the HDA codec
*
* Forcibly mute the speaker outputs, to be called at suspend or shutdown.
*
* The mute state done by this function isn't cached, hence the original state
* will be restored at resume.
*
* Return true if the mute state has been changed.
*/
bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
const int *paths;
const struct nid_path *path;
int i, p, num_paths;
bool updated = false;
/* if already powered off, do nothing */
if (!snd_hdac_is_power_on(&codec->core))
return false;
if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
paths = spec->out_paths;
num_paths = spec->autocfg.line_outs;
} else {
paths = spec->speaker_paths;
num_paths = spec->autocfg.speaker_outs;
}
for (i = 0; i < num_paths; i++) {
path = snd_hda_get_path_from_idx(codec, paths[i]);
if (!path)
continue;
for (p = 0; p < path->depth; p++)
if (force_mute_output_path(codec, path->path[p]))
updated = true;
}
return updated;
}
EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
/**
* snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
* set up the hda_gen_spec

View File

@ -364,5 +364,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
int (*callback)(struct led_classdev *,
enum led_brightness));
bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
#endif /* __SOUND_HDA_GENERIC_H */
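The declaration above exports the snd_hda_gen_shutup_speakers() helper added in hda_generic.c; the Conexant driver below calls it from its reboot notifier. Because the mute it applies is written without caching, the previous amplifier state comes back on resume, so a codec driver could equally call it from a suspend hook. A hypothetical sketch (the callback name and its use are assumptions, not part of this merge):

/* Example only: silence the speakers before the codec powers down. */
static int example_codec_suspend(struct hda_codec *codec)
{
	snd_hda_gen_shutup_speakers(codec);
	return 0;
}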

View File

@ -181,6 +181,8 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
snd_hda_gen_shutup_speakers(codec);
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
@ -236,6 +238,7 @@ enum {
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
CXT_PINCFG_SWS_JS201D,
CXT_PINCFG_TOP_SPEAKER,
};
/* for hda_fixup_thinkpad_acpi() */
@ -903,6 +906,13 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_sws_js201d,
},
[CXT_PINCFG_TOP_SPEAKER] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x1d, 0x82170111 },
{ }
},
},
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@ -999,6 +1009,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
{}
};
@ -1018,6 +1030,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
{ .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
{}
};

View File

@ -6968,6 +6968,7 @@ enum {
ALC236_FIXUP_HP_GPIO_LED,
ALC236_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
ALC236_FIXUP_LENOVO_INV_DMIC,
ALC298_FIXUP_SAMSUNG_AMP,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
@ -8361,6 +8362,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc236_fixup_hp_mute_led_micmute_vref,
},
[ALC236_FIXUP_LENOVO_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic,
.chained = true,
.chain_id = ALC283_FIXUP_INT_MIC,
},
[ALC298_FIXUP_SAMSUNG_AMP] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_samsung_amp,
@ -9105,6 +9112,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@ -9355,6 +9363,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@ -9596,6 +9605,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
{.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
{}
};
#define ALC225_STANDARD_PINS \

View File

@ -4014,6 +4014,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
kfree(substream->runtime);
substream->runtime = NULL;
break;
default: