Merge b48d25e6ca on remote branch

Change-Id: I7ac1d8c55ec63b18319c67c8174ed4ad80763bc3
This commit is contained in:
Linux Build Service Account 2022-04-07 23:09:51 -07:00
commit 8b74941de1
53 changed files with 2850 additions and 2498 deletions

View File

@ -1 +1 @@
LTS_5.4.161_b9d179c605d3
LTS_5.4.161_5d7a491ae4a7

File diff suppressed because it is too large Load Diff

View File

@ -583,6 +583,7 @@
__kfifo_alloc
__kfifo_free
kmalloc_order_trace
ktime_get_with_offset
__local_bh_enable_ip
memmove
param_ops_ulong
@ -590,6 +591,7 @@
__rcu_read_lock
__rcu_read_unlock
regulatory_hint
rfc1042_header
skb_copy
skb_realloc_headroom
strlcat
@ -660,6 +662,7 @@
phy_pm_runtime_get_sync
phy_pm_runtime_put_sync
platform_get_irq_byname_optional
pm_runtime_barrier
system_freezable_wq
usb_add_gadget_udc
usb_decode_ctrl
@ -733,7 +736,6 @@
generic_file_read_iter
generic_file_splice_read
generic_read_dir
generic_shutdown_super
__get_free_pages
get_zeroed_page
iget5_locked
@ -745,6 +747,7 @@
kernel_read
kernel_write
kern_path
kill_anon_super
kobject_create_and_add
kobject_put
lockref_get

View File

@ -380,3 +380,4 @@ CONFIG_GKI_HIDDEN_DRM_CONFIGS=y
CONFIG_SDX_EXT_IPC=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_MACSEC=y
CONFIG_SYSVIPC=y

View File

@ -317,6 +317,7 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y

View File

@ -9,7 +9,6 @@ CONFIG_QCOM_COMMAND_DB=m
CONFIG_PINCTRL_HOLI=m
CONFIG_REGULATOR_STUB=m
CONFIG_REGULATOR_PROXY_CONSUMER=m
CONFIG_KEYBOARD_GPIO=m
CONFIG_EXTCON=m
CONFIG_SPMI_MSM_PMIC_ARB=m
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=m
@ -52,7 +51,6 @@ CONFIG_QCOM_SMP2P=m
CONFIG_QCOM_SMSM=m
CONFIG_MSM_QMP=m
CONFIG_QCOM_SMP2P_SLEEPSTATE=m
CONFIG_RPMSG_CHAR=m
CONFIG_RPMSG_QCOM_GLINK_RPM=m
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_QCOM_GLINK=m
@ -72,7 +70,6 @@ CONFIG_MSM_SERVICE_LOCATOR=m
CONFIG_MSM_SERVICE_NOTIFIER=m
CONFIG_QSEE_IPC_IRQ=m
CONFIG_RPMSG_QCOM_GLINK_SPSS=m
CONFIG_REGULATOR_FIXED_VOLTAGE=m
CONFIG_REGULATOR_QTI_FIXED_VOLTAGE=m
CONFIG_REGULATOR_REFGEN=m
CONFIG_COMMON_CLK_QCOM=m
@ -186,15 +183,11 @@ CONFIG_SND_USB_AUDIO_QMI=m
# CONFIG_USB_STORAGE_KARMA is not set
# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_STORAGE_ENE_UB6250 is not set
# CONFIG_USB_UAS is not set
CONFIG_USB_STORAGE=m
CONFIG_USB_CONFIGFS_NCM=m
CONFIG_USB_CONFIGFS_F_CCID=m
CONFIG_USB_CONFIGFS_F_CDEV=m
CONFIG_USB_CONFIGFS_F_GSI=m
CONFIG_MSM_QUSB_PHY=m
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_LEDS_QPNP_FLASH_V2=m
CONFIG_QTI_IOMMU_SUPPORT=m
CONFIG_MMC_SDHCI_MSM=m

View File

@ -290,6 +290,7 @@ CONFIG_INPUT_UINPUT=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=0
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y

View File

@ -4,7 +4,7 @@
#include <uapi/asm/setup.h>
#define COMMAND_LINE_SIZE 2048
#define COMMAND_LINE_SIZE 4096
#include <linux/linkage.h>
#include <asm/page_types.h>

View File

@ -662,7 +662,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* Uncomment this block to log an error on every VERIFY failure */
@ -529,6 +530,8 @@ struct fastrpc_apps {
struct hlist_head drivers;
spinlock_t hlock;
struct device *dev;
/* Indicates fastrpc device node info */
struct device *dev_fastrpc;
unsigned int latency;
int rpmsg_register;
bool legacy_remote_heap;
@ -547,6 +550,8 @@ struct fastrpc_apps {
void *ramdump_handle;
bool enable_ramdump;
struct mutex mut_uid;
/* Indicates cdsp device status */
int remote_cdsp_status;
};
struct fastrpc_mmap {
@ -2972,6 +2977,39 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
return err;
}
/*
 * name : fastrpc_get_dsp_status
 * @in : pointer to fastrpc_apps
 * @out : void
 * Description : This function reads the property
 * string from device node and updates the cdsp device
 * availability status if the node belongs to cdsp device.
 */
static void fastrpc_get_dsp_status(struct fastrpc_apps *me)
{
	int ret = -1;
	struct device_node *node = NULL;
	const char *name = NULL;

	do {
		node = of_find_compatible_node(node, NULL, "qcom,pil-tz-generic");
		if (!node) {
			ADSPRPC_ERR("adsprpc: Error: %s: cdsp node not found\n", __func__);
			break;
		}
		/*
		 * Check the read result before using "name": when the
		 * "qcom,firmware-name" property is absent, "name" stays
		 * NULL and strcmp(NULL, ...) is undefined behavior.
		 */
		ret = of_property_read_string(node, "qcom,firmware-name", &name);
		if (ret == 0 && name && !strcmp(name, "cdsp")) {
			/* Node found: record whether the cdsp device is enabled */
			ret = of_device_is_available(node);
			me->remote_cdsp_status = ret;
			ADSPRPC_INFO("adsprpc: %s: cdsp node found with ret:%x\n",
				__func__, ret);
			/*
			 * NOTE(review): the reference taken by
			 * of_find_compatible_node() is not released on this
			 * early exit — consider of_node_put(node) here.
			 */
			break;
		}
	} while (1);
}
static void fastrpc_init(struct fastrpc_apps *me)
{
int i;
@ -6683,6 +6721,52 @@ static int fastrpc_setup_service_locator(struct device *dev,
return err;
}
/*
 * name : remote_cdsp_status_show
 * @in : dev : pointer to device node
 * attr: pointer to device attribute
 * @out : buf : Contains remote cdsp status
 * @Description : This function updates the buf with
 * remote cdsp status by reading the fastrpc node
 * @returns : bytes written to buf
 */
static ssize_t remote_cdsp_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct fastrpc_apps *me = &gfa;
	int is_main_dev;

	/*
	 * Only the probed main fastrpc compute device may report a
	 * status; otherwise write nothing (0 bytes) to the buffer.
	 */
	is_main_dev = dev && dev->driver &&
		of_device_is_compatible(dev->of_node, "qcom,msm-fastrpc-compute");
	if (!is_main_dev) {
		ADSPRPC_ERR(
			"adsprpc: Error: %s: driver not probed yet or not the main device\n",
			__func__);
		return 0;
	}

	return scnprintf(buf, PAGE_SIZE, "%d",
			me->remote_cdsp_status);
}
/* Remote cdsp status attribute declaration as read only */
static DEVICE_ATTR_RO(remote_cdsp_status);

/* Declaring attribute for remote dsp */
static struct attribute *msm_remote_dsp_attrs[] = {
	&dev_attr_remote_cdsp_status.attr,
	NULL
};

/* Defining remote dsp attributes in attributes group */
static struct attribute_group msm_remote_dsp_attr_group = {
	.attrs = msm_remote_dsp_attrs,
};
static int fastrpc_probe(struct platform_device *pdev)
{
int err = 0;
@ -6693,6 +6777,14 @@ static int fastrpc_probe(struct platform_device *pdev)
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute")) {
me->dev_fastrpc = dev;
err = sysfs_create_group(&pdev->dev.kobj, &msm_remote_dsp_attr_group);
if (err) {
ADSPRPC_ERR(
"adsprpc: Error: %s: initialization of sysfs create group failed with %d\n",
__func__, err);
goto bail;
}
init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
&gcinfo[0].rhvm);
fastrpc_init_privileged_gids(dev, "qcom,fastrpc-gids",
@ -6814,6 +6906,7 @@ static int __init fastrpc_device_init(void)
}
memset(me, 0, sizeof(*me));
fastrpc_init(me);
fastrpc_get_dsp_status(me);
me->dev = NULL;
me->legacy_remote_heap = false;
VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));

View File

@ -182,7 +182,7 @@ EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);
static void etr_pcie_close_channel(struct byte_cntr *byte_cntr_data)
{
if (!byte_cntr_data)
if (!byte_cntr_data || !byte_cntr_data->pcie_chan_opened)
return;
mutex_lock(&byte_cntr_data->byte_cntr_lock);

View File

@ -1235,11 +1235,15 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
pm_runtime_mark_last_busy(gi2c->dev);
pm_runtime_put_autosuspend(gi2c->dev);
}
gi2c->cur = NULL;
gi2c->err = 0;
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"i2c txn ret:%d\n", ret);
return ret;
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c txn ret:%d, num:%d, err%:%d\n", ret, num, gi2c->err);
if (gi2c->err)
return gi2c->err;
else
return ret;
}
static u32 geni_i2c_func(struct i2c_adapter *adap)

View File

@ -1474,7 +1474,7 @@ static int haptics_open_loop_drive_config(struct haptics_chip *chip, bool en)
u8 val;
if ((is_boost_vreg_enabled_in_open_loop(chip) ||
is_haptics_external_powered(chip)) && en) {
chip->hboost_enabled || is_haptics_external_powered(chip)) && en) {
/* Force VREG_RDY */
rc = haptics_masked_write(chip, chip->cfg_addr_base,
HAP_CFG_VSET_CFG_REG, FORCE_VREG_RDY_BIT,
@ -1818,13 +1818,17 @@ static int haptics_set_manual_rc_clk_cal(struct haptics_chip *chip)
static int haptics_update_fifo_samples(struct haptics_chip *chip,
u8 *samples, u32 length)
{
int rc, count, i;
int rc = 0, count, i, remain;
u8 tmp[HAP_PTN_V2_FIFO_DIN_NUM] = {0};
if (samples == NULL) {
dev_err(chip->dev, "no FIFO samples available\n");
return -EINVAL;
}
if (!length)
return 0;
if (chip->ptn_revision == HAP_PTN_V1) {
for (i = 0; i < length; i++) {
rc = haptics_update_fifo_sample_v1(chip, samples[i]);
@ -1833,6 +1837,7 @@ static int haptics_update_fifo_samples(struct haptics_chip *chip,
}
} else {
count = length / HAP_PTN_V2_FIFO_DIN_NUM;
remain = length % HAP_PTN_V2_FIFO_DIN_NUM;
for (i = 0; i < count; i++) {
rc = haptics_update_fifo_sample_v2(chip,
samples, HAP_PTN_V2_FIFO_DIN_NUM);
@ -1842,16 +1847,26 @@ static int haptics_update_fifo_samples(struct haptics_chip *chip,
samples += HAP_PTN_V2_FIFO_DIN_NUM;
}
if (length % HAP_PTN_V2_FIFO_DIN_NUM) {
rc = haptics_update_fifo_sample_v2(chip,
samples,
length % HAP_PTN_V2_FIFO_DIN_NUM);
if (rc < 0)
return rc;
if (remain) {
/*
* In HAP_PTN_V2 module, when 1-byte FIFO write clashes
* with the HW FIFO read operation, the HW will only read
* 1 valid byte in every 4 bytes FIFO samples. So avoid
* this by keeping the samples 4-byte aligned and always
* use 4-byte write for HAP_PTN_V2 module.
*/
if (chip->ptn_revision == HAP_PTN_V2) {
memcpy(tmp, samples, remain);
rc = haptics_update_fifo_sample_v2(chip,
tmp, HAP_PTN_V2_FIFO_DIN_NUM);
} else {
rc = haptics_update_fifo_sample_v2(chip,
samples, remain);
}
}
}
return 0;
return rc;
}
static int haptics_set_fifo_playrate(struct haptics_chip *chip,
@ -1984,6 +1999,10 @@ static int haptics_set_fifo(struct haptics_chip *chip, struct fifo_cfg *fifo)
return available;
num = min_t(u32, available, num);
/* Keep the FIFO programming 4-byte aligned if FIFO refilling is needed */
if ((num < fifo->num_s) && (num % HAP_PTN_V2_FIFO_DIN_NUM))
num = round_down(num, HAP_PTN_V2_FIFO_DIN_NUM);
rc = haptics_update_fifo_samples(chip, fifo->samples, num);
if (rc < 0) {
dev_err(chip->dev, "write FIFO samples failed, rc=%d\n", rc);
@ -2875,23 +2894,18 @@ static irqreturn_t fifo_empty_irq_handler(int irq, void *data)
if (num < 0)
goto unlock;
/*
* With HAPTICS_PATTERN module revision 2.0 and above, if use
* 1-byte write before 4-byte write, the hardware would insert
* zeros in between to keep the FIFO samples 4-byte aligned, and
* the inserted 0 values would cause HW stop driving hence spurs
* will be seen on the haptics output. So only use 1-byte write
* at the end of FIFO streaming.
*/
if (samples_left <= num)
num = samples_left;
else if ((chip->ptn_revision >= HAP_PTN_V2) &&
(num % HAP_PTN_V2_FIFO_DIN_NUM))
num -= (num % HAP_PTN_V2_FIFO_DIN_NUM);
samples = fifo->samples + status->samples_written;
/* Write more pattern data into FIFO memory. */
/*
* Always use 4-byte burst write in the middle of FIFO programming to
* avoid HW padding zeros during 1-byte write which would cause the HW
* stop driving for the unexpected padding zeros.
*/
if (num < samples_left)
num = round_down(num, HAP_PTN_V2_FIFO_DIN_NUM);
else
num = samples_left;
rc = haptics_update_fifo_samples(chip, samples, num);
if (rc < 0) {
dev_err(chip->dev, "Update FIFO samples failed, rc=%d\n",

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/
@ -133,6 +133,9 @@ int qcom_icc_rpm_set(struct icc_node *src, struct icc_node *dst)
do_div(clk_rate, qn->buswidth);
bus_clk_rate[i] = max(bus_clk_rate[i], clk_rate);
if (bus_clk_rate[i] > RPM_CLK_MAX_LEVEL)
bus_clk_rate[i] = RPM_CLK_MAX_LEVEL;
}
}

View File

@ -911,6 +911,13 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
prop_rsp_pkt->num_params,
prop_rsp_pkt->prop_param[0]);
if (prop_rsp_pkt->header.size <
sizeof(struct ipc_msg_header_pkt)) {
pr_err("Invalid rsp pkt size %d\n",
prop_rsp_pkt->header.size);
break;
}
host_ctx->cmd_ret_status = prop_rsp_pkt->header.status;
if (prop_rsp_pkt->num_params > 0) {

View File

@ -95,7 +95,7 @@ static DEFINE_SPINLOCK(ubi_devices_lock);
static ssize_t version_show(struct class *class, struct class_attribute *attr,
char *buf)
{
return scnprintf(buf, sizeof(int), "%d\n", UBI_VERSION);
return scnprintf(buf, PAGE_SIZE, "%d\n", UBI_VERSION);
}
static CLASS_ATTR_RO(version);
@ -376,49 +376,49 @@ static ssize_t dev_attribute_show(struct device *dev,
return -ENODEV;
if (attr == &dev_eraseblock_size)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->leb_size);
else if (attr == &dev_avail_eraseblocks)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->avail_pebs);
else if (attr == &dev_total_eraseblocks)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->good_peb_count);
else if (attr == &dev_volumes_count)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->max_ec);
else if (attr == &dev_reserved_for_bad)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->bad_peb_count);
else if (attr == &dev_max_vol_count)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->vtbl_slots);
else if (attr == &dev_min_io_size)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->min_io_size);
else if (attr == &dev_bgt_enabled)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->mtd->index);
else if (attr == &dev_ro_mode)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
ubi->ro_mode);
else if (attr == &dev_mtd_trigger_scrub)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
atomic_read(&ubi->scrub_work_count));
else if (attr == &dev_mtd_max_scrub_sqnum)
ret = scnprintf(buf, sizeof(unsigned long long), "%llu\n",
ret = scnprintf(buf, PAGE_SIZE, "%llu\n",
get_max_sqnum(ubi));
else if (attr == &dev_mtd_min_scrub_sqnum)
ret = scnprintf(buf, sizeof(unsigned long long), "%llu\n",
ret = scnprintf(buf, PAGE_SIZE, "%llu\n",
ubi_wl_scrub_get_min_sqnum(ubi));
else
ret = -EINVAL;
@ -516,7 +516,7 @@ static int uif_init(struct ubi_device *ubi)
int i, err;
dev_t dev;
scnprintf(ubi->ubi_name, sizeof(UBI_NAME_STR) + 5,
scnprintf(ubi->ubi_name, sizeof(ubi->ubi_name),
UBI_NAME_STR "%d", ubi->ubi_num);
/*

View File

@ -54,7 +54,7 @@ static struct device_attribute attr_vol_upd_marker =
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, size;
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
struct ubi_device *ubi;
@ -73,35 +73,32 @@ static ssize_t vol_attribute_show(struct device *dev,
spin_unlock(&ubi->volumes_lock);
if (attr == &attr_vol_reserved_ebs)
ret = scnprintf(buf, sizeof(int), "%d\n", vol->reserved_pebs);
ret = scnprintf(buf, PAGE_SIZE, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
const char *tp;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
tp = "dynamic";
size = 8;
} else {
else
tp = "static";
size = 7;
}
ret = scnprintf(buf, size, "%s\n", tp);
ret = scnprintf(buf, PAGE_SIZE, "%s\n", tp);
} else if (attr == &attr_vol_name)
ret = scnprintf(buf, vol->name_len + 1, "%s\n",
ret = scnprintf(buf, PAGE_SIZE, "%s\n",
vol->name);
else if (attr == &attr_vol_corrupted)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
vol->alignment);
else if (attr == &attr_vol_usable_eb_size)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
vol->usable_leb_size);
else if (attr == &attr_vol_data_bytes)
ret = scnprintf(buf, sizeof(unsigned long long), "%lld\n",
ret = scnprintf(buf, PAGE_SIZE, "%lld\n",
vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = scnprintf(buf, sizeof(int), "%d\n",
ret = scnprintf(buf, PAGE_SIZE, "%d\n",
vol->upd_marker);
else
/* This must be a bug */

View File

@ -136,12 +136,36 @@ static int cnss_stats_show_state(struct seq_file *s,
return 0;
}
/*
 * Dump firmware capability details (chip/board/SOC IDs and firmware
 * version information) into the seq_file. Prints nothing until the
 * firmware has reported ready.
 */
static int cnss_stats_show_capability(struct seq_file *s,
				      struct cnss_plat_data *plat_priv)
{
	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state))
		return 0;

	seq_puts(s, "\n<---------------- FW Capability ----------------->\n");
	seq_printf(s, "Chip ID: 0x%x\n", plat_priv->chip_info.chip_id);
	seq_printf(s, "Chip family: 0x%x\n",
		   plat_priv->chip_info.chip_family);
	seq_printf(s, "Board ID: 0x%x\n",
		   plat_priv->board_info.board_id);
	seq_printf(s, "SOC Info: 0x%x\n", plat_priv->soc_info.soc_id);
	seq_printf(s, "Firmware Version: 0x%x\n",
		   plat_priv->fw_version_info.fw_version);
	seq_printf(s, "Firmware Build Timestamp: %s\n",
		   plat_priv->fw_version_info.fw_build_timestamp);
	seq_printf(s, "Firmware Build ID: %s\n",
		   plat_priv->fw_build_id);

	return 0;
}
/* seq_file show callback: print driver state, then FW capability info. */
static int cnss_stats_show(struct seq_file *s, void *data)
{
	/* s->private carries the platform data set at debugfs file creation */
	struct cnss_plat_data *plat_priv = s->private;

	cnss_stats_show_state(s, plat_priv);
	cnss_stats_show_capability(s, plat_priv);

	return 0;
}

View File

@ -2003,6 +2003,11 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt)
link_fail:
dev->power_on = false;
if (dev->phy_rev >= 3)
ep_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, BIT(8), 0);
else
ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, BIT(0), 0);
if (!ep_pcie_debug_keep_resource)
ep_pcie_pipe_clk_deinit(dev);
pipe_clk_fail:
@ -2021,7 +2026,6 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt)
int ep_pcie_core_disable_endpoint(void)
{
u32 val = 0;
unsigned long irqsave_flags;
struct ep_pcie_dev_t *dev = &ep_pcie_dev;
@ -2051,10 +2055,6 @@ int ep_pcie_core_disable_endpoint(void)
}
dev->conf_ipa_msi_iatu = false;
val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
EP_PCIE_DBG(dev, "PCIe V%d: LTSSM_STATE during disable:0x%x\n",
dev->rev, (val >> 0xC) & 0x3f);
EP_PCIE_DBG2(dev, "PCIe V%d: Set pcie_disconnect_req during D3_COLD\n",
dev->rev);
ep_pcie_write_reg_field(dev->tcsr_perst_en,

File diff suppressed because it is too large Load Diff

View File

@ -673,7 +673,9 @@ enum mhi_msg_level {
extern uint32_t bhi_imgtxdb;
extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern enum mhi_msg_level mhi_ipc_err_msg_lvl;
extern void *mhi_ipc_log;
extern void *mhi_ipc_err_log;
#define mhi_log(_msg_lvl, _msg, ...) do { \
if (_msg_lvl >= mhi_msg_lvl) { \
@ -682,7 +684,11 @@ extern void *mhi_ipc_log;
} \
if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
ipc_log_string(mhi_ipc_log, \
"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
} \
if (mhi_ipc_err_log && (_msg_lvl >= mhi_ipc_err_msg_lvl)) { \
ipc_log_string(mhi_ipc_err_log, \
"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
} \
} while (0)

View File

@ -43,7 +43,7 @@ enum mhi_dev_net_dbg_lvl {
MSG_NET_reserved = 0x80000000
};
static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_CRITICAL;
static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_ERROR;
static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_VERBOSE;
static void *mhi_net_ipc_log;
@ -121,7 +121,7 @@ static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt)
chan_attrib->dir = MHI_DIR_OUT;
chan_attrib->chan_id = channel;
chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE;
mhi_dev_net_log(MHI_INFO, "Write chan attributes dir %d chan_id %d\n",
mhi_dev_net_log(MHI_INFO, "Write ch attributes dir %d ch_id:%d\n",
chan_attrib->dir, chan_attrib->chan_id);
channel = MHI_CLIENT_IP_SW_4_IN;
@ -129,7 +129,7 @@ static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt)
chan_attrib->dir = MHI_DIR_IN;
chan_attrib->chan_id = channel;
chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE;
mhi_dev_net_log(MHI_INFO, "Read chan attributes dir %d chan_id %d\n",
mhi_dev_net_log(MHI_INFO, "Read ch attributes dir %d ch_id %d\n",
chan_attrib->dir, chan_attrib->chan_id);
return 0;
}
@ -144,7 +144,7 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
struct mhi_req *wreq = NULL;
if (mhi_dev_channel_isempty(client->in_handle)) {
mhi_dev_net_log(MHI_INFO, "%s stop network xmmit\n", __func__);
mhi_dev_net_log(MHI_INFO, "stop network xmmit\n");
netif_stop_queue(client->dev);
return;
}
@ -176,8 +176,9 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
spin_unlock_irqrestore(&client->wrt_lock, flags);
xfer_data = mhi_dev_write_channel(wreq);
if (xfer_data <= 0) {
pr_err("%s(): Failed to write skb len %d\n",
__func__, skb->len);
mhi_dev_net_log(MHI_ERROR,
"Failed to write skb len %d\n",
skb->len);
kfree_skb(skb);
return;
}
@ -186,8 +187,7 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
/* Check if free buffers are available*/
if (mhi_dev_channel_isempty(client->in_handle)) {
mhi_dev_net_log(MHI_INFO,
"%s buffers are full stop xmit\n",
__func__);
"buffers are full stop xmit\n");
netif_stop_queue(client->dev);
break;
}
@ -279,9 +279,9 @@ static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
struct mhi_req, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_ATOMIC);
skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_KERNEL);
if (skb == NULL) {
pr_err("%s(): skb alloc failed\n", __func__);
mhi_dev_net_log(MHI_ERROR, "skb alloc failed\n");
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
list_add_tail(&req->list, &mhi_handle->rx_buffers);
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
@ -298,7 +298,8 @@ static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
bytes_avail = mhi_dev_read_channel(req);
if (bytes_avail < 0) {
pr_err("Failed to read chan %d bytes_avail = %d\n",
mhi_dev_net_log(MHI_ERROR,
"Failed to read ch_id:%d bytes_avail = %d\n",
chan, bytes_avail);
spin_lock_irqsave(&mhi_handle->rd_lock, flags);
kfree_skb(skb);
@ -473,7 +474,8 @@ static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr)
mhi_dev_net_ether_setup :
mhi_dev_net_rawip_setup);
if (!netdev) {
pr_err("Failed to allocate netdev for mhi_dev_net\n");
mhi_dev_net_log(MHI_ERROR,
"Failed to allocate netdev for mhi_dev_net\n");
goto net_dev_alloc_fail;
}
@ -488,7 +490,8 @@ static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr)
*mhi_dev_net_ctxt = mhi_dev_net_ptr;
ret = register_netdev(mhi_dev_net_ptr->dev);
if (ret) {
pr_err("Failed to register mhi_dev_net device\n");
mhi_dev_net_log(MHI_ERROR,
"Failed to register mhi_dev_net device\n");
goto net_dev_reg_fail;
}
mhi_dev_net_log(MHI_INFO, "Successfully registred mhi_dev_net\n");
@ -510,18 +513,18 @@ static int mhi_dev_net_open_chan_create_netif(struct mhi_dev_net_client *client)
struct list_head *cp, *q;
struct mhi_req *mreq;
mhi_dev_net_log(MHI_DBG, "opening OUT %d IN %d channels\n",
mhi_dev_net_log(MHI_DBG, "opening OUT ch_id:%d IN ch_id:%d channels\n",
client->out_chan,
client->in_chan);
mhi_dev_net_log(MHI_DBG,
"Initializing inbound chan %d.\n",
"Initializing inbound ch_id:%d.\n",
client->in_chan);
rc = mhi_dev_open_channel(client->out_chan, &client->out_handle,
mhi_net_ctxt.net_event_notifier);
if (rc < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open chan %d, ret 0x%x\n",
"Failed to open ch_id:%d, ret 0x%x\n",
client->out_chan, rc);
goto handle_not_rdy_err;
} else
@ -531,13 +534,13 @@ static int mhi_dev_net_open_chan_create_netif(struct mhi_dev_net_client *client)
mhi_net_ctxt.net_event_notifier);
if (rc < 0) {
mhi_dev_net_log(MHI_ERROR,
"Failed to open chan %d, ret 0x%x\n",
"Failed to open ch_id:%d, ret 0x%x\n",
client->in_chan, rc);
goto handle_in_err;
} else
atomic_set(&client->tx_enabled, 1);
mhi_dev_net_log(MHI_INFO, "IN %d, OUT %d channels are opened",
mhi_dev_net_log(MHI_INFO, "IN ch_id:%d, OUT ch_id:%d channels are opened",
client->in_chan, client->out_chan);
INIT_LIST_HEAD(&client->rx_buffers);
@ -546,12 +549,14 @@ static int mhi_dev_net_open_chan_create_netif(struct mhi_dev_net_client *client)
ret = mhi_dev_net_alloc_read_reqs(client);
if (ret) {
pr_err("failed to allocate rx req buffers\n");
mhi_dev_net_log(MHI_ERROR,
"failed to allocate rx req buffers\n");
goto rx_req_failed;
}
ret = mhi_dev_net_alloc_write_reqs(client);
if (ret) {
pr_err("failed to allocate write req buffers\n");
mhi_dev_net_log(MHI_ERROR,
"failed to allocate write req buffers\n");
goto tx_req_failed;
}
if (atomic_read(&client->tx_enabled)) {
@ -607,7 +612,8 @@ static int mhi_dev_net_rgstr_client(struct mhi_dev_net_client *client, int idx)
mutex_init(&client->out_chan_lock);
spin_lock_init(&client->wrt_lock);
spin_lock_init(&client->rd_lock);
mhi_dev_net_log(MHI_INFO, "Registering out %d, In %d channels\n",
mhi_dev_net_log(MHI_INFO, "Registering OUT ch_id:%d\t"
"IN ch_id:%d channels\n",
client->out_chan, client->in_chan);
return 0;
}
@ -648,20 +654,20 @@ static void mhi_dev_net_state_cb(struct mhi_dev_client_cb_data *cb_data)
ret = mhi_ctrl_state_info(mhi_client->in_chan, &info_in_ch);
if (ret) {
mhi_dev_net_log(MHI_ERROR,
"Failed to obtain in_channel %d state\n",
"Failed to obtain IN ch_id:%d state\n",
mhi_client->in_chan);
return;
}
ret = mhi_ctrl_state_info(mhi_client->out_chan, &info_out_ch);
if (ret) {
mhi_dev_net_log(MHI_ERROR,
"Failed to obtain out_channel %d state\n",
"Failed to obtain OUT ch_id:%d state\n",
mhi_client->out_chan);
return;
}
mhi_dev_net_log(MHI_MSG_VERBOSE, "in_channel :%d, state :%d\n",
mhi_dev_net_log(MHI_MSG_VERBOSE, "IN ch_id::%d, state :%d\n",
mhi_client->in_chan, info_in_ch);
mhi_dev_net_log(MHI_MSG_VERBOSE, "out_channel :%d, state :%d\n",
mhi_dev_net_log(MHI_MSG_VERBOSE, "OUT ch_id:%d, state :%d\n",
mhi_client->out_chan, info_out_ch);
if (info_in_ch == MHI_STATE_CONNECTED &&
info_out_ch == MHI_STATE_CONNECTED) {

View File

@ -101,7 +101,7 @@ static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
chid_idx = chdb_id/32;
if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB) {
pr_err("Invalid channel id:%d\n", chid_idx);
mhi_log(MHI_MSG_ERROR, "Invalid ch_id:%d\n", chid_idx);
return -EINVAL;
}
@ -633,7 +633,7 @@ int mhi_dev_restore_mmio(struct mhi_dev *dev)
rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_MASK_A7_n(i),
dev->chdb[i].mask);
if (rc) {
mhi_log(MHI_MSG_VERBOSE,
mhi_log(MHI_MSG_ERROR,
"Error writing enable for A7\n");
return rc;
}

View File

@ -94,7 +94,7 @@ int mhi_dev_cache_ring(struct mhi_dev_ring *ring, size_t wr_offset)
if (ring->wr_offset == wr_offset) {
mhi_log(MHI_MSG_VERBOSE,
"nothing to cache for ring %d, local wr_ofst %lu\n",
"nothing to cache for ring_id:%d, local wr_ofst %lu\n",
ring->id, ring->wr_offset);
mhi_log(MHI_MSG_VERBOSE,
"new wr_offset %lu\n", wr_offset);
@ -112,16 +112,16 @@ int mhi_dev_cache_ring(struct mhi_dev_ring *ring, size_t wr_offset)
ring->id < (mhi_ctx->ev_ring_start +
mhi_ctx->cfg.event_rings)) {
mhi_log(MHI_MSG_VERBOSE,
"not caching event ring %d\n", ring->id);
"not caching event ring_id:%d\n", ring->id);
return 0;
}
mhi_log(MHI_MSG_VERBOSE, "caching ring %d, start %lu, end %lu\n",
mhi_log(MHI_MSG_VERBOSE, "caching ring_id:%d, start %lu, end %lu\n",
ring->id, old_offset, wr_offset);
if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
mhi_log(MHI_MSG_ERROR,
"failed to fetch elements for ring %d, start %lu, end %lu\n",
"failed to fetch elements for ring_id:%d, start %lu, end %lu\n",
ring->id, old_offset, wr_offset);
return -EINVAL;
}
@ -143,28 +143,28 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
case RING_TYPE_CMD:
rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
if (rc) {
pr_err("%s: CMD DB read failed\n", __func__);
mhi_log(MHI_MSG_ERROR, "CMD DB read failed\n");
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"ring %d wr_offset from db 0x%lx\n",
"ring_id:%d wr_offset from db 0x%lx\n",
ring->id, (size_t) wr_offset);
break;
case RING_TYPE_ER:
rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
if (rc) {
pr_err("%s: EVT DB read failed\n", __func__);
mhi_log(MHI_MSG_ERROR, "EVT DB read failed\n");
return rc;
}
break;
case RING_TYPE_CH:
rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
if (rc) {
pr_err("%s: CH DB read failed\n", __func__);
mhi_log(MHI_MSG_ERROR, "CH DB read failed\n");
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"ring %d wr_offset from db 0x%lx\n",
"ring_id:%d wr_offset from db 0x%lx\n",
ring->id, (size_t) wr_offset);
break;
default:
@ -192,14 +192,17 @@ int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, size_t offset)
/* get the element and invoke the respective callback */
el = &ring->ring_cache[offset];
mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr);
mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, offset:%lu\n",
if (ring->type == RING_TYPE_CH) {
mhi_log(MHI_MSG_VERBOSE, "TRE data buff ptr : 0x%llx\n",
el->tre.data_buf_ptr);
mhi_log(MHI_MSG_VERBOSE, "TRE len : 0x%x, rd_offset:%lu\n",
el->tre.len, offset);
}
if (ring->ring_cb)
return ring->ring_cb(ring->mhi_dev, el, (void *)ring);
else
mhi_log(MHI_MSG_ERROR, "No callback registered for ring %d\n",
mhi_log(MHI_MSG_ERROR, "No callback registered for ring_id:%d\n",
ring->id);
return 0;
@ -209,54 +212,52 @@ EXPORT_SYMBOL(mhi_dev_process_ring_element);
int mhi_dev_process_ring(struct mhi_dev_ring *ring)
{
int rc = 0;
union mhi_dev_ring_element_type *el;
if (WARN_ON(!ring))
return -EINVAL;
mhi_log(MHI_MSG_VERBOSE,
"Before wr update ring_id (%d) element (%lu) with wr:%lu\n",
"Before wr update ring_id:%d rp:%lu wp:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
rc = mhi_dev_update_wr_offset(ring);
if (rc) {
mhi_log(MHI_MSG_ERROR,
"Error updating write-offset for ring %d\n",
"Error updating write-offset for ring_id:%d\n",
ring->id);
return rc;
}
/* get the element and invoke the respective callback */
el = &ring->ring_cache[ring->wr_offset];
mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr);
mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, wr_offset:%lu\n",
el->tre.len, ring->wr_offset);
if (ring->type == RING_TYPE_CH) {
/* notify the clients that there are elements in the ring */
rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
if (rc)
pr_err("Error fetching elements\n");
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"After ring update ring_id (%d) element (%lu) with wr:%lu\n",
"After wp update ring_id:%d rp:%lu with wr:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
/*
* Notify the clients that there are elements in the ring.
* For channels, simply notify client for the first element (no need to
* notify for all the elements) and return (no need to update rd
* pointer). When client consumes the elements, rp will be updated.
*/
if (ring->type == RING_TYPE_CH) {
rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
if (rc)
mhi_log(MHI_MSG_ERROR, "Error fetching elements\n");
return rc;
}
while (ring->rd_offset != ring->wr_offset) {
mhi_log(MHI_MSG_VERBOSE,
"Processing ring_id:%d rd_offset:%lu, wr_offset:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
if (rc) {
mhi_log(MHI_MSG_ERROR,
"Error processing ring (%d) element (%lu)\n",
"Error processing ring_id:%d element(rp):%lu\n",
ring->id, ring->rd_offset);
return rc;
}
mhi_log(MHI_MSG_VERBOSE,
"Processing ring (%d) rd_offset:%lu, wr_offset:%lu\n",
ring->id, ring->rd_offset, ring->wr_offset);
mhi_dev_ring_inc_index(ring, ring->rd_offset);
}
@ -297,7 +298,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
ring->wr_offset - 1;
if (num_free_elem < num_elem) {
mhi_log(MHI_MSG_ERROR, "No space to add %d elem in ring (%d)\n",
mhi_log(MHI_MSG_ERROR, "No space to add %d elem in ring_id:%d\n",
num_elem, ring->id);
return -EINVAL;
}
@ -312,8 +313,9 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
mhi_dev_ring_inc_index(ring, ring->rd_offset);
mhi_log(MHI_MSG_VERBOSE,
"Writing %d elements, ring old 0x%x, new 0x%x\n",
num_elem, old_offset, ring->rd_offset);
"Writing %d elements in ring_id:%d\t"
"ring old-offset 0x%x, new-offset 0x%x\n",
num_elem, ring->id, old_offset, ring->rd_offset);
ring->ring_ctx->generic.rp = (ring->rd_offset *
sizeof(union mhi_dev_ring_element_type)) +
@ -335,7 +337,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
host_addr.virt_addr = element;
host_addr.size = sizeof(union mhi_dev_ring_element_type);
mhi_log(MHI_MSG_VERBOSE, "adding element to ring (%d)\n",
mhi_log(MHI_MSG_VERBOSE, "adding element to ring_id:%d\n",
ring->id);
mhi_log(MHI_MSG_VERBOSE, "rd_ofset %lu\n", ring->rd_offset);
mhi_log(MHI_MSG_VERBOSE, "type %d\n", element->generic.type);
@ -355,7 +357,7 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
(element + i)->evt_tr_comp.code);
mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n",
(element + i)->evt_tr_comp.type);
mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n",
mhi_log(MHI_MSG_VERBOSE, "evnt ch_id :0x%x\n",
(element + i)->evt_tr_comp.chid);
}
/* Adding multiple ring elements */
@ -490,19 +492,6 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;
if (ring->type == RING_TYPE_ER)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
(ring->id - mhi->ev_ring_start) *
sizeof(union mhi_dev_ring_ctx));
else if (ring->type == RING_TYPE_CMD)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
else if (ring->type == RING_TYPE_CH)
ring->ring_ctx_shadow =
(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));
ring->ring_ctx_shadow = ring->ring_ctx;
if (ring->type != RING_TYPE_ER || ring->type != RING_TYPE_CH) {
@ -574,7 +563,7 @@ void mhi_ring_set_state(struct mhi_dev_ring *ring,
return;
if (state > RING_STATE_PENDING) {
pr_err("%s: Invalid ring state\n", __func__);
mhi_log(MHI_MSG_ERROR, "Invalid ring state\n");
return;
}

View File

@ -55,7 +55,7 @@ enum uci_dbg_level {
UCI_DBG_reserved = 0x80000000
};
static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL;
static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_ERROR;
static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO;
static void *mhi_uci_ipc_log;
@ -452,7 +452,7 @@ static bool mhi_uci_are_channels_connected(struct uci_client *uci_client)
rc = mhi_ctrl_state_info(uci_client->in_chan, &info_ch_in);
if (rc) {
uci_log(UCI_DBG_DBG,
"Channels %d is not available with %d\n",
"ch_id:%d is not available with %d\n",
uci_client->out_chan, rc);
return false;
}
@ -460,7 +460,7 @@ static bool mhi_uci_are_channels_connected(struct uci_client *uci_client)
rc = mhi_ctrl_state_info(uci_client->out_chan, &info_ch_out);
if (rc) {
uci_log(UCI_DBG_DBG,
"Channels %d is not available with %d\n",
"ch_id:%d is not available with %d\n",
uci_client->out_chan, rc);
return false;
}
@ -468,7 +468,7 @@ static bool mhi_uci_are_channels_connected(struct uci_client *uci_client)
if ((info_ch_in != MHI_STATE_CONNECTED) ||
(info_ch_out != MHI_STATE_CONNECTED)) {
uci_log(UCI_DBG_DBG,
"Channels %d or %d are not connected\n",
"ch_id:%d or %d are not connected\n",
uci_client->in_chan, uci_client->out_chan);
return false;
}
@ -490,13 +490,13 @@ static int mhi_init_read_chan(struct uci_client *client_handle,
return -EINVAL;
}
if (chan >= MHI_MAX_SOFTWARE_CHANNELS) {
uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan);
uci_log(UCI_DBG_ERROR, "Incorrect ch_id:%d\n", chan);
return -EINVAL;
}
in_chan_attr = client_handle->in_chan_attr;
if (!in_chan_attr) {
uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n",
uci_log(UCI_DBG_ERROR, "Null channel attributes for ch_id:%d\n",
client_handle->in_chan);
return -EINVAL;
}
@ -531,7 +531,7 @@ static struct mhi_req *mhi_uci_get_req(struct uci_client *uci_handle)
spin_lock_irqsave(&uci_handle->req_lock, flags);
if (list_empty(&uci_handle->req_list)) {
uci_log(UCI_DBG_ERROR, "Request pool empty for chans %d, %d\n",
uci_log(UCI_DBG_ERROR, "Request pool empty for ch_id:%d, %d\n",
uci_handle->in_chan, uci_handle->out_chan);
spin_unlock_irqrestore(&uci_handle->req_lock, flags);
return NULL;
@ -546,7 +546,7 @@ static struct mhi_req *mhi_uci_get_req(struct uci_client *uci_handle)
* req is re-used
*/
if (req->is_stale && req->buf && MHI_UCI_IS_CHAN_DIR_IN(req->chan)) {
uci_log(UCI_DBG_VERBOSE, "Freeing write buf for chan %d\n",
uci_log(UCI_DBG_VERBOSE, "Freeing write buf for ch_id:%d\n",
req->chan);
kfree(req->buf);
}
@ -565,7 +565,7 @@ static int mhi_uci_put_req(struct uci_client *uci_handle, struct mhi_req *req)
spin_lock_irqsave(&uci_handle->req_lock, flags);
if (req->is_stale) {
uci_log(UCI_DBG_VERBOSE,
"Got stale completion for ch %d, ignoring\n",
"Got stale completion for ch_id:%d, ignoring\n",
req->chan);
spin_unlock_irqrestore(&uci_handle->req_lock, flags);
return -EINVAL;
@ -630,7 +630,8 @@ static int mhi_uci_send_sync(struct uci_client *uci_handle,
int ret_val;
uci_log(UCI_DBG_VERBOSE,
"Sync write for ch %d size %d\n", uci_handle->out_chan, size);
"Sync write for ch_id:%d size %d\n",
uci_handle->out_chan, size);
ureq.client = uci_handle->out_handle;
ureq.buf = data_loc;
@ -652,7 +653,7 @@ static int mhi_uci_send_async(struct uci_client *uci_handle,
struct mhi_req *ureq;
uci_log(UCI_DBG_DBG,
"Async write for ch %d size %d\n",
"Async write for ch_id:%d size %d\n",
uci_handle->out_chan, size);
ureq = mhi_uci_get_req(uci_handle);
@ -691,7 +692,7 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
ret_val = uci_handle->send(uci_handle, data_loc, size);
if (!ret_val) {
uci_log(UCI_DBG_VERBOSE,
"No descriptors available, did we poll, chan %d?\n",
"No descriptors available, did we poll, ch_id:%d?\n",
uci_handle->out_chan);
mutex_unlock(&uci_handle->out_chan_lock);
if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
@ -711,7 +712,7 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
* Wait till pending writes complete or a timeout.
*/
uci_log(UCI_DBG_VERBOSE,
"Write req list empty for chan %d\n",
"Write req list empty for ch_id:%d\n",
uci_handle->out_chan);
mutex_unlock(&uci_handle->out_chan_lock);
if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
@ -726,14 +727,14 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
* retry the write.
*/
uci_log(UCI_DBG_VERBOSE,
"Write req struct available for chan %d\n",
"Write req struct available for ch_id:%d\n",
uci_handle->out_chan);
mutex_lock(&uci_handle->out_chan_lock);
ret_val = 0;
continue;
} else if (!ret_val) {
uci_log(UCI_DBG_ERROR,
"Timed out waiting for write req, chan %d\n",
"Timed out waiting for write req, ch_id:%d\n",
uci_handle->out_chan);
return -EIO;
} else if (-ERESTARTSYS == ret_val) {
@ -743,7 +744,7 @@ static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
}
} else if (ret_val < 0) {
uci_log(UCI_DBG_ERROR,
"Err sending data: chan %d, buf %pK, size %d\n",
"Err sending data: ch_id:%d, buf %pK, size %d\n",
uci_handle->out_chan, data_loc, size);
ret_val = -EIO;
break;
@ -807,18 +808,18 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
if (!atomic_read(&uci_ctxt.mhi_disabled) &&
!mhi_dev_channel_isempty(uci_handle->in_handle)) {
uci_log(UCI_DBG_VERBOSE,
"Client can read chan %d\n", uci_handle->in_chan);
"Client can read ch_id:%d\n", uci_handle->in_chan);
mask |= POLLIN | POLLRDNORM;
}
if (!atomic_read(&uci_ctxt.mhi_disabled) &&
!mhi_dev_channel_isempty(uci_handle->out_handle)) {
uci_log(UCI_DBG_VERBOSE,
"Client can write chan %d\n", uci_handle->out_chan);
"Client can write ch_id:%d\n", uci_handle->out_chan);
mask |= POLLOUT | POLLWRNORM;
}
uci_log(UCI_DBG_VERBOSE,
"Client attempted to poll chan %d, returning mask 0x%x\n",
"Client attempted to poll ch_id:%d, returning mask 0x%x\n",
uci_handle->in_chan, mask);
mutex_unlock(&uci_handle->client_lock);
@ -853,7 +854,7 @@ static int mhi_uci_alloc_reqs(struct uci_client *client)
list_add_tail(&client->reqs[i].list, &client->req_list);
uci_log(UCI_DBG_INFO,
"Allocated %d write reqs for chan %d\n",
"Allocated %d write reqs for ch_id:%d\n",
num_reqs, client->out_chan);
return 0;
}
@ -866,12 +867,12 @@ static int mhi_uci_read_async(struct uci_client *uci_handle, int *bytes_avail)
struct mhi_dev_client *client_handle;
uci_log(UCI_DBG_DBG,
"Async read for ch %d\n", uci_handle->in_chan);
"Async read for ch_id:%d\n", uci_handle->in_chan);
ureq = mhi_uci_get_req(uci_handle);
if (!ureq) {
uci_log(UCI_DBG_ERROR,
"Out of reqs for chan %d\n", uci_handle->in_chan);
"Out of reqs for ch_id:%d\n", uci_handle->in_chan);
return -EBUSY;
}
@ -917,14 +918,14 @@ static int mhi_uci_read_async(struct uci_client *uci_handle, int *bytes_avail)
uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
return compl_ret;
} else if (compl_ret == 0) {
uci_log(UCI_DBG_ERROR, "Read timed out for ch %d\n",
uci_log(UCI_DBG_ERROR, "Read timed out for ch_id:%d\n",
uci_handle->in_chan);
return -EIO;
}
uci_log(UCI_DBG_VERBOSE,
"wk up Read completed on ch %d\n", uci_handle->in_chan);
"wk up Read completed on ch_id:%d\n", uci_handle->in_chan);
uci_log(UCI_DBG_VERBOSE,
"Got pkt of sz 0x%lx at adr %pK, ch %d\n",
"Got pkt of sz 0x%lx at adr %pK, ch_id:%d\n",
uci_handle->pkt_size,
uci_handle->pkt_loc, uci_handle->in_chan);
} else {
@ -944,8 +945,8 @@ static int mhi_uci_read_sync(struct uci_client *uci_handle, int *bytes_avail)
struct mhi_req ureq;
struct mhi_dev_client *client_handle;
uci_log(UCI_DBG_ERROR,
"Sync read for ch %d\n", uci_handle->in_chan);
uci_log(UCI_DBG_INFO,
"Sync read for ch_id:%d\n", uci_handle->in_chan);
client_handle = uci_handle->in_handle;
ureq.chan = uci_handle->in_chan;
@ -969,7 +970,7 @@ static int mhi_uci_read_sync(struct uci_client *uci_handle, int *bytes_avail)
uci_handle->pkt_size = ureq.transfer_len;
uci_log(UCI_DBG_VERBOSE,
"Got pkt of sz 0x%lx at adr %pK, ch %d\n",
"Got pkt of sz 0x%lx at adr %pK, ch_id:%d\n",
uci_handle->pkt_size,
ureq.buf, ureq.chan);
} else {
@ -985,13 +986,12 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
int rc = 0;
if (!mhi_uci_are_channels_connected(uci_client)) {
uci_log(UCI_DBG_ERROR, "%s:Channels are not connected\n",
__func__);
uci_log(UCI_DBG_ERROR, "Channels are not connected\n");
return -ENODEV;
}
uci_log(UCI_DBG_DBG,
"Starting channels %d %d.\n",
"Starting channels OUT ch_id:%d IN ch_id:%d\n",
uci_client->out_chan,
uci_client->in_chan);
mutex_lock(&uci_client->out_chan_lock);
@ -1010,7 +1010,7 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
}
uci_log(UCI_DBG_DBG,
"Initializing inbound chan %d.\n",
"Initializing inbound ch_id:%d.\n",
uci_client->in_chan);
rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
if (rc < 0) {
@ -1031,7 +1031,7 @@ static int open_client_mhi_channels(struct uci_client *uci_client)
uci_ctxt.event_notifier);
if (rc < 0) {
uci_log(UCI_DBG_ERROR,
"Failed to open chan %d, ret %d\n",
"Failed to open ch_id:%d, ret %d\n",
uci_client->out_chan, rc);
goto handle_in_err;
}
@ -1134,14 +1134,14 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
mutex_lock(&uci_handle->client_lock);
in_chan_attr = uci_handle->in_chan_attr;
if (!in_chan_attr) {
uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n",
uci_log(UCI_DBG_ERROR, "Null channel attributes for ch_id:%d\n",
uci_handle->in_chan);
mutex_unlock(&uci_handle->client_lock);
return -EINVAL;
}
if (atomic_sub_return(1, &uci_handle->ref_count)) {
uci_log(UCI_DBG_DBG, "Client close chan %d, ref count 0x%x\n",
uci_log(UCI_DBG_DBG, "Client close ch_id:%d, ref count 0x%x\n",
iminor(mhi_inode),
atomic_read(&uci_handle->ref_count));
mutex_unlock(&uci_handle->client_lock);
@ -1149,7 +1149,7 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
}
uci_log(UCI_DBG_DBG,
"Last client left, closing channel 0x%x\n",
"Last client left, closing ch 0x%x\n",
iminor(mhi_inode));
do {
@ -1161,7 +1161,7 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
} while (++count < MHI_UCI_RELEASE_TIMEOUT_COUNT);
if (count == MHI_UCI_RELEASE_TIMEOUT_COUNT) {
uci_log(UCI_DBG_DBG, "Channel %d has pending writes\n",
uci_log(UCI_DBG_DBG, "ch_id:%d has pending writes\n",
iminor(mhi_inode));
}
@ -1192,7 +1192,7 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
list_del_init(&ureq->list);
ureq->is_stale = true;
uci_log(UCI_DBG_VERBOSE,
"Adding back req for chan %d to free list\n",
"Adding back req for ch_id:%d to free list\n",
ureq->chan);
list_add_tail(&ureq->list, &uci_handle->req_list);
count++;
@ -1247,7 +1247,7 @@ static int mhi_state_uevent(struct device *dev, struct kobj_uevent_env *env)
rc = mhi_ctrl_state_info(MHI_DEV_UEVENT_CTRL, &info);
if (rc) {
pr_err("Failed to obtain MHI_STATE\n");
uci_log(UCI_DBG_ERROR, "Failed to obtain MHI_STATE\n");
return -EINVAL;
}
@ -1257,12 +1257,13 @@ static int mhi_state_uevent(struct device *dev, struct kobj_uevent_env *env)
for (i = 0; i < ARRAY_SIZE(mhi_chan_attr_table); i++) {
chan_attrib = &mhi_chan_attr_table[i];
if (chan_attrib->state_bcast) {
uci_log(UCI_DBG_ERROR, "Calling notify for ch %d\n",
uci_log(UCI_DBG_INFO, "Calling notify for ch_id:%d\n",
chan_attrib->chan_id);
rc = mhi_ctrl_state_info(chan_attrib->chan_id, &info);
if (rc) {
pr_err("Failed to obtain channel %d state\n",
chan_attrib->chan_id);
uci_log(UCI_DBG_ERROR,
"Failed to obtain ch_id:%d state\n",
chan_attrib->chan_id);
return -EINVAL;
}
nbytes = 0;
@ -1307,7 +1308,7 @@ static ssize_t mhi_uci_ctrl_client_read(struct file *file,
"MHI_STATE=DISCONNECTED");
break;
default:
pr_err("invalid info:%d\n", info);
uci_log(UCI_DBG_ERROR, "invalid info:%d\n", info);
return -EINVAL;
}
@ -1329,8 +1330,7 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle,
do {
if (!mhi_uci_are_channels_connected(uci_handle)) {
uci_log(UCI_DBG_ERROR,
"%s:Channels are not connected\n", __func__);
uci_log(UCI_DBG_ERROR, "Channels are not connected\n");
return -ENODEV;
}
@ -1344,7 +1344,7 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle,
/* If nothing was copied yet, wait for data */
uci_log(UCI_DBG_VERBOSE,
"No data read_data_ready %d, chan %d\n",
"No data read_data_ready %d, ch_id:%d\n",
atomic_read(&uci_handle->read_data_ready),
uci_handle->in_chan);
if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
@ -1360,13 +1360,13 @@ static int __mhi_uci_client_read(struct uci_client *uci_handle,
}
uci_log(UCI_DBG_VERBOSE,
"wk up Got data on ch %d read_data_ready %d\n",
"wk up Got data on ch_id:%d read_data_ready %d\n",
uci_handle->in_chan,
atomic_read(&uci_handle->read_data_ready));
} else if (*bytes_avail > 0) {
/* A valid packet was returned from MHI */
uci_log(UCI_DBG_VERBOSE,
"Got packet: avail pkts %d phy_adr %pK, ch %d\n",
"Got packet: avail pkts %d phy_adr %pK, ch_id:%d\n",
atomic_read(&uci_handle->read_data_ready),
uci_handle->pkt_loc,
uci_handle->in_chan);
@ -1399,7 +1399,7 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
mutex = &uci_handle->in_chan_lock;
mutex_lock(mutex);
uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
uci_log(UCI_DBG_VERBOSE, "Client attempted read on ch_id:%d\n",
uci_handle->in_chan);
ret_val = __mhi_uci_client_read(uci_handle, &bytes_avail);
@ -1419,7 +1419,7 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
bytes_copied = *bytes_pending;
*bytes_pending = 0;
uci_log(UCI_DBG_VERBOSE, "Copied 0x%lx of 0x%x, chan %d\n",
uci_log(UCI_DBG_VERBOSE, "Copied 0x%lx of 0x%x, ch_id:%d\n",
bytes_copied, (u32)*bytes_pending, uci_handle->in_chan);
} else {
addr_offset = uci_handle->pkt_size - *bytes_pending;
@ -1430,7 +1430,7 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
}
bytes_copied = uspace_buf_size;
*bytes_pending -= uspace_buf_size;
uci_log(UCI_DBG_VERBOSE, "Copied 0x%lx of 0x%x,chan %d\n",
uci_log(UCI_DBG_VERBOSE, "Copied 0x%lx of 0x%x,ch_id:%d\n",
bytes_copied,
(u32)*bytes_pending,
uci_handle->in_chan);
@ -1438,7 +1438,7 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
/* We finished with this buffer, map it back */
if (*bytes_pending == 0) {
uci_log(UCI_DBG_VERBOSE,
"All data consumed. Pkt loc %p ,chan %d\n",
"All data consumed. Pkt loc %p ,ch_id:%d\n",
uci_handle->pkt_loc, uci_handle->in_chan);
uci_handle->pkt_loc = 0;
uci_handle->pkt_size = 0;
@ -1481,8 +1481,7 @@ static ssize_t mhi_uci_client_write(struct file *file,
}
if (!mhi_uci_are_channels_connected(uci_handle)) {
uci_log(UCI_DBG_ERROR, "%s:Channels are not connected\n",
__func__);
uci_log(UCI_DBG_ERROR, "Channels are not connected\n");
return -ENODEV;
}
@ -1541,8 +1540,7 @@ static ssize_t mhi_uci_client_write_iter(struct kiocb *iocb,
}
if (!mhi_uci_are_channels_connected(uci_handle)) {
uci_log(UCI_DBG_ERROR, "%s:Channels are not connected\n",
__func__);
uci_log(UCI_DBG_ERROR, "Channels are not connected\n");
return -ENODEV;
}
@ -1581,7 +1579,7 @@ void mhi_uci_chan_state_notify_all(struct mhi_dev *mhi,
for (i = 0; i < ARRAY_SIZE(mhi_chan_attr_table); i++) {
chan_attrib = &mhi_chan_attr_table[i];
if (chan_attrib->state_bcast) {
uci_log(UCI_DBG_ERROR, "Calling notify for ch %d\n",
uci_log(UCI_DBG_ERROR, "Calling notify for ch_id:%d\n",
chan_attrib->chan_id);
mhi_uci_chan_state_notify(mhi, chan_attrib->chan_id,
ch_state);
@ -1598,14 +1596,14 @@ void mhi_uci_chan_state_notify(struct mhi_dev *mhi,
int rc;
if (ch_id < 0 || ch_id >= MHI_MAX_SOFTWARE_CHANNELS) {
uci_log(UCI_DBG_ERROR, "Invalid chan %d\n", ch_id);
uci_log(UCI_DBG_ERROR, "Invalid ch_id:%d\n", ch_id);
return;
}
uci_handle = &uci_ctxt.client_handles[CHAN_TO_CLIENT(ch_id)];
if (!uci_handle->out_chan_attr ||
!uci_handle->out_chan_attr->state_bcast) {
uci_log(UCI_DBG_VERBOSE, "Uevents not enabled for chan %d\n",
uci_log(UCI_DBG_VERBOSE, "Uevents not enabled for ch_id:%d\n",
ch_id);
return;
}
@ -1631,7 +1629,7 @@ void mhi_uci_chan_state_notify(struct mhi_dev *mhi,
rc = kobject_uevent_env(&mhi->dev->kobj, KOBJ_CHANGE, buf);
if (rc)
uci_log(UCI_DBG_ERROR,
"Sending uevent failed for chan %d\n", ch_id);
"Sending uevent failed for ch_id:%d\n", ch_id);
if (ch_state == MHI_STATE_DISCONNECTED &&
!atomic_read(&uci_handle->ref_count)) {
@ -1651,7 +1649,7 @@ void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason)
if (reason->reason == MHI_DEV_CTRL_UPDATE) {
uci_ctrl_handle = &uci_ctxt.ctrl_handle;
if (!uci_ctrl_handle) {
pr_err("Invalid uci ctrl handle\n");
uci_log(UCI_DBG_ERROR, "Invalid uci ctrl handle\n");
return;
}
@ -1677,7 +1675,7 @@ static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason)
uci_handle->out_chan_attr->tre_notif_cb(reason);
} else if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
uci_log(UCI_DBG_DBG,
"recived TRE available event for chan %d\n",
"recived TRE available event for ch_id:%d\n",
uci_handle->in_chan);
if (reason->ch_id % 2) {
atomic_set(&uci_handle->write_data_ready, 1);
@ -1702,7 +1700,7 @@ static int mhi_register_client(struct uci_client *mhi_client, int index)
/* Init the completion event for AT ctrl read */
init_completion(&mhi_client->at_ctrl_read_done);
uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
uci_log(UCI_DBG_DBG, "Registering ch_id:%d.\n", mhi_client->out_chan);
return 0;
}
@ -2069,7 +2067,7 @@ static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data)
int rc, i;
struct mhi_req *ureq;
uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n",
uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for ch_id:%d, state %d\n",
cb_data->channel, cb_data->ctrl_info);
if (cb_data->ctrl_info == MHI_STATE_CONNECTED) {
@ -2120,7 +2118,7 @@ static void mhi_uci_generic_client_cb(struct mhi_dev_client_cb_data *cb_data)
{
struct uci_client *client = cb_data->user_data;
uci_log(UCI_DBG_DBG, "Rcvd MHI cb for channel %d, state %d\n",
uci_log(UCI_DBG_DBG, "Rcvd MHI cb for ch_id:%d, state %d\n",
cb_data->channel, cb_data->ctrl_info);
if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
@ -2254,7 +2252,7 @@ int mhi_uci_init(void)
/* Control node */
uci_ctxt.cdev_ctrl = cdev_alloc();
if (uci_ctxt.cdev_ctrl == NULL) {
pr_err("%s: ctrl cdev alloc failed\n", __func__);
uci_log(UCI_DBG_ERROR, "ctrl cdev alloc failed\n");
return 0;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2022 The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
@ -8047,6 +8047,9 @@ static void smblib_lpd_ra_open_work(struct work_struct *work)
if (!(stat & TYPEC_WATER_DETECTION_STATUS_BIT)
|| (stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)) {
chg->lpd_stage = LPD_STAGE_NONE;
/* Remove LPD_VOTER from ICL is moisture status is gone in attached state. */
vote(chg->usb_icl_votable, LPD_VOTER, false, 0);
goto out;
}

View File

@ -400,6 +400,17 @@ bool icnss_is_fw_down(void)
}
EXPORT_SYMBOL(icnss_is_fw_down);
unsigned long icnss_get_device_config(void)
{
struct icnss_priv *priv = icnss_get_plat_priv();
if (!priv)
return 0;
return priv->device_config;
}
EXPORT_SYMBOL(icnss_get_device_config);
bool icnss_is_rejuvenate(void)
{
if (!penv)
@ -3905,6 +3916,14 @@ static void icnss_init_control_params(struct icnss_priv *priv)
}
}
static void icnss_read_device_configs(struct icnss_priv *priv)
{
if (of_property_read_bool(priv->pdev->dev.of_node,
"wlan-ipa-disabled")) {
set_bit(ICNSS_IPA_DISABLED, &priv->device_config);
}
}
static inline void icnss_get_smp2p_info(struct icnss_priv *priv)
{
@ -4007,6 +4026,8 @@ static int icnss_probe(struct platform_device *pdev)
icnss_init_control_params(priv);
icnss_read_device_configs(priv);
ret = icnss_resource_parse(priv);
if (ret)
goto out_reset_drvdata;

View File

@ -465,6 +465,7 @@ struct icnss_priv {
struct icnss_dms_data dms;
u8 use_nv_mac;
u32 wlan_en_delay_ms;
unsigned long device_config;
};
struct icnss_reg_info {

View File

@ -621,7 +621,6 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
{
int ret = 0;
struct spcom_user_create_channel_command *cmd = cmd_buf;
const size_t maxlen = sizeof(cmd->ch_name);
if (cmd_size != sizeof(*cmd)) {
spcom_pr_err("cmd_size [%d] , expected [%d]\n",
@ -629,11 +628,6 @@ static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
return -EINVAL;
}
if (strnlen(cmd->ch_name, maxlen) == maxlen) {
spcom_pr_err("channel name is not NULL terminated\n");
return -EINVAL;
}
mutex_lock(&spcom_dev->chdev_count_lock);
ret = spcom_create_channel_chardev(cmd->ch_name, cmd->is_sharable);
mutex_unlock(&spcom_dev->chdev_count_lock);
@ -2003,6 +1997,12 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
void *priv;
struct cdev *cdev;
if (!name || strnlen(name, SPCOM_CHANNEL_NAME_SIZE) ==
SPCOM_CHANNEL_NAME_SIZE) {
spcom_pr_err("invalid channel name\n");
return -EINVAL;
}
spcom_pr_dbg("creating channel [%s]\n", name);
ch = spcom_find_channel_by_name(name);
@ -2037,7 +2037,12 @@ static int spcom_create_channel_chardev(const char *name, bool is_sharable)
devt = spcom_dev->device_no + spcom_dev->chdev_count;
priv = ch;
dev = device_create(cls, parent, devt, priv, name);
/*
* Pass channel name as formatted string to avoid abuse by using a
* formatted string as channel name
*/
dev = device_create(cls, parent, devt, priv, "%s", name);
if (IS_ERR(dev)) {
spcom_pr_err("device_create failed\n");
ret = -ENODEV;

View File

@ -1,26 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2016, Linaro Limited
* Copyright (c) 2015-2017, 2019-2021 Linaro Limited
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"
static void tee_shm_release(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
struct tee_device *teedev = shm->teedev;
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
if (shm->ctx)
list_del(&shm->link);
mutex_unlock(&teedev->mutex);
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@ -52,51 +44,6 @@ static void tee_shm_release(struct tee_shm *shm)
tee_device_put(teedev);
}
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
*attach, enum dma_data_direction dir)
{
return NULL;
}
static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *table,
enum dma_data_direction dir)
{
}
static void tee_shm_op_release(struct dma_buf *dmabuf)
{
struct tee_shm *shm = dmabuf->priv;
tee_shm_release(shm);
}
static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
return NULL;
}
static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct tee_shm *shm = dmabuf->priv;
size_t size = vma->vm_end - vma->vm_start;
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_REGISTER)
return -EINVAL;
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
size, vma->vm_page_prot);
}
static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.map_dma_buf = tee_shm_op_map_dma_buf,
.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
.release = tee_shm_op_release,
.map = tee_shm_op_map,
.mmap = tee_shm_op_mmap,
};
static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
struct tee_device *teedev,
size_t size, u32 flags)
@ -137,6 +84,7 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
goto err_dev_put;
}
refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_POOL;
shm->teedev = teedev;
shm->ctx = ctx;
@ -159,21 +107,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
goto err_pool_free;
}
if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size;
exp_info.flags = O_RDWR;
exp_info.priv = shm;
shm->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(shm->dmabuf)) {
ret = ERR_CAST(shm->dmabuf);
goto err_rem;
}
}
if (ctx) {
teedev_ctx_get(ctx);
mutex_lock(&teedev->mutex);
@ -182,10 +115,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
}
return shm;
err_rem:
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
mutex_unlock(&teedev->mutex);
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
@ -268,6 +197,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_REGISTER;
shm->teedev = teedev;
shm->ctx = ctx;
@ -309,22 +239,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size;
exp_info.flags = O_RDWR;
exp_info.priv = shm;
shm->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(shm->dmabuf)) {
ret = ERR_CAST(shm->dmabuf);
teedev->desc->ops->shm_unregister(ctx, shm);
goto err;
}
}
mutex_lock(&teedev->mutex);
list_add_tail(&shm->link, &ctx->list_shm);
mutex_unlock(&teedev->mutex);
@ -352,6 +266,35 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
EXPORT_SYMBOL_GPL(tee_shm_register);
static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
tee_shm_put(filp->private_data);
return 0;
}
static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct tee_shm *shm = filp->private_data;
size_t size = vma->vm_end - vma->vm_start;
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
return -EINVAL;
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
size, vma->vm_page_prot);
}
static const struct file_operations tee_shm_fops = {
.owner = THIS_MODULE,
.release = tee_shm_fop_release,
.mmap = tee_shm_fop_mmap,
};
/**
* tee_shm_get_fd() - Increase reference count and return file descriptor
* @shm: Shared memory handle
@ -364,10 +307,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
get_dma_buf(shm->dmabuf);
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
/* matched by tee_shm_put() in tee_shm_op_release() */
refcount_inc(&shm->refcount);
fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
if (fd < 0)
dma_buf_put(shm->dmabuf);
tee_shm_put(shm);
return fd;
}
@ -377,17 +321,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
*/
void tee_shm_free(struct tee_shm *shm)
{
/*
* dma_buf_put() decreases the dmabuf reference counter and will
* call tee_shm_release() when the last reference is gone.
*
* In the case of driver private memory we call tee_shm_release
* directly instead as it doesn't have a reference counter.
*/
if (shm->flags & TEE_SHM_DMA_BUF)
dma_buf_put(shm->dmabuf);
else
tee_shm_release(shm);
tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
@ -494,10 +428,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
teedev = ctx->teedev;
mutex_lock(&teedev->mutex);
shm = idr_find(&teedev->idr, id);
/*
* If the tee_shm was found in the IDR it must have a refcount
* larger than 0 due to the guarantee in tee_shm_put() below. So
* it's safe to use refcount_inc().
*/
if (!shm || shm->ctx != ctx)
shm = ERR_PTR(-EINVAL);
else if (shm->flags & TEE_SHM_DMA_BUF)
get_dma_buf(shm->dmabuf);
else
refcount_inc(&shm->refcount);
mutex_unlock(&teedev->mutex);
return shm;
}
@ -509,7 +448,25 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
*/
void tee_shm_put(struct tee_shm *shm)
{
if (shm->flags & TEE_SHM_DMA_BUF)
dma_buf_put(shm->dmabuf);
struct tee_device *teedev = shm->teedev;
bool do_release = false;
mutex_lock(&teedev->mutex);
if (refcount_dec_and_test(&shm->refcount)) {
/*
* refcount has reached 0, we must now remove it from the
* IDR before releasing the mutex. This will guarantee that
* the refcount_inc() in tee_shm_get_from_id() never starts
* from 0.
*/
idr_remove(&teedev->idr, shm->id);
if (shm->ctx)
list_del(&shm->link);
do_release = true;
}
mutex_unlock(&teedev->mutex);
if (do_release)
tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);

View File

@ -1157,11 +1157,27 @@ int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC31_LCSR_TX_DEEMPH_3(0),
dwc->gen2_tx_de_emph3 & DWC31_TX_DEEMPH_MASK);
/* set inter-packet gap 199.794ns to improve EL_23 margin */
/*
* Set inter-packet gap 199.794ns to improve EL_23 margin.
*
* STAR 9001346572: Host: When a Single USB 2.0 Endpoint Receives NAKs Continuously, Host
* Stops Transfers to Other Endpoints. When an active endpoint that is not currently cached
* in the host controller is chosen to be cached to the same cache index as the endpoint
* that receives NAK, The endpoint that receives the NAK responses would be in continuous
* retry mode that would prevent it from getting evicted out of the host controller cache.
* This would prevent the new endpoint to get into the endpoint cache and therefore service
* to this endpoint is not done.
* The workaround is to disable lower layer LSP retrying the USB2.0 NAKed transfer. Forcing
* this to LSP upper layer allows next EP to evict the stuck EP from cache.
*/
if (dwc->revision >= DWC3_USB31_REVISION_170A) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
reg |= DWC3_GUCTL1_IP_GAP_ADD_ON(1);
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE;
dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
}
return 0;

View File

@ -413,6 +413,7 @@
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
/* Global User Control Register 3 */
#define DWC3_GUCTL3_USB20_RETRY_DISABLE BIT(16)
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
/* Device Configuration Register */

View File

@ -2004,6 +2004,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (w_index != 0x5 || (w_value >> 8))
break;
interface = w_value & 0xFF;
if (interface >= MAX_CONFIG_INTERFACES ||
!os_desc_cfg->interface[interface])
break;
buf[6] = w_index;
count = count_ext_prop(os_desc_cfg,
interface);

View File

@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Gadget Function Driver for Android USB accessories
*
@ -142,12 +143,62 @@ static struct usb_interface_descriptor acc_interface_desc = {
.bInterfaceProtocol = 0,
};
/* SuperSpeedPlus (USB 3.1+) bulk IN endpoint, 1024-byte max packet */
static struct usb_endpoint_descriptor acc_superspeedplus_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
/* SuperSpeedPlus bulk OUT endpoint, 1024-byte max packet */
static struct usb_endpoint_descriptor acc_superspeedplus_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
/*
 * SuperSpeedPlus endpoint companion descriptor (required for every
 * endpoint at SS+ speeds); burst/stream attributes left at defaults.
 */
static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
.bLength = sizeof(acc_superspeedplus_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
/* SuperSpeed (USB 3.0) bulk IN endpoint, 1024-byte max packet */
static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
/* SuperSpeed bulk OUT endpoint, 1024-byte max packet */
static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(1024),
};
/*
 * SuperSpeed endpoint companion descriptor (required for every
 * endpoint at SS speed); burst/stream attributes left at defaults.
 */
static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
.bLength = sizeof(acc_superspeed_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
/* High-speed (USB 2.0) bulk IN endpoint, 512-byte max packet */
static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
/*
 * Single initializer only: the merge had left a duplicate
 * __constant_cpu_to_le16(512) line above this one, which is dead
 * (later designated initializers override earlier ones) and warns
 * under -Woverride-init.  cpu_to_le16() is the preferred spelling
 * and folds to a constant for literal arguments.
 */
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
@ -155,7 +206,7 @@ static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(512),
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
@ -186,6 +237,24 @@ static struct usb_descriptor_header *hs_acc_descs[] = {
NULL,
};
/*
 * SuperSpeed descriptor list: interface, then each bulk endpoint
 * immediately followed by its required SS companion descriptor.
 * NULL-terminated, as expected by the composite framework.
 */
static struct usb_descriptor_header *ss_acc_descs[] = {
(struct usb_descriptor_header *) &acc_interface_desc,
(struct usb_descriptor_header *) &acc_superspeed_in_desc,
(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
(struct usb_descriptor_header *) &acc_superspeed_out_desc,
(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
NULL,
};
/*
 * SuperSpeedPlus descriptor list; same layout as ss_acc_descs but
 * using the SS+ endpoint descriptors.  NULL-terminated.
 */
static struct usb_descriptor_header *ssp_acc_descs[] = {
(struct usb_descriptor_header *) &acc_interface_desc,
(struct usb_descriptor_header *) &acc_superspeedplus_in_desc,
(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
(struct usb_descriptor_header *) &acc_superspeedplus_out_desc,
(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
NULL,
};
static struct usb_string acc_string_defs[] = {
[INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
{ }, /* end of list */
@ -1047,12 +1116,22 @@ __acc_function_bind(struct usb_configuration *c,
return ret;
/* support high speed hardware */
if (gadget_is_dualspeed(c->cdev->gadget)) {
acc_highspeed_in_desc.bEndpointAddress =
acc_fullspeed_in_desc.bEndpointAddress;
acc_highspeed_out_desc.bEndpointAddress =
acc_fullspeed_out_desc.bEndpointAddress;
}
acc_highspeed_in_desc.bEndpointAddress =
acc_fullspeed_in_desc.bEndpointAddress;
acc_highspeed_out_desc.bEndpointAddress =
acc_fullspeed_out_desc.bEndpointAddress;
/* support super speed hardware */
acc_superspeed_in_desc.bEndpointAddress =
acc_fullspeed_in_desc.bEndpointAddress;
acc_superspeed_out_desc.bEndpointAddress =
acc_fullspeed_out_desc.bEndpointAddress;
/* support super speed plus hardware */
acc_superspeedplus_in_desc.bEndpointAddress =
acc_fullspeed_in_desc.bEndpointAddress;
acc_superspeedplus_out_desc.bEndpointAddress =
acc_fullspeed_out_desc.bEndpointAddress;
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@ -1435,6 +1514,8 @@ static struct usb_function *acc_alloc(struct usb_function_instance *fi)
dev->function.strings = acc_strings,
dev->function.fs_descriptors = fs_acc_descs;
dev->function.hs_descriptors = hs_acc_descs;
dev->function.ss_descriptors = ss_acc_descs;
dev->function.ssp_descriptors = ssp_acc_descs;
dev->function.bind = acc_function_bind_configfs;
dev->function.unbind = acc_function_unbind;
dev->function.set_alt = acc_function_set_alt;

View File

@ -610,11 +610,11 @@ static int usb_cser_func_suspend(struct usb_function *f, u8 options)
if (!port->func_is_suspended) {
usb_cser_suspend(f);
port->func_is_suspended = true;
} else {
if (port->func_is_suspended) {
port->func_is_suspended = false;
usb_cser_resume(f);
}
}
} else {
if (port->func_is_suspended) {
port->func_is_suspended = false;
usb_cser_resume(f);
}
}
return 0;

View File

@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params,
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
BufLength = le32_to_cpu(buf->InformationBufferLength);
BufOffset = le32_to_cpu(buf->InformationBufferOffset);
if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
(BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
return -EINVAL;
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
BufLength = le32_to_cpu(buf->InformationBufferLength);
BufOffset = le32_to_cpu(buf->InformationBufferOffset);
#ifdef VERBOSE_DEBUG
pr_debug("%s: Length: %d\n", __func__, BufLength);
pr_debug("%s: Offset: %d\n", __func__, BufOffset);

View File

@ -2581,6 +2581,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
/* The below cases were checked when setting it. */
if (f2fs_is_pinned_file(inode))
return false;
if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
return true;
if (f2fs_lfs_mode(sbi))
return true;
if (S_ISDIR(inode->i_mode))

View File

@ -3235,17 +3235,17 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
if (f2fs_should_update_outplace(inode, NULL)) {
ret = -EINVAL;
goto out;
}
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
goto done;
}
if (f2fs_should_update_outplace(inode, NULL)) {
ret = -EINVAL;
goto out;
}
if (f2fs_pin_file_control(inode, false)) {
ret = -EAGAIN;
goto out;

View File

@ -319,6 +319,7 @@ static struct mount_info *get_mount_info(struct super_block *sb)
{
struct mount_info *result = sb->s_fs_info;
WARN_ON(!result);
return result;
}
@ -677,7 +678,7 @@ static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
struct mount_info *mi = get_mount_info(file_superblock(file));
bool root;
if (!dir || !mi) {
if (!dir) {
error = -EBADF;
goto out;
}
@ -1841,9 +1842,6 @@ static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *trap;
int error = 0;
if (!mi)
return -EBADF;
error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
if (error)
return error;
@ -2091,9 +2089,6 @@ static ssize_t incfs_getxattr(struct dentry *d, const char *name,
char *stored_value;
size_t stored_size;
if (!mi)
return -EBADF;
if (di && di->backing_path.dentry)
return vfs_getxattr(di->backing_path.dentry, name, value, size);
@ -2130,9 +2125,6 @@ static ssize_t incfs_setxattr(struct dentry *d, const char *name,
void **stored_value;
size_t *stored_size;
if (!mi)
return -EBADF;
if (di && di->backing_path.dentry)
return vfs_setxattr(di->backing_path.dentry, name, value, size,
flags);
@ -2173,11 +2165,6 @@ static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
return vfs_listxattr(di->backing_path.dentry, list, size);
}
/*
 * sget() "test" callback: a superblock matches only when it has
 * already been populated with mount info (s_fs_info set).
 */
static int incfs_test_super(struct super_block *s, void *p)
{
	const void *fs_info = s->s_fs_info;

	return fs_info ? 1 : 0;
}
struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
const char *dev_name, void *data)
{
@ -2187,8 +2174,7 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
struct dentry *index_dir;
struct super_block *src_fs_sb = NULL;
struct inode *root_inode = NULL;
struct super_block *sb = sget(type, incfs_test_super, set_anon_super,
flags, NULL);
struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL);
int error = 0;
if (IS_ERR(sb))
@ -2229,18 +2215,13 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
src_fs_sb = backing_dir_path.dentry->d_sb;
sb->s_maxbytes = src_fs_sb->s_maxbytes;
if (!sb->s_fs_info) {
mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
if (IS_ERR_OR_NULL(mi)) {
error = PTR_ERR(mi);
pr_err("incfs: Error allocating mount info. %d\n", error);
mi = NULL;
goto err;
}
sb->s_fs_info = mi;
} else {
mi = sb->s_fs_info;
if (IS_ERR_OR_NULL(mi)) {
error = PTR_ERR(mi);
pr_err("incfs: Error allocating mount info. %d\n", error);
mi = NULL;
goto err;
}
index_dir = open_or_create_index_dir(backing_dir_path.dentry);
@ -2252,22 +2233,21 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
}
mi->mi_index_dir = index_dir;
sb->s_fs_info = mi;
root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
if (IS_ERR(root_inode)) {
error = PTR_ERR(root_inode);
goto err;
}
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
error = -ENOMEM;
goto err;
}
error = incfs_init_dentry(sb->s_root, &backing_dir_path);
if (error)
goto err;
error = -ENOMEM;
goto err;
}
error = incfs_init_dentry(sb->s_root, &backing_dir_path);
if (error)
goto err;
path_put(&backing_dir_path);
sb->s_flags |= SB_ACTIVE;
@ -2288,9 +2268,6 @@ static int incfs_remount_fs(struct super_block *sb, int *flags, char *data)
struct mount_info *mi = get_mount_info(sb);
int err = 0;
if (!mi)
return err;
sync_filesystem(sb);
err = parse_options(&options, (char *)data);
if (err)
@ -2318,9 +2295,6 @@ static int show_options(struct seq_file *m, struct dentry *root)
{
struct mount_info *mi = get_mount_info(root->d_sb);
if (!mi)
return -EBADF;
seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
if (mi->mi_options.read_log_pages != 0) {

View File

@ -427,8 +427,7 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
if (unlikely(!inode))
return failed_creating(dentry);
/* Do not set bits for OTH */
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
inode->i_op = ops;
inode->i_fop = &simple_dir_operations;

View File

@ -178,7 +178,7 @@ void tee_device_unregister(struct tee_device *teedev);
* @offset: offset of buffer in user space
* @pages: locked pages from userspace
* @num_pages: number of locked pages
* @dmabuf: dmabuf used to for exporting to user space
* @refcount: reference counter
* @flags: defined by TEE_SHM_* in tee_drv.h
* @id: unique id of a shared memory object on this device
*
@ -195,7 +195,7 @@ struct tee_shm {
unsigned int offset;
struct page **pages;
size_t num_pages;
struct dma_buf *dmabuf;
refcount_t refcount;
u32 flags;
int id;
};

View File

@ -1543,7 +1543,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x);
u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);

View File

@ -23,6 +23,10 @@ enum icnss_uevent {
ICNSS_UEVENT_SMMU_FAULT,
};
enum icnss_device_config {
ICNSS_IPA_DISABLED,
};
struct icnss_uevent_hang_data {
void *hang_event_data;
uint16_t hang_event_data_len;
@ -196,4 +200,5 @@ extern int icnss_prevent_l1(struct device *dev);
extern void icnss_allow_l1(struct device *dev);
extern int icnss_get_mhi_state(struct device *dev);
extern int icnss_is_pci_ep_awake(struct device *dev);
extern unsigned long icnss_get_device_config(void);
#endif /* _ICNSS_WLAN_H_ */

View File

@ -1005,8 +1005,9 @@ enum ipa_hw_type {
IPA_HW_v4_11 = 20,
IPA_HW_v5_0 = 21,
IPA_HW_v5_1 = 22,
IPA_HW_v5_2 = 23,
};
#define IPA_HW_MAX (IPA_HW_v5_1 + 1)
#define IPA_HW_MAX (IPA_HW_v5_2 + 1)
#define IPA_HW_v4_0 IPA_HW_v4_0
#define IPA_HW_v4_1 IPA_HW_v4_1
@ -1017,6 +1018,7 @@ enum ipa_hw_type {
#define IPA_HW_v4_11 IPA_HW_v4_11
#define IPA_HW_v5_0 IPA_HW_v5_0
#define IPA_HW_v5_1 IPA_HW_v5_1
#define IPA_HW_v5_2 IPA_HW_v5_2
/**
* enum ipa_hw_feature_support - IPA HW supported feature

View File

@ -94,7 +94,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
int i;
struct hlist_head *hash;
hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
if (hash != NULL)
for (i = 0; i < entries; i++)
INIT_HLIST_HEAD(&hash[i]);
@ -159,7 +159,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
spin_lock_init(&dtab->index_lock);
} else {
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)

View File

@ -552,6 +552,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
/*
* Release agent gets called with all capabilities,
* require capabilities to set release agent.
*/
if ((of->file->f_cred->user_ns != &init_user_ns) ||
!capable(CAP_SYS_ADMIN))
return -EPERM;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
@ -964,6 +972,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
/* Specifying two release agents is forbidden */
if (ctx->release_agent)
return cg_invalf(fc, "cgroup1: release_agent respecified");
/*
* Release agent gets called with all capabilities,
* require capabilities to set release agent.
*/
if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
return cg_invalf(fc, "cgroup1: Setting release_agent not allowed");
ctx->release_agent = param->string;
param->string = NULL;
break;

View File

@ -398,6 +398,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
return 0;
pipe->nrbufs++;
buf->ops = &page_cache_pipe_buf_ops;
buf->flags = 0;
get_page(buf->page = page);
buf->offset = offset;
buf->len = bytes;
@ -524,6 +525,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
break;
pipe->nrbufs++;
pipe->bufs[idx].ops = &default_pipe_buf_ops;
pipe->bufs[idx].flags = 0;
pipe->bufs[idx].page = page;
pipe->bufs[idx].offset = 0;
if (left <= PAGE_SIZE) {

View File

@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (err)
goto free_stab;
stab->sks = bpf_map_area_alloc(stab->map.max_entries *
stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
sizeof(struct sock *),
stab->map.numa_node);
if (stab->sks)

View File

@ -499,7 +499,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}

View File

@ -440,7 +440,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}

View File

@ -1953,15 +1953,18 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
u16 rcv_nxt = l->rcv_nxt;
u16 dlen = msg_data_sz(hdr);
u32 dlen = msg_data_sz(hdr), glen = 0;
int mtyp = msg_type(hdr);
bool reply = msg_probe(hdr);
u16 glen = 0;
void *data;
char *if_name;
int rc = 0;
trace_tipc_proto_rcv(skb, false, l->name);
if (dlen > U16_MAX)
goto exit;
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
@ -2063,7 +2066,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
ga = NULL;
}
if(glen > dlen)
break;
tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
&l->mon_state, l->bearer_id);

View File

@ -457,6 +457,8 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
state->probing = false;
/* Sanity check received domain record */
if (new_member_cnt > MAX_MON_DOMAIN)
return;
if (dlen < dom_rec_len(arrv_dom, 0))
return;
if (dlen != dom_rec_len(arrv_dom, new_member_cnt))

View File

@ -2517,7 +2517,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
struct crypto_aead *aead;
@ -2548,17 +2548,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
mtu = __xfrm_state_mtu(x, mtu);
if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
return IPV6_MIN_MTU;
return mtu;
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
{