Merge fd7bf900c3 ("i2c: cadence: cdns_i2c_master_xfer(): Fix runtime PM leak on error path") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: I3438288cccc8a544306afc5aa178fde55e7e34fb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
  * 'data' contains an integer that corresponds to the feature we're
  * testing
  */
-static int proc_salinfo_show(struct seq_file *m, void *v)
+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
 {
 	unsigned long data = (unsigned long)v;
 	seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
@@ -79,7 +79,7 @@ void *per_cpu_init(void)
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
-static inline void
+static inline __init void
 alloc_per_cpu_data(void)
 {
 	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
@@ -1516,6 +1516,14 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
 		return 0;
 
 	drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+	if (IS_ERR(drbg->jent)) {
+		const int err = PTR_ERR(drbg->jent);
+
+		drbg->jent = NULL;
+		if (fips_enabled)
+			return err;
+		pr_info("DRBG: Continuing without Jitter RNG\n");
+	}
 
 	return 0;
 }
@@ -1571,14 +1579,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
 		if (ret)
 			goto free_everything;
 
-		if (IS_ERR(drbg->jent)) {
-			ret = PTR_ERR(drbg->jent);
-			drbg->jent = NULL;
-			if (fips_enabled || ret != -ENOENT)
-				goto free_everything;
-			pr_info("DRBG: Continuing without Jitter RNG\n");
-		}
-
 		reseed = false;
 	}
 
@@ -352,7 +352,6 @@ static void btsdio_remove(struct sdio_func *func)
 
 	BT_DBG("func %p", func);
 
+	cancel_work_sync(&data->work);
 	if (!data)
 		return;
 
@@ -126,7 +126,8 @@ config NPCM7XX_KCS_IPMI_BMC
 
 config ASPEED_BT_IPMI_BMC
 	depends on ARCH_ASPEED || COMPILE_TEST
-	depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+	depends on MFD_SYSCON
+	select REGMAP_MMIO
 	tristate "BT IPMI bmc driver"
 	help
 	  Provides a driver for the BT (Block Transfer) IPMI interface
@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
 /**
  * fpga_bridge_get - get an exclusive reference to a fpga bridge
  * @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
  *
  * Given a device, get an exclusive reference to a fpga bridge.
 *
@@ -740,6 +740,7 @@ int __init etm_perf_init(void)
 	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
 	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
 	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
+	etm_pmu.module = THIS_MODULE;
 
 	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
 	if (ret == 0)
@@ -792,8 +792,10 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
 	/* Check i2c operating mode and switch if possible */
 	if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
-		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
-			return -EAGAIN;
+		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
+			ret = -EAGAIN;
+			goto out;
+		}
 
 		/* Set mode to master */
 		cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
@@ -528,6 +528,12 @@ static int max44009_probe(struct i2c_client *client,
 	return devm_iio_device_register(&client->dev, indio_dev);
 }
 
+static const struct of_device_id max44009_of_match[] = {
+	{ .compatible = "maxim,max44009" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max44009_of_match);
+
 static const struct i2c_device_id max44009_id[] = {
 	{ "max44009", 0 },
 	{ }
@@ -537,18 +543,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
 static struct i2c_driver max44009_driver = {
 	.driver = {
 		.name = MAX44009_DRV_NAME,
+		.of_match_table = max44009_of_match,
 	},
 	.probe = max44009_probe,
 	.id_table = max44009_id,
 };
 module_i2c_driver(max44009_driver);
 
-static const struct of_device_id max44009_of_match[] = {
-	{ .compatible = "maxim,max44009" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, max44009_of_match);
-
 MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@gmail.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
@@ -2212,11 +2212,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	struct r10conf *conf = mddev->private;
 	int d;
-	struct bio *wbio, *wbio2;
+	struct bio *wbio = r10_bio->devs[1].bio;
+	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
+
+	/* Need to test wbio2->bi_end_io before we call
+	 * submit_bio_noacct as if the former is NULL,
+	 * the latter is free to free wbio2.
+	 */
+	if (wbio2 && !wbio2->bi_end_io)
+		wbio2 = NULL;
 
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
 		fix_recovery_read_error(r10_bio);
-		end_sync_request(r10_bio);
+		if (wbio->bi_end_io)
+			end_sync_request(r10_bio);
+		if (wbio2)
+			end_sync_request(r10_bio);
 		return;
 	}
 
@@ -2225,14 +2236,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	 * and submit the write request
 	 */
 	d = r10_bio->devs[1].devnum;
-	wbio = r10_bio->devs[1].bio;
-	wbio2 = r10_bio->devs[1].repl_bio;
-	/* Need to test wbio2->bi_end_io before we call
-	 * submit_bio_noacct as if the former is NULL,
-	 * the latter is free to free wbio2.
-	 */
-	if (wbio2 && !wbio2->bi_end_io)
-		wbio2 = NULL;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
@@ -3615,6 +3618,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
 	return nc*fc;
 }
 
+static void raid10_free_conf(struct r10conf *conf)
+{
+	if (!conf)
+		return;
+
+	mempool_exit(&conf->r10bio_pool);
+	kfree(conf->mirrors);
+	kfree(conf->mirrors_old);
+	kfree(conf->mirrors_new);
+	safe_put_page(conf->tmppage);
+	bioset_exit(&conf->bio_split);
+	kfree(conf);
+}
+
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
@@ -3697,13 +3714,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return conf;
 
  out:
-	if (conf) {
-		mempool_exit(&conf->r10bio_pool);
-		kfree(conf->mirrors);
-		safe_put_page(conf->tmppage);
-		bioset_exit(&conf->bio_split);
-		kfree(conf);
-	}
+	raid10_free_conf(conf);
 	return ERR_PTR(err);
 }
 
@@ -3741,6 +3752,9 @@ static int raid10_run(struct mddev *mddev)
 	if (!conf)
 		goto out;
 
+	mddev->thread = conf->thread;
+	conf->thread = NULL;
+
 	if (mddev_is_clustered(conf->mddev)) {
 		int fc, fo;
 
@@ -3753,9 +3767,6 @@ static int raid10_run(struct mddev *mddev)
 		}
 	}
 
-	mddev->thread = conf->thread;
-	conf->thread = NULL;
-
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
@@ -3909,10 +3920,7 @@ static int raid10_run(struct mddev *mddev)
 
 out_free_conf:
 	md_unregister_thread(&mddev->thread);
-	mempool_exit(&conf->r10bio_pool);
-	safe_put_page(conf->tmppage);
-	kfree(conf->mirrors);
-	kfree(conf);
+	raid10_free_conf(conf);
 	mddev->private = NULL;
 out:
 	return -EIO;
@@ -3920,15 +3928,7 @@ static int raid10_run(struct mddev *mddev)
 
 static void raid10_free(struct mddev *mddev, void *priv)
 {
-	struct r10conf *conf = priv;
-
-	mempool_exit(&conf->r10bio_pool);
-	safe_put_page(conf->tmppage);
-	kfree(conf->mirrors);
-	kfree(conf->mirrors_old);
-	kfree(conf->mirrors_new);
-	bioset_exit(&conf->bio_split);
-	kfree(conf);
+	raid10_free_conf(priv);
 }
 
 static void raid10_quiesce(struct mddev *mddev, int quiesce)
@@ -165,10 +165,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
 static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
 {
 	struct vmci_host_dev *vmci_host_dev = filp->private_data;
-	struct vmci_ctx *context = vmci_host_dev->context;
+	struct vmci_ctx *context;
 	__poll_t mask = 0;
 
 	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		/*
+		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
+		 * sure that context is initialized
+		 */
+		context = vmci_host_dev->context;
+
 		/* Check for VMCI calls to this VM context. */
 		if (wait)
 			poll_wait(filp, &context->host_context.wait_queue,
@@ -650,7 +650,7 @@ static int nmclan_config(struct pcmcia_device *link)
 		} else {
 			pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
 				  sig[0], sig[1]);
-			return -ENODEV;
+			goto failed;
 		}
 	}
 
@@ -2641,6 +2641,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
 	return 0;
 }
 
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+	if (adapter->hw.mac.type < ixgbe_mac_X550)
+		return 16;
+	else
+		return 64;
+}
+
 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 			   u32 *rule_locs)
 {
@@ -2649,7 +2657,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 
 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = adapter->num_rx_queues;
+		cmd->data = min_t(int, adapter->num_rx_queues,
+				  ixgbe_rss_indir_tbl_max(adapter));
 		ret = 0;
 		break;
 	case ETHTOOL_GRXCLSRLCNT:
@@ -3051,14 +3060,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	return ret;
 }
 
-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
-{
-	if (adapter->hw.mac.type < ixgbe_mac_X550)
-		return 16;
-	else
-		return 64;
-}
-
 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
 {
 	return IXGBE_RSS_KEY_SIZE;
@@ -3107,8 +3108,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
 	int i;
 	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
 
-	if (hfunc)
-		return -EINVAL;
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
 
 	/* Fill out the redirection table */
 	if (indir) {
@@ -1194,9 +1194,6 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 	int ret;
 	struct device *dev = &bsp_priv->pdev->dev;
 
-	if (!ldo)
-		return 0;
-
 	if (enable) {
 		ret = regulator_enable(ldo);
 		if (ret)
@@ -1227,14 +1224,11 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
 	of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
 	bsp_priv->ops = ops;
 
-	bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
+	bsp_priv->regulator = devm_regulator_get(dev, "phy");
 	if (IS_ERR(bsp_priv->regulator)) {
-		if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
-			dev_err(dev, "phy regulator is not available yet, deferred probing\n");
-			return ERR_PTR(-EPROBE_DEFER);
-		}
-		dev_err(dev, "no regulator found\n");
-		bsp_priv->regulator = NULL;
+		ret = PTR_ERR(bsp_priv->regulator);
+		dev_err_probe(dev, ret, "failed to get phy regulator\n");
+		return ERR_PTR(ret);
 	}
 
 	ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
@@ -1372,13 +1372,13 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
 	if (!data)
 		return;
 
-	memset(data, 0, sizeof(*data));
-
 	/* make sure only one bit is set in only one fid */
 	if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
 		      "fid1=%x, fid2=%x\n", fid1, fid2))
 		return;
 
+	memset(data, 0, sizeof(*data));
+
 	if (fid1) {
 		fifo_idx = ffs(fid1) - 1;
 		if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
@@ -344,8 +344,10 @@ static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
 	const struct iwl_fw *fw = priv->fwrt->fw;
 
 	*pos = ++state->pos;
-	if (*pos >= fw->ucode_capa.n_cmd_versions)
+	if (*pos >= fw->ucode_capa.n_cmd_versions) {
+		kfree(state);
 		return NULL;
+	}
 
 	return state;
 }
@@ -191,6 +191,12 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
 	    alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
 		goto err;
 
+	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
+	    alloc->req_size == 0) {
+		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
+		return -EINVAL;
+	}
+
 	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
 
 	return 0;
@@ -1885,6 +1885,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 	if (ret < 0)
 		return ret;
 
+	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	rsp = (void *)hcmd.resp_pkt->data;
 	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
 		ret = -ENXIO;
@@ -1962,6 +1967,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
 	if (ret < 0)
 		return ret;
 
+	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	rsp = (void *)hcmd.resp_pkt->data;
 	if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
 		ret = -ENXIO;
@@ -620,7 +620,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;
-	int t = 0;
 	int iter;
 
 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -635,6 +634,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	usleep_range(1000, 2000);
 
 	for (iter = 0; iter < 10; iter++) {
+		int t = 0;
+
 		/* If HW is not ready, prepare the conditions to check again */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 			    CSR_HW_IF_CONFIG_REG_PREPARE);
@@ -278,8 +278,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
 
 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
 
-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
-		return count;
+	if (copy_from_user(tmp, buffer, tmp_len))
+		return -EFAULT;
 
 	tmp[tmp_len] = '\0';
 
@@ -287,7 +287,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
 	num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
 
 	if (num != 3)
-		return count;
+		return -EINVAL;
 
 	switch (len) {
 	case 1:
@@ -375,8 +375,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
 
 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
 
-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
-		return count;
+	if (copy_from_user(tmp, buffer, tmp_len))
+		return -EFAULT;
 
 	tmp[tmp_len] = '\0';
 
@@ -386,7 +386,7 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
 	if (num != 4) {
 		rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
 			"Format is <path> <addr> <mask> <data>\n");
-		return count;
+		return -EINVAL;
 	}
 
 	rtl_set_rfreg(hw, path, addr, bitmask, data);
@@ -4416,11 +4416,19 @@ static void nvme_fw_act_work(struct work_struct *work)
 		nvme_get_fw_slot_info(ctrl);
 }
 
+static u32 nvme_aer_type(u32 result)
+{
+	return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+	return (result & 0xff00) >> 8;
+}
+
 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
-	u32 aer_notice_type = (result & 0xff00) >> 8;
-
-	trace_nvme_async_event(ctrl, aer_notice_type);
+	u32 aer_notice_type = nvme_aer_subtype(result);
 
 	switch (aer_notice_type) {
 	case NVME_AER_NOTICE_NS_CHANGED:
@@ -4451,24 +4459,40 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 	}
 }
 
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+	dev_warn(ctrl->device, "resetting controller due to AER\n");
+	nvme_reset_ctrl(ctrl);
+}
+
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		volatile union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
-	u32 aer_type = result & 0x07;
+	u32 aer_type = nvme_aer_type(result);
+	u32 aer_subtype = nvme_aer_subtype(result);
 
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
+	trace_nvme_async_event(ctrl, result);
 	switch (aer_type) {
 	case NVME_AER_NOTICE:
 		nvme_handle_aen_notice(ctrl, result);
 		break;
 	case NVME_AER_ERROR:
+		/*
+		 * For a persistent internal error, don't run async_event_work
+		 * to submit a new AER. The controller reset will do it.
+		 */
+		if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+			nvme_handle_aer_persistent_error(ctrl);
+			return;
+		}
+		fallthrough;
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
 	case NVME_AER_VS:
-		trace_nvme_async_event(ctrl, aer_type);
 		ctrl->aen_result = result;
 		break;
 	default:
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
 	),
 	TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
 		__entry->ctrl_id, __entry->result,
-		__print_symbolic(__entry->result,
-		aer_name(NVME_AER_NOTICE_NS_CHANGED),
-		aer_name(NVME_AER_NOTICE_ANA),
-		aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
-		aer_name(NVME_AER_NOTICE_DISC_CHANGED),
-		aer_name(NVME_AER_ERROR),
-		aer_name(NVME_AER_SMART),
-		aer_name(NVME_AER_CSS),
-		aer_name(NVME_AER_VS))
+		__print_symbolic(__entry->result & 0x7,
+		aer_name(NVME_AER_ERROR),
+		aer_name(NVME_AER_SMART),
+		aer_name(NVME_AER_NOTICE),
+		aer_name(NVME_AER_CSS),
+		aer_name(NVME_AER_VS))
 	)
 );
 
@@ -570,10 +570,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	unsigned long flags;
 	int ret = 0;
 	bool aborted = false;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 		tfcp_req->inistate = INI_IO_ACTIVE;
@@ -582,11 +583,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
 		aborted = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(aborted))
 		ret = -ECANCELED;
@@ -607,8 +608,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
 	struct nvmefc_fcp_req *fcpreq;
 	bool completed = false;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	switch (tfcp_req->inistate) {
 	case INI_IO_ABORTED:
@@ -617,11 +619,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 		completed = true;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(completed)) {
 		/* remove reference taken in original abort downcall */
@@ -633,9 +635,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
 	nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
 			       &tfcp_req->tgt_fcp_req);
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->fcpreq = NULL;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
 	/* call_host_done releases reference for abort downcall */
@@ -651,11 +653,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 	struct fcloop_fcpreq *tfcp_req =
 		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	tfcp_req->inistate = INI_IO_COMPLETED;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
@@ -759,13 +762,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 	u32 rsplen = 0, xfrlen = 0;
 	int fcp_err = 0, active, aborted;
 	u8 op = tgt_fcpreq->op;
+	unsigned long flags;
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	fcpreq = tfcp_req->fcpreq;
 	active = tfcp_req->active;
 	aborted = tfcp_req->aborted;
 	tfcp_req->active = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (unlikely(active))
 		/* illegal - call while i/o active */
@@ -773,9 +777,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 
 	if (unlikely(aborted)) {
 		/* target transport has aborted i/o prior */
-		spin_lock_irq(&tfcp_req->reqlock);
+		spin_lock_irqsave(&tfcp_req->reqlock, flags);
 		tfcp_req->active = false;
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		tgt_fcpreq->transferred_length = 0;
 		tgt_fcpreq->fcp_error = -ECANCELED;
 		tgt_fcpreq->done(tgt_fcpreq);
@@ -832,9 +836,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 		break;
 	}
 
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->active = false;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tgt_fcpreq->transferred_length = xfrlen;
 	tgt_fcpreq->fcp_error = fcp_err;
@@ -848,15 +852,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+	unsigned long flags;
 
 	/*
 	 * mark aborted only in case there were 2 threads in transport
 	 * (one doing io, other doing abort) and only kills ops posted
 	 * after the abort request
 	 */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	tfcp_req->aborted = true;
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	tfcp_req->status = NVME_SC_INTERNAL;
 
@@ -898,6 +903,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
 	bool abortio = true;
+	unsigned long flags;
 
 	spin_lock(&inireq->inilock);
 	tfcp_req = inireq->tfcp_req;
@@ -910,7 +916,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		return;
 
 	/* break initiator/target relationship for io */
-	spin_lock_irq(&tfcp_req->reqlock);
+	spin_lock_irqsave(&tfcp_req->reqlock, flags);
 	switch (tfcp_req->inistate) {
 	case INI_IO_START:
 	case INI_IO_ACTIVE:
@@ -920,11 +926,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 		abortio = false;
 		break;
 	default:
-		spin_unlock_irq(&tfcp_req->reqlock);
+		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 		WARN_ON(1);
 		return;
 	}
-	spin_unlock_irq(&tfcp_req->reqlock);
+	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
 
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
@@ -264,12 +264,15 @@ int of_device_request_module(struct device *dev)
 	if (size < 0)
 		return size;
 
-	str = kmalloc(size + 1, GFP_KERNEL);
+	/* Reserve an additional byte for the trailing '\0' */
+	size++;
+
+	str = kmalloc(size, GFP_KERNEL);
 	if (!str)
 		return -ENOMEM;
 
 	of_device_get_modalias(dev, str, size);
-	str[size] = '\0';
+	str[size - 1] = '\0';
 	ret = request_module(str);
 	kfree(str);
 
@@ -1272,6 +1272,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
 static int __init imx6_pcie_init(void)
 {
 #ifdef CONFIG_ARM
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, imx6_pcie_of_match);
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
 	/*
 	 * Since probe() can be deferred we need to make sure that
 	 * hook_fault_code is not called after __init memory is freed
@@ -193,6 +193,7 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
 	 */
 	if (estate == PCI_ERS_RESULT_RECOVERED) {
 		pci_dbg(edev, "DPC port successfully recovered\n");
+		pcie_clear_device_status(edev);
 		acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
 	} else {
 		pci_dbg(edev, "DPC port recovery failed\n");
@@ -10477,7 +10477,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				goto out_iounmap_all;
 		} else {
 			error = -ENOMEM;
-			goto out_iounmap_all;
+			goto out_iounmap_ctrl;
 		}
 	}
 
@@ -10495,7 +10495,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			dev_err(&pdev->dev,
 				"ioremap failed for SLI4 HBA dpp registers.\n");
 			error = -ENOMEM;
-			goto out_iounmap_ctrl;
+			goto out_iounmap_all;
 		}
 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
 	}
@@ -10520,9 +10520,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	return 0;
 
 out_iounmap_all:
-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+	if (phba->sli4_hba.drbl_regs_memmap_p)
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
 out_iounmap_ctrl:
-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+	if (phba->sli4_hba.ctrl_regs_memmap_p)
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
 out_iounmap_conf:
 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
 
@@ -1355,17 +1355,30 @@ static int cqspi_remove(struct platform_device *pdev)
 static int cqspi_suspend(struct device *dev)
 {
 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
+	struct spi_master *master = dev_get_drvdata(dev);
+	int ret;
 
+	ret = spi_master_suspend(master);
 	cqspi_controller_enable(cqspi, 0);
-	return 0;
+
+	clk_disable_unprepare(cqspi->clk);
+
+	return ret;
 }
 
 static int cqspi_resume(struct device *dev)
 {
 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
+	struct spi_master *master = dev_get_drvdata(dev);
 
-	cqspi_controller_enable(cqspi, 1);
-	return 0;
+	clk_prepare_enable(cqspi->clk);
+	cqspi_wait_idle(cqspi);
+	cqspi_controller_init(cqspi);
+
+	cqspi->current_cs = -1;
+	cqspi->sclk = 0;
+
+	return spi_master_resume(master);
 }
 
 static const struct dev_pm_ops cqspi__dev_pm_ops = {
@@ -207,8 +207,8 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
 				struct spi_device *spi,
 				int bits_per_word)
 {
-	/* QE uses Little Endian for words > 8
-	 * so transform all words > 8 into 8 bits
+	/* CPM/QE uses Little Endian for words > 8
+	 * so transform 16 and 32 bits words into 8 bits
 	 * Unfortnatly that doesn't work for LSB so
 	 * reject these for now */
 	/* Note: 32 bits word, LSB works iff
@@ -216,9 +216,11 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
 	if (spi->mode & SPI_LSB_FIRST &&
 	    bits_per_word > 8)
 		return -EINVAL;
-	if (bits_per_word > 8)
+	if (bits_per_word <= 8)
+		return bits_per_word;
+	if (bits_per_word == 16 || bits_per_word == 32)
 		return 8; /* pretend its 8 bits */
-	return bits_per_word;
+	return -EINVAL;
 }
 
 static int fsl_spi_setup_transfer(struct spi_device *spi,
@@ -248,7 +250,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
 		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
 							   mpc8xxx_spi,
 							   bits_per_word);
-	else if (mpc8xxx_spi->flags & SPI_QE)
+	else
 		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
 							  bits_per_word);
 
@@ -1554,9 +1554,8 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 	int ret;
 
-	ret = pm_runtime_get_sync(spi_imx->dev);
+	ret = pm_runtime_resume_and_get(spi_imx->dev);
 	if (ret < 0) {
-		pm_runtime_put_noidle(spi_imx->dev);
 		dev_err(spi_imx->dev, "failed to enable clock\n");
 		return ret;
 	}
@@ -1766,13 +1765,10 @@ static int spi_imx_remove(struct platform_device *pdev)
 	spi_bitbang_stop(&spi_imx->bitbang);
 
 	ret = pm_runtime_get_sync(spi_imx->dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(spi_imx->dev);
-		dev_err(spi_imx->dev, "failed to enable clock\n");
-		return ret;
-	}
-
-	writel(0, spi_imx->base + MXC_CSPICTRL);
+	if (ret >= 0)
+		writel(0, spi_imx->base + MXC_CSPICTRL);
+	else
+		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");
 
 	pm_runtime_dont_use_autosuspend(spi_imx->dev);
 	pm_runtime_put_sync(spi_imx->dev);
@@ -1276,19 +1276,23 @@ static int spi_qup_remove(struct platform_device *pdev)
 	struct spi_qup *controller = spi_master_get_devdata(master);
 	int ret;
 
-	ret = pm_runtime_resume_and_get(&pdev->dev);
-	if (ret < 0)
-		return ret;
+	ret = pm_runtime_get_sync(&pdev->dev);
 
-	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
-	if (ret)
-		return ret;
+	if (ret >= 0) {
+		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+		if (ret)
+			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
+				 ERR_PTR(ret));
+
+		clk_disable_unprepare(controller->cclk);
+		clk_disable_unprepare(controller->iclk);
+	} else {
+		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
+			 ERR_PTR(ret));
+	}
 
 	spi_qup_release_dma(master);
 
-	clk_disable_unprepare(controller->cclk);
-	clk_disable_unprepare(controller->iclk);
-
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -2568,6 +2568,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
 	debugfs_remove_recursive(usb3->dentry);
 	device_remove_file(&pdev->dev, &dev_attr_role);
 
+	cancel_work_sync(&usb3->role_work);
 	usb_role_switch_unregister(usb3->role_sw);
 
 	usb_del_gadget_udc(&usb3->gadget);
@@ -75,7 +75,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
 
 /* For soc_device_attribute */
 #define RCAR_XHCI_FIRMWARE_V2 BIT(0) /* FIRMWARE V2 */
-#define RCAR_XHCI_FIRMWARE_V3 BIT(1) /* FIRMWARE V3 */
 
 static const struct soc_device_attribute rcar_quirks_match[] = {
 	{
@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
 
 	if (quirks & RCAR_XHCI_FIRMWARE_V2)
 		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
-	else if (quirks & RCAR_XHCI_FIRMWARE_V3)
-		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
 	else
 		firmware_name = priv->firmware_name;
 
@@ -829,6 +829,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 	bool found = false;
 	struct bio *target = bio ? *bio : NULL;
 
+	f2fs_bug_on(sbi, !target && !page);
+
 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
 		struct list_head *head = &io->bio_list;
@@ -2875,7 +2877,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_submit_merged_write(sbi, DATA);
-		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+		if (bio && *bio)
+			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
 		submitted = NULL;
 	}
 
@@ -2378,6 +2378,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 			spin_unlock(&jh->b_state_lock);
 			write_unlock(&journal->j_state_lock);
 			jbd2_journal_put_journal_head(jh);
+			/* Already zapped buffer? Nothing to do... */
+			if (!bh->b_bdev)
+				return 0;
 			return -EBUSY;
 		}
 		/*
@@ -7,10 +7,9 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
-#include <linux/rtmutex.h>
 #include "internal.h"
 
-static DEFINE_RT_MUTEX(pmsg_lock);
+static DEFINE_MUTEX(pmsg_lock);
 
 static ssize_t write_pmsg(struct file *file, const char __user *buf,
 			  size_t count, loff_t *ppos)
@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
 	if (!access_ok(buf, count))
 		return -EFAULT;
 
-	rt_mutex_lock(&pmsg_lock);
+	mutex_lock(&pmsg_lock);
 	ret = psinfo->write_user(&record, buf);
-	rt_mutex_unlock(&pmsg_lock);
+	mutex_unlock(&pmsg_lock);
 	return ret ? ret : count;
 }
 
@@ -42,7 +42,6 @@ struct nfnetlink_subsystem {
 	int (*commit)(struct net *net, struct sk_buff *skb);
 	int (*abort)(struct net *net, struct sk_buff *skb,
 		     enum nfnl_abort_action action);
-	void (*cleanup)(struct net *net);
 	bool (*valid_genid)(struct net *net, u32 genid);
 
 	ANDROID_KABI_RESERVE(1);
@@ -602,6 +602,10 @@ enum {
 	NVME_AER_VS = 7,
 };
 
+enum {
+	NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
 enum {
 	NVME_AER_NOTICE_NS_CHANGED = 0x00,
 	NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
@@ -16,7 +16,7 @@
 
 #include <linux/string.h>
 
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
 #include <asm/vga.h>
 #endif
 
@@ -150,13 +150,8 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
 	if (likely(!cross_pg))
 		return false;
 
-	if (pool->dma_pages_cnt) {
-		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
-			 XSK_NEXT_PG_CONTIG_MASK);
-	}
-
-	/* skb path */
-	return addr + len > pool->addrs_cnt;
+	return pool->dma_pages_cnt &&
+	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
 }
 
 static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
@@ -10,15 +10,16 @@
 
 TRACE_EVENT(qrtr_ns_service_announce_new,
 
-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+	TP_PROTO(unsigned int service, unsigned int instance,
+		 unsigned int node, unsigned int port),
 
 	TP_ARGS(service, instance, node, port),
 
 	TP_STRUCT__entry(
-		__field(__le32, service)
-		__field(__le32, instance)
-		__field(__le32, node)
-		__field(__le32, port)
+		__field(unsigned int, service)
+		__field(unsigned int, instance)
+		__field(unsigned int, node)
+		__field(unsigned int, port)
 	),
 
 	TP_fast_assign(
@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,
 
 TRACE_EVENT(qrtr_ns_service_announce_del,
 
-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+	TP_PROTO(unsigned int service, unsigned int instance,
+		 unsigned int node, unsigned int port),
 
 	TP_ARGS(service, instance, node, port),
 
 	TP_STRUCT__entry(
-		__field(__le32, service)
-		__field(__le32, instance)
-		__field(__le32, node)
-		__field(__le32, port)
+		__field(unsigned int, service)
+		__field(unsigned int, instance)
+		__field(unsigned int, node)
+		__field(unsigned int, port)
 	),
 
 	TP_fast_assign(
@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,
 
 TRACE_EVENT(qrtr_ns_server_add,
 
-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+	TP_PROTO(unsigned int service, unsigned int instance,
+		 unsigned int node, unsigned int port),
 
 	TP_ARGS(service, instance, node, port),
 
 	TP_STRUCT__entry(
-		__field(__le32, service)
-		__field(__le32, instance)
-		__field(__le32, node)
-		__field(__le32, port)
+		__field(unsigned int, service)
+		__field(unsigned int, instance)
+		__field(unsigned int, node)
+		__field(unsigned int, port)
 	),
 
 	TP_fast_assign(
@@ -1516,7 +1516,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 		goto out;
 	}
 
-	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
+	if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -1530,8 +1530,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 	}
 
 	if (ctx.optlen != 0) {
-		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
-		    put_user(ctx.optlen, optlen)) {
+		if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		if (put_user(ctx.optlen, optlen)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -4773,6 +4773,9 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 		skb = alloc_skb(0, GFP_ATOMIC);
 	} else {
 		skb = skb_clone(orig_skb, GFP_ATOMIC);
+
+		if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
+			return;
 	}
 	if (!skb)
 		return;
@@ -1564,9 +1564,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	cork->dst = NULL;
 	skb_dst_set(skb, &rt->dst);
 
-	if (iph->protocol == IPPROTO_ICMP)
-		icmp_out_count(net, ((struct icmphdr *)
-			skb_transport_header(skb))->type);
+	if (iph->protocol == IPPROTO_ICMP) {
+		u8 icmp_type;
+
+		/* For such sockets, transhdrlen is zero when do ip_append_data(),
+		 * so icmphdr does not in skb linear region and can not get icmp_type
+		 * by icmp_hdr(skb)->type.
+		 */
+		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+			icmp_type = fl4->fl4_icmp_type;
+		else
+			icmp_type = icmp_hdr(skb)->type;
+		icmp_out_count(net, icmp_type);
+	}
 
 	ip_cork_release(cork);
 out:
@@ -7442,6 +7442,8 @@ static int nf_tables_validate(struct net *net)
 			if (nft_table_validate(net, table) < 0)
 				return -EAGAIN;
 		}
+
+		nft_validate_state_update(net, NFT_VALIDATE_SKIP);
 		break;
 	}
 
@@ -8273,11 +8275,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 	return 0;
 }
 
-static void nf_tables_cleanup(struct net *net)
-{
-	nft_validate_state_update(net, NFT_VALIDATE_SKIP);
-}
-
 static int nf_tables_abort(struct net *net, struct sk_buff *skb,
 			   enum nfnl_abort_action action)
 {
@@ -8309,7 +8306,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
 	.cb = nf_tables_cb,
 	.commit = nf_tables_commit,
 	.abort = nf_tables_abort,
-	.cleanup = nf_tables_cleanup,
 	.valid_genid = nf_tables_valid_genid,
 	.owner = THIS_MODULE,
 };
@@ -530,8 +530,6 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 			goto replay_abort;
 		}
 	}
-	if (ss->cleanup)
-		ss->cleanup(net);
 
 	nfnl_err_deliver(&err_list, oskb);
 	kfree_skb(skb);
@@ -1744,7 +1744,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
-	int len, val, err;
+	unsigned int flag;
+	int len, val;
 
 	if (level != SOL_NETLINK)
 		return -ENOPROTOOPT;
@@ -1756,39 +1757,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
 
 	switch (optname) {
 	case NETLINK_PKTINFO:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
-		if (put_user(len, optlen) ||
-		    put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_RECV_PKTINFO;
 		break;
 	case NETLINK_BROADCAST_ERROR:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
-		if (put_user(len, optlen) ||
-		    put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_BROADCAST_SEND_ERROR;
 		break;
 	case NETLINK_NO_ENOBUFS:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
-		if (put_user(len, optlen) ||
-		    put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_RECV_NO_ENOBUFS;
 		break;
 	case NETLINK_LIST_MEMBERSHIPS: {
-		int pos, idx, shift;
+		int pos, idx, shift, err = 0;
 
-		err = 0;
 		netlink_lock_table();
 		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
 			if (len - pos < sizeof(u32))
@@ -1805,40 +1784,32 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
 		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
 			err = -EFAULT;
 		netlink_unlock_table();
-		break;
+		return err;
 	}
 	case NETLINK_CAP_ACK:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
-		if (put_user(len, optlen) ||
-		    put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_CAP_ACK;
 		break;
 	case NETLINK_EXT_ACK:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
-		if (put_user(len, optlen) || put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_EXT_ACK;
 		break;
 	case NETLINK_GET_STRICT_CHK:
-		if (len < sizeof(int))
-			return -EINVAL;
-		len = sizeof(int);
-		val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
-		if (put_user(len, optlen) || put_user(val, optval))
-			return -EFAULT;
-		err = 0;
+		flag = NETLINK_F_STRICT_CHK;
 		break;
 	default:
-		err = -ENOPROTOOPT;
+		return -ENOPROTOOPT;
 	}
-	return err;
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+	val = nlk->flags & flag ? 1 : 0;
+
+	if (put_user(len, optlen) ||
+	    copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
 }
 
 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
@@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log)
 	return 0;
 }
 
+static struct netlink_range_validation iq_range = {
+	.max = INT_MAX,
+};
+
 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 	[TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK },
 
 	[TCA_FQ_PLIMIT] = { .type = NLA_U32 },
 	[TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
 	[TCA_FQ_QUANTUM] = { .type = NLA_U32 },
-	[TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
+	[TCA_FQ_INITIAL_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
 	[TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
 	[TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
 	[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
@@ -155,6 +155,7 @@ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
 		return false;
 
 	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
+	    addr + desc->len > pool->addrs_cnt ||
 	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
 		return false;
 
@@ -41,6 +41,8 @@ are cached and potentially out of date"""
             self.show_subtree(child, level + 1)
 
     def invoke(self, arg, from_tty):
+        if utils.gdb_eval_or_none("clk_root_list") is None:
+            raise gdb.GdbError("No clocks registered")
         gdb.write("                                 enable  prepare  protect               \n")
         gdb.write("   clock                          count    count    count        rate   \n")
         gdb.write("------------------------------------------------------------------------\n")
@@ -5,7 +5,7 @@
 import gdb
 import sys
 
-from linux.utils import CachedType
+from linux.utils import CachedType, gdb_eval_or_none
 from linux.lists import list_for_each_entry
 
 generic_pm_domain_type = CachedType('struct generic_pm_domain')
@@ -70,6 +70,8 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
             gdb.write('    %-50s  %s\n' % (kobj_path, rtpm_status_str(dev)))
 
     def invoke(self, arg, from_tty):
+        if gdb_eval_or_none("&gpd_list") is None:
+            raise gdb.GdbError("No power domain(s) registered")
         gdb.write('domain                          status          children\n');
         gdb.write('    /device                                             runtime status\n');
         gdb.write('----------------------------------------------------------------------\n');
@@ -807,15 +807,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client,
 	es8316->irq = i2c_client->irq;
 	mutex_init(&es8316->lock);
 
-	ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-					"es8316", es8316);
-	if (ret == 0) {
-		/* Gets re-enabled by es8316_set_jack() */
-		disable_irq(es8316->irq);
-	} else {
-		dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
-		es8316->irq = -ENXIO;
+	if (es8316->irq > 0) {
+		ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+						IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+						"es8316", es8316);
+		if (ret) {
+			dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+			es8316->irq = -ENXIO;
+		}
 	}
 
 	return devm_snd_soc_register_component(&i2c_client->dev,
@@ -204,10 +204,10 @@ static int fsl_mqs_probe(struct platform_device *pdev)
 		}
 
 		mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
+		of_node_put(gpr_np);
 		if (IS_ERR(mqs_priv->regmap)) {
 			dev_err(&pdev->dev, "failed to get gpr regmap\n");
-			ret = PTR_ERR(mqs_priv->regmap);
-			goto err_free_gpr_np;
+			return PTR_ERR(mqs_priv->regmap);
 		}
 	} else {
 		regs = devm_platform_ioremap_resource(pdev, 0);
@@ -236,8 +236,7 @@ static int fsl_mqs_probe(struct platform_device *pdev)
 	if (IS_ERR(mqs_priv->mclk)) {
 		dev_err(&pdev->dev, "failed to get the clock: %ld\n",
 			PTR_ERR(mqs_priv->mclk));
-		ret = PTR_ERR(mqs_priv->mclk);
-		goto err_free_gpr_np;
+		return PTR_ERR(mqs_priv->mclk);
 	}
 
 	dev_set_drvdata(&pdev->dev, mqs_priv);
@@ -246,13 +245,9 @@ static int fsl_mqs_probe(struct platform_device *pdev)
 	ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
 					      &fsl_mqs_dai, 1);
 	if (ret)
-		goto err_free_gpr_np;
+		return ret;
 
 	return 0;
-
-err_free_gpr_np:
-	of_node_put(gpr_np);
-
-	return ret;
 }
 
 static int fsl_mqs_remove(struct platform_device *pdev)
@@ -363,8 +363,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
 	struct bpf_insn *insn_start = buf_start;
 	struct bpf_insn *insn_end = buf_end;
 	struct bpf_insn *cur = insn_start;
+	bool double_insn = false;
 
 	for (; cur <= insn_end; cur++) {
+		if (double_insn) {
+			double_insn = false;
+			continue;
+		}
+		double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
 		printf("% 4d: ", (int)(cur - insn_start + start_idx));
 		print_bpf_insn(&cbs, cur, true);
 		if (cur != insn_end)
@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)
 
 static bool connect_send(const char *cgroup_path)
 {
-	bool res = true;
 	int server_fd = -1, client_fd = -1;
+	char message[] = "message";
+	bool res = true;
 
 	if (join_cgroup(cgroup_path))
 		goto out_clean;
@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
 	if (client_fd < 0)
 		goto out_clean;
 
-	if (send(client_fd, "message", strlen("message"), 0) < 0)
+	if (send(client_fd, &message, sizeof(message), 0) < 0)
+		goto out_clean;
+
+	if (read(server_fd, &message, sizeof(message)) < 0)
 		goto out_clean;
 
 	res = false;