Merge 1429a9260f ("Revert "dm: requeue IO if mapping table not yet available"") into android12-5.10-lts

Steps on the way to 5.10.226

Resolves merge conflicts in:
	fs/f2fs/xattr.c
	fs/nfsd/filecache.c

Change-Id: I09ff012f62cfc2cd08550684766f05eac93951fb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 0407f5e40e
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-11-14 07:16:40 +00:00

24 changed files with 316 additions and 189 deletions


@@ -71,7 +71,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
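
For context, the mask change above tightens the user frame-pointer sanity check from 4-byte to 8-byte alignment, matching an RV64-style ABI in which saved frame pointers are 8-byte aligned. A minimal standalone sketch of the predicate (hypothetical helper, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* A frame is walked only while the frame pointer is non-NULL,
	 * 8-byte aligned (fp & 0x7 == 0), and the entry buffer has room. */
	static bool fp_walkable(uint64_t fp, uint64_t nr, uint64_t max_stack)
	{
		return fp && !(fp & 0x7) && nr < max_stack;
	}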


@@ -257,6 +257,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
kfree(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -338,7 +339,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table);
if (rc) {
tmc_free_sg_table(sg_table);
kfree(sg_table);
return ERR_PTR(rc);
}
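
With the two hunks above, tmc_free_sg_table() takes over freeing the descriptor itself, so callers — including the allocation error path — must not kfree() it again. A hedged sketch of the resulting ownership contract (argument list abridged and illustrative):

	struct tmc_sg_table *sgt;

	sgt = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	/* ... use the table ... */
	tmc_free_sg_table(sgt);	/* frees pages and the descriptor; no kfree(sgt) */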


@@ -238,9 +238,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
struct ad7606_state *st = iio_priv(indio_dev);
DECLARE_BITMAP(values, 3);
values[0] = val;
values[0] = val & GENMASK(2, 0);
gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
st->gpio_os->info, values);
/* AD7616 requires a reset to update value */
@@ -445,7 +445,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
return PTR_ERR(st->gpio_range);
st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
GPIOD_OUT_HIGH);
GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_standby))
return PTR_ERR(st->gpio_standby);
@@ -704,7 +704,7 @@ static int ad7606_suspend(struct device *dev)
if (st->gpio_standby) {
gpiod_set_value(st->gpio_range, 1);
gpiod_set_value(st->gpio_standby, 0);
gpiod_set_value(st->gpio_standby, 1);
}
return 0;
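
The standby changes above lean on gpiolib's logical-level convention: gpiod_set_value() takes an asserted/deasserted value, and any active-low polarity declared in the device tree is inverted by gpiolib, not the driver. A hedged sketch of that convention (hypothetical DT binding shown in the comment):

	/* Sketch: with "standby-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>" in the DT,
	 * gpiolib flips the physical level for us. */
	struct gpio_desc *standby;

	standby = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_LOW);
	/* requested deasserted: the device starts powered up */

	gpiod_set_value(standby, 1);	/* assert standby: pin driven low */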


@@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned long os[3] = {1};
DECLARE_BITMAP(os, 3);
bitmap_fill(os, 3);
/*
* Software mode is enabled when all three oversampling
* pins are set to high. If oversampling gpios are defined
@@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
* otherwise, they must be hardwired to VDD
*/
if (st->gpio_os) {
gpiod_set_array_value(ARRAY_SIZE(os),
gpiod_set_array_value(st->gpio_os->ndescs,
st->gpio_os->desc, st->gpio_os->info, os);
}
/* OS of 128 and 256 are available only in software mode */


@@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = lookup_atid(t, atid);
if (!ep)
return -EINVAL;
pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
@@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int ret = 0;
ep = lookup_atid(t, atid);
if (!ep)
return -EINVAL;
la = (struct sockaddr_in *)&ep->com.local_addr;
ra = (struct sockaddr_in *)&ep->com.remote_addr;
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;


@@ -959,7 +959,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
}
struct roce_hem_item {
struct hns_roce_hem_item {
struct list_head list; /* link all hems in the same bt level */
struct list_head sibling; /* link all hems in last hop for mtt */
void *addr;
@@ -969,21 +969,26 @@ struct roce_hem_item {
int end; /* end buf offset in this hem */
};
static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
int start, int end,
int count, bool exist_bt,
int bt_level)
/* All HEM items are linked in a tree structure */
struct hns_roce_hem_head {
struct list_head branch[HNS_ROCE_MAX_BT_REGION];
struct list_head root;
struct list_head leaf;
};
static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
bool exist_bt, int bt_level)
{
struct roce_hem_item *hem;
struct hns_roce_hem_item *hem;
hem = kzalloc(sizeof(*hem), GFP_KERNEL);
if (!hem)
return NULL;
if (exist_bt) {
hem->addr = dma_alloc_coherent(hr_dev->dev,
count * BA_BYTE_LEN,
&hem->dma_addr, GFP_KERNEL);
hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
&hem->dma_addr, GFP_KERNEL);
if (!hem->addr) {
kfree(hem);
return NULL;
@@ -1000,7 +1005,7 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
struct roce_hem_item *hem, bool exist_bt)
struct hns_roce_hem_item *hem, bool exist_bt)
{
if (exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
@@ -1011,7 +1016,7 @@ static void hem_list_free_item(struct hns_roce_dev *hr_dev,
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
struct list_head *head, bool exist_bt)
{
struct roce_hem_item *hem, *temp_hem;
struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
@@ -1027,24 +1032,24 @@ static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
struct roce_hem_item *hem, void *cpu_addr,
struct hns_roce_hem_item *hem, void *cpu_addr,
u64 phy_addr)
{
hem->addr = cpu_addr;
hem->dma_addr = (dma_addr_t)phy_addr;
}
static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem,
static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
int offset)
{
return (hem->start <= offset && offset <= hem->end);
}
static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
int page_offset)
static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
int page_offset)
{
struct roce_hem_item *hem, *temp_hem;
struct roce_hem_item *found = NULL;
struct hns_roce_hem_item *hem, *temp_hem;
struct hns_roce_hem_item *found = NULL;
list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
if (hem_list_page_is_in_range(hem, page_offset)) {
@@ -1074,9 +1079,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
* @bt_level: base address table level
* @unit: ba entries per bt page
*/
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
u32 step;
u64 step;
int max;
int i;
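
The u32-to-u64 widening above guards against overflow: the per-level range is a product of table sizes, which can pass 2^32 for deep hop counts and large units. Illustrative arithmetic with made-up sizes, not taken from the driver:

	/* Three levels of 4096-entry tables cover 4096^3 = 2^36 offsets;
	 * a u32 silently truncates that to 0. */
	u64 range64 = 4096ULL * 4096 * 4096;	/* 68719476736 */
	u32 range32 = (u32)range64;		/* 0 after truncation */
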
@@ -1112,7 +1117,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
{
struct hns_roce_buf_region *r;
int total = 0;
int step;
u64 step;
int i;
for (i = 0; i < region_cnt; i++) {
@@ -1134,16 +1139,16 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
int offset, struct list_head *mid_bt,
struct list_head *btm_bt)
{
struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
struct roce_hem_item *cur, *pre;
struct hns_roce_hem_item *cur, *pre;
const int hopnum = r->hopnum;
int start_aligned;
int distance;
int ret = 0;
int max_ofs;
int level;
u32 step;
u64 step;
int end;
if (hopnum <= 1)
@@ -1167,10 +1172,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
/* config L1 bt to last bt and link them to corresponding parent */
for (level = 1; level < hopnum; level++) {
cur = hem_list_search_item(&mid_bt[level], offset);
if (cur) {
hem_ptrs[level] = cur;
continue;
if (!hem_list_is_bottom_bt(hopnum, level)) {
cur = hem_list_search_item(&mid_bt[level], offset);
if (cur) {
hem_ptrs[level] = cur;
continue;
}
}
step = hem_list_calc_ba_range(hopnum, level, unit);
@@ -1180,7 +1187,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
}
start_aligned = (distance / step) * step + r->offset;
end = min_t(int, start_aligned + step - 1, max_ofs);
end = min_t(u64, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
true, level);
if (!cur) {
@@ -1214,52 +1221,96 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
return ret;
}
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list, int unit,
const struct hns_roce_buf_region *regions,
int region_cnt)
static struct hns_roce_hem_item *
alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
const struct hns_roce_buf_region *regions, int region_cnt)
{
struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
struct roce_hem_item *hem, *temp_hem, *root_hem;
const struct hns_roce_buf_region *r;
struct list_head temp_root;
struct list_head temp_btm;
void *cpu_base;
u64 phy_base;
int ret = 0;
struct hns_roce_hem_item *hem;
int ba_num;
int offset;
int total;
int step;
int i;
r = &regions[0];
root_hem = hem_list_search_item(&hem_list->root_bt, r->offset);
if (root_hem)
return 0;
ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
if (ba_num < 1)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&temp_root);
offset = r->offset;
if (ba_num > unit)
return ERR_PTR(-ENOBUFS);
offset = regions[0].offset;
/* indicate to last region */
r = &regions[region_cnt - 1];
root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
ba_num, true, 0);
hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
ba_num, true, 0);
if (!hem)
return ERR_PTR(-ENOMEM);
*max_ba_num = ba_num;
return hem;
}
static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
u64 phy_base, const struct hns_roce_buf_region *r,
struct list_head *branch_head,
struct list_head *leaf_head)
{
struct hns_roce_hem_item *hem;
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
r->count, false, 0);
if (!hem)
return -ENOMEM;
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
return r->count;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
int unit, const struct hns_roce_buf_region *r,
const struct list_head *branch_head)
{
struct hns_roce_hem_item *hem, *temp_hem;
int total = 0;
int offset;
u64 step;
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step < 1)
return -EINVAL;
/* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
total++;
}
return total;
}
static int
setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
int unit, int max_ba_num, struct hns_roce_hem_head *head,
const struct hns_roce_buf_region *regions, int region_cnt)
{
const struct hns_roce_buf_region *r;
struct hns_roce_hem_item *root_hem;
void *cpu_base;
u64 phy_base;
int i, total;
int ret;
root_hem = list_first_entry(&head->root,
struct hns_roce_hem_item, list);
if (!root_hem)
return -ENOMEM;
list_add(&root_hem->list, &temp_root);
hem_list->root_ba = root_hem->dma_addr;
INIT_LIST_HEAD(&temp_btm);
for (i = 0; i < region_cnt; i++)
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
for (i = 0; i < region_cnt && total < ba_num; i++) {
for (i = 0; i < region_cnt && total < max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1271,48 +1322,64 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
/* if hopnum is 0 or 1, cut a new fake hem from the root bt
* which's address share to all regions.
*/
if (hem_list_is_bottom_bt(r->hopnum, 0)) {
hem = hem_list_alloc_item(hr_dev, r->offset,
r->offset + r->count - 1,
r->count, false, 0);
if (!hem) {
ret = -ENOMEM;
goto err_exit;
}
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
list_add(&hem->list, &temp_list[i]);
list_add(&hem->sibling, &temp_btm);
total += r->count;
} else {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step < 1) {
ret = -EINVAL;
goto err_exit;
}
/* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem,
&hem_list->mid_bt[i][1], list) {
offset = (hem->start - r->offset) / step *
BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
}
}
if (hem_list_is_bottom_bt(r->hopnum, 0))
ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
&head->branch[i], &head->leaf);
else
ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
&hem_list->mid_bt[i][1]);
if (ret < 0)
return ret;
total += ret;
}
list_splice(&temp_btm, &hem_list->btm_bt);
list_splice(&temp_root, &hem_list->root_bt);
list_splice(&head->leaf, &hem_list->btm_bt);
list_splice(&head->root, &hem_list->root_bt);
for (i = 0; i < region_cnt; i++)
list_splice(&temp_list[i], &hem_list->mid_bt[i][0]);
list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
return 0;
}
err_exit:
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list, int unit,
const struct hns_roce_buf_region *regions,
int region_cnt)
{
struct hns_roce_hem_item *root_hem;
struct hns_roce_hem_head head;
int max_ba_num;
int ret;
int i;
root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
if (root_hem)
return 0;
max_ba_num = 0;
root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
region_cnt);
if (IS_ERR(root_hem))
return PTR_ERR(root_hem);
/* List head for storing all allocated HEM items */
INIT_LIST_HEAD(&head.root);
INIT_LIST_HEAD(&head.leaf);
for (i = 0; i < region_cnt; i++)
hem_list_free_all(hr_dev, &temp_list[i], false);
INIT_LIST_HEAD(&head.branch[i]);
hem_list_free_all(hr_dev, &temp_root, true);
hem_list->root_ba = root_hem->dma_addr;
list_add(&root_hem->list, &head.root);
ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
hem_list_free_all(hr_dev, &head.branch[i], false);
hem_list_free_all(hr_dev, &head.root, true);
}
return ret;
}
@@ -1398,7 +1465,7 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
int offset, int *mtt_cnt, u64 *phy_addr)
{
struct list_head *head = &hem_list->btm_bt;
struct roce_hem_item *hem, *temp_hem;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
u64 phy_base = 0;
int nr = 0;


@@ -633,30 +633,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
}
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
dma_addr_t *pages, struct hns_roce_buf_region *region)
struct hns_roce_buf_region *region, dma_addr_t *pages,
int max_count)
{
int count, npage;
int offset, end;
__le64 *mtts;
int offset;
int count;
int npage;
u64 addr;
int end;
int i;
/* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
if (!region->hopnum)
return 0;
offset = region->offset;
end = offset + region->count;
npage = 0;
while (offset < end) {
while (offset < end && npage < max_count) {
count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset, &count, NULL);
if (!mtts)
return -ENOBUFS;
for (i = 0; i < count; i++) {
for (i = 0; i < count && npage < max_count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
addr = to_hr_hw_page_addr(pages[npage]);
else
@@ -668,7 +664,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
offset += count;
}
return 0;
return npage;
}
static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
@@ -835,8 +831,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
{
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
unsigned int i;
int err;
unsigned int i, mapped_cnt;
int ret;
/*
* Only use the first page address as root ba when hopnum is 0, this
@@ -847,26 +843,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
for (i = 0; i < mtr->hem_cfg.region_count; i++) {
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
/* if hopnum is 0, no need to map pages in this region */
if (!r->hopnum) {
mapped_cnt += r->count;
continue;
}
if (r->offset + r->count > page_cnt) {
err = -EINVAL;
ret = -EINVAL;
ibdev_err(ibdev,
"failed to check mtr%u end %u + %u, max %u.\n",
i, r->offset, r->count, page_cnt);
return err;
return ret;
}
err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
if (err) {
ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
page_cnt - mapped_cnt);
if (ret < 0) {
ibdev_err(ibdev,
"failed to map mtr%u offset %u, ret = %d.\n",
i, r->offset, err);
return err;
i, r->offset, ret);
return ret;
}
mapped_cnt += ret;
ret = 0;
}
return 0;
if (mapped_cnt < page_cnt) {
ret = -ENOBUFS;
ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
mapped_cnt, page_cnt);
}
return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,


@@ -1287,19 +1287,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
__acquire(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
spin_lock_irq(&send_cq->lock);
spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
spin_lock_irq(&recv_cq->lock);
spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock_irq(&recv_cq->lock);
spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@@ -1319,13 +1319,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock(&recv_cq->lock);
} else if (send_cq == recv_cq) {
__release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
spin_unlock(&send_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock);
spin_unlock(&recv_cq->lock);
}
}
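
Besides dropping the _irq variants, hns_roce_lock_cqs() keeps the classic ABBA-avoidance pattern: always take the CQ with the lower cqn first, and annotate the second acquisition with spin_lock_nested() so lockdep accepts two locks of the same class. A generic sketch of the pattern (hypothetical helper, not from the patch):

	/* Deadlock-safe locking of two same-class spinlocks, ordered by
	 * a stable id; the second lock is marked as a nested acquisition. */
	static void lock_pair(spinlock_t *a, u32 a_id, spinlock_t *b, u32 b_id)
	{
		if (a_id < b_id) {
			spin_lock(a);
			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(b);
			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}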


@@ -643,6 +643,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);


@@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
DMERR_LIMIT("%s: mapping table unavailable, erroring io",
dm_device_name(md));
dm_put_live_table(md, srcu_idx);
return BLK_STS_RESOURCE;
return BLK_STS_IOERR;
}
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
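
The status swap matters because blk-mq treats the two codes differently: BLK_STS_RESOURCE asks the block layer to re-queue and retry the request later, while BLK_STS_IOERR completes it with an error immediately — the intent once the revert makes a missing table a hard failure. A small illustration using the standard helper:

	#include <linux/blkdev.h>

	/* blk_status_to_errno() maps BLK_STS_IOERR to -EIO; BLK_STS_RESOURCE
	 * maps to -ENOMEM and normally triggers a re-queue inside blk-mq
	 * instead of ever reaching the submitter. */
	int err = blk_status_to_errno(BLK_STS_IOERR);	/* -EIO */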


@@ -1697,10 +1697,15 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
DMERR_LIMIT("%s: mapping table unavailable, erroring io",
dm_device_name(md));
bio_io_error(bio);
goto out;
}
/* If suspended, or map not yet available, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
unlikely(!map)) {
/* If suspended, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)


@@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
if (!ndev->debugfs_dir)
if (IS_ERR(ndev->debugfs_dir))
ndev->debugfs_info = NULL;
else
ndev->debugfs_info =


@@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
"\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
"\tOut buff phys addr %pap\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer size %pa\n", &peer->outbuf_size);


@@ -769,7 +769,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
of_match_device(dove_pinctrl_of_match, &pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
void __iomem *base;
int i;
int i, ret;
pdev->dev.platform_data = (void *)match->data;
@@ -784,15 +784,18 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
clk_prepare_enable(clk);
mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, mpp_res);
if (IS_ERR(base))
return PTR_ERR(base);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto err_probe;
}
mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
sizeof(*mpp_data), GFP_KERNEL);
if (!mpp_data)
return -ENOMEM;
if (!mpp_data) {
ret = -ENOMEM;
goto err_probe;
}
dove_pinctrl_info.control_data = mpp_data;
for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
@@ -811,8 +814,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
mpp4_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mpp4_base))
return PTR_ERR(mpp4_base);
if (IS_ERR(mpp4_base)) {
ret = PTR_ERR(mpp4_base);
goto err_probe;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@@ -823,8 +828,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
pmu_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmu_base))
return PTR_ERR(pmu_base);
if (IS_ERR(pmu_base)) {
ret = PTR_ERR(pmu_base);
goto err_probe;
}
gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
if (IS_ERR(gconfmap)) {
@@ -834,12 +841,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
adjust_resource(&fb_res,
(mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
if (IS_ERR(gc_base))
return PTR_ERR(gc_base);
if (IS_ERR(gc_base)) {
ret = PTR_ERR(gc_base);
goto err_probe;
}
gconfmap = devm_regmap_init_mmio(&pdev->dev,
gc_base, &gc_regmap_config);
if (IS_ERR(gconfmap))
return PTR_ERR(gconfmap);
if (IS_ERR(gconfmap)) {
ret = PTR_ERR(gconfmap);
goto err_probe;
}
}
/* Warn on any missing DT resource */
@@ -847,6 +859,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
return mvebu_pinctrl_probe(pdev);
err_probe:
clk_disable_unprepare(clk);
return ret;
}
static struct platform_driver dove_pinctrl_driver = {


@@ -919,8 +919,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_controller error.\n");
goto out_pm_get;
dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
goto free_dma;
}
pm_runtime_mark_last_busy(fsl_lpspi->dev);
@@ -928,6 +928,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
return 0;
free_dma:
fsl_lpspi_dma_exit(controller);
out_pm_get:
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_put_sync(fsl_lpspi->dev);
@@ -944,6 +946,9 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
fsl_lpspi_dma_exit(controller);
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
return 0;
}


@@ -100,11 +100,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (irq < 0)
return;
irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
if (unlikely(ret))
@@ -397,6 +395,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
vq->last_avail_idx = vq_state.avail_index;
break;
case VHOST_SET_VRING_CALL:
if (vq->call_ctx.ctx) {
if (ops->get_status(vdpa) &
VIRTIO_CONFIG_S_DRIVER_OK)
vhost_vdpa_unsetup_vq_irq(v, idx);
vq->call_ctx.producer.token = NULL;
}
break;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -422,12 +428,17 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (vq->call_ctx.ctx) {
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
vq->call_ctx.producer.token = vq->call_ctx.ctx;
if (ops->get_status(vdpa) &
VIRTIO_CONFIG_S_DRIVER_OK)
vhost_vdpa_setup_vq_irq(v, idx);
} else {
cb.callback = NULL;
cb.private = NULL;
cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -888,6 +899,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
vqs[i]->call_ctx.ctx = NULL;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);


@@ -180,6 +180,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
/* Setup virtqueue callback */
cb.callback = virtio_vdpa_virtqueue_cb;
cb.private = info;
cb.trigger = NULL;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));


@@ -213,29 +213,6 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, wdog);
}
static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
{
struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
if (watchdog_active(&imx_sc_wdd->wdd))
imx_sc_wdt_stop(&imx_sc_wdd->wdd);
return 0;
}
static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
{
struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
if (watchdog_active(&imx_sc_wdd->wdd))
imx_sc_wdt_start(&imx_sc_wdd->wdd);
return 0;
}
static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
imx_sc_wdt_suspend, imx_sc_wdt_resume);
static const struct of_device_id imx_sc_wdt_dt_ids[] = {
{ .compatible = "fsl,imx-sc-wdt", },
{ /* sentinel */ }
@@ -247,7 +224,6 @@ static struct platform_driver imx_sc_wdt_driver = {
.driver = {
.name = "imx-sc-wdt",
.of_match_table = imx_sc_wdt_dt_ids,
.pm = &imx_sc_wdt_pm_ops,
},
};
module_platform_driver(imx_sc_wdt_driver);


@@ -265,7 +265,8 @@ enum {
ORPHAN_INO, /* for orphan ino list */
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
TRANS_DIR_INO, /* for trasactions dir ino list */
TRANS_DIR_INO, /* for transactions dir ino list */
XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -1128,6 +1129,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
CP_XATTR_DIR,
};
enum iostat_type {


@@ -219,6 +219,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
XATTR_DIR_INO))
cp_reason = CP_XATTR_DIR;
return cp_reason;
}


@@ -580,6 +580,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
.id = id,
.type = type,
};
__be32 status = nfs_ok;
__be32 *p;
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -592,12 +593,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
return nfserrno(ret);
ret = strlen(item->name);
WARN_ON_ONCE(ret > IDMAP_NAMESZ);
p = xdr_reserve_space(xdr, ret + 4);
if (!p)
return nfserr_resource;
p = xdr_encode_opaque(p, item->name, ret);
if (unlikely(!p)) {
status = nfserr_resource;
goto out_put;
}
xdr_encode_opaque(p, item->name, ret);
out_put:
cache_put(&item->h, nn->idtoname_cache);
return 0;
return status;
}
static bool


@@ -806,6 +806,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
ci = &cmsg->cm_u.cm_clntinfo;
if (get_user(namelen, &ci->cc_name.cn_len))
return -EFAULT;
if (!namelen) {
dprintk("%s: namelen should not be zero", __func__);
return -EINVAL;
}
name.data = memdup_user(&ci->cc_name.cn_id, namelen);
if (IS_ERR_OR_NULL(name.data))
return -EFAULT;
@@ -828,6 +832,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
cnm = &cmsg->cm_u.cm_name;
if (get_user(namelen, &cnm->cn_len))
return -EFAULT;
if (!namelen) {
dprintk("%s: namelen should not be zero", __func__);
return -EINVAL;
}
name.data = memdup_user(&cnm->cn_id, namelen);
if (IS_ERR_OR_NULL(name.data))
return -EFAULT;
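
Context for the new zero-length guards: memdup_user() with a length of 0 returns ZERO_SIZE_PTR rather than NULL or an ERR_PTR, so it slips past the IS_ERR_OR_NULL() check below and hands the recovery code an unusable zero-length name. A hedged illustration:

	/* Hypothetical call: kmalloc(0) inside memdup_user() yields
	 * ZERO_SIZE_PTR (a non-NULL, non-ERR cookie), so IS_ERR_OR_NULL()
	 * stays false even though the pointer must never be dereferenced. */
	void *p = memdup_user(uptr, 0);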


@@ -11,10 +11,16 @@
* vDPA callback definition.
* @callback: interrupt callback function
* @private: the data passed to the callback function
* @trigger: the eventfd for the callback (Optional).
* When it is set, the vDPA driver must guarantee that
* signaling it is functional equivalent to triggering
* the callback. Then vDPA parent can signal it directly
* instead of triggering the callback.
*/
struct vdpa_callback {
irqreturn_t (*callback)(void *data);
void *private;
struct eventfd_ctx *trigger;
};
/**

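The new trigger field lets a vDPA parent signal the guest directly through the eventfd instead of bouncing through the callback. A hedged sketch of how a parent might consume it (hypothetical helper; eventfd_signal() is the stock kernel API):

	#include <linux/eventfd.h>

	/* Signal the eventfd when one is registered, else fall back to
	 * the callback, per the equivalence the comment above guarantees. */
	static void inject_vq_interrupt(struct vdpa_callback *cb)
	{
		if (cb->trigger)
			eventfd_signal(cb->trigger, 1);
		else if (cb->callback)
			cb->callback(cb->private);
	}
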

@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
{ CP_RECOVER_DIR, "dir needs recovery" })
{ CP_RECOVER_DIR, "dir needs recovery" }, \
{ CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \