soc: qcom: hab: Put work of hab_ctx_free() to workqueue if needed

hab_ctx_free() might sleep, because it calls habmem_remove_export()
->habmem_export_put()->habmem_export_destroy()
->habmem_exp_release(), which ends in dma_buf_unmap_attachment()
and dma_buf_detach(), both of which might sleep.

Meanwhile, hab_ctx_free() might be called in atomic context in the
following situations:
  1. physical_channel_rx_dispatch()->hab_msg_recv()->
hab_vchan_put()->hab_ctx_put()->hab_ctx_free() in a tasklet.
  2. A hab client holds a spinlock and calls hab_vchan_close()->
hab_vchan_put()->hab_vchan_free()->hab_ctx_put()->hab_ctx_free().

So move the work of hab_ctx_free() to a workqueue when hab_ctx_free()
is called in a non-preemptible context.
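
For illustration, here is a minimal, self-contained sketch of the
deferral pattern (names such as demo_ctx are hypothetical; the actual
change is in the diff below):

#include <linux/kref.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct kref refcount;
	struct work_struct destroy_work;
};

/* Cleanup that may sleep, so it must run in process context. */
static void demo_ctx_free_fn(struct demo_ctx *ctx)
{
	kfree(ctx);
}

static void demo_ctx_free_work_fn(struct work_struct *work)
{
	demo_ctx_free_fn(container_of(work, struct demo_ctx, destroy_work));
}

/* kref release callback: may be reached from atomic context. */
static void demo_ctx_free(struct kref *ref)
{
	struct demo_ctx *ctx = container_of(ref, struct demo_ctx, refcount);

	if (likely(preemptible())) {
		demo_ctx_free_fn(ctx);	/* safe to sleep here */
	} else {
		/* defer the sleeping cleanup to the system workqueue */
		INIT_WORK(&ctx->destroy_work, demo_ctx_free_work_fn);
		schedule_work(&ctx->destroy_work);
	}
}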

Change-Id: I1f7ef6cb377833290752e1032a1925c66483ebf3
Signed-off-by: Deyan Wang <quic_deyawang@quicinc.com>
Deyan Wang 2023-07-08 20:43:40 +05:30
parent d2101694a7
commit 8eb3555a68
2 changed files with 43 additions and 4 deletions

@@ -116,11 +116,14 @@ struct uhab_context *hab_ctx_alloc(int kernel)
 	return ctx;
 }
 
-/* ctx can only be freed when all the vchan releases the refcnt */
-void hab_ctx_free(struct kref *ref)
+/*
+ * This function might sleep. One scenario (only applicable for Linux)
+ * is as below, hab_ctx_free_fn->habmem_remove_export->habmem_export_put
+ * ->habmem_export_destroy->habmem_exp_release,
+ * where dma_buf_unmap_attachment() & dma_buf_detach() might sleep.
+ */
+static void hab_ctx_free_fn(struct uhab_context *ctx)
 {
-	struct uhab_context *ctx =
-		container_of(ref, struct uhab_context, refcount);
 	struct hab_export_ack_recvd *exp_ack_recvd, *expack_tmp;
 	struct hab_import_ack_recvd *imp_ack_recvd, *impack_tmp;
 	struct virtual_channel *vchan;
@@ -269,6 +272,41 @@ void hab_ctx_free(struct kref *ref)
 	kfree(ctx);
 }
 
+static void hab_ctx_free_work_fn(struct work_struct *work)
+{
+	struct uhab_context *ctx =
+		container_of(work, struct uhab_context, destroy_work);
+
+	hab_ctx_free_fn(ctx);
+}
+
+/*
+ * ctx can only be freed after all the vchan releases the refcnt
+ * and hab_release() is called.
+ *
+ * this function might be called in atomic context in following situations
+ * (only applicable to Linux):
+ * 1. physical_channel_rx_dispatch()->hab_msg_recv()->hab_vchan_put()
+ * ->hab_ctx_put()->hab_ctx_free() in tasklet.
+ * 2. hab client holds spin_lock and calls hab_vchan_close()->hab_vchan_put()
+ * ->hab_vchan_free()->hab_ctx_free().
+ */
+void hab_ctx_free(struct kref *ref)
+{
+	struct uhab_context *ctx =
+		container_of(ref, struct uhab_context, refcount);
+
+	if (likely(preemptible())) {
+		hab_ctx_free_fn(ctx);
+	} else {
+		pr_info("In non-preemptive context now, ctx owner %d\n",
+			ctx->owner);
+		INIT_WORK(&ctx->destroy_work, hab_ctx_free_work_fn);
+		schedule_work(&ctx->destroy_work);
+	}
+}
+
 /*
  * caller needs to call vchan_put() afterwards. this is used to refcnt
  * the local ioctl access based on ctx

@@ -305,6 +305,7 @@ struct hab_device {
 struct uhab_context {
 	struct list_head node; /* managed by the driver */
 	struct kref refcount;
+	struct work_struct destroy_work;
 	struct list_head vchannels;
 	int vcnt;