From 7741693b1663c0aa2e9d48a58f64abddc0cadd95 Mon Sep 17 00:00:00 2001 From: Hua Huang Date: Thu, 26 May 2022 21:03:44 +0800 Subject: [PATCH] Setting cancelling flag before op cancel and up the semaphore in cancel ast. m0_op_cancel() is used to cancel an ongoing operation. The real cancel is posted as an AST(idx_op_cancel_ast) in m0__idx_cancel(). But before/while this AST gets executed, the op itself may already be stable and m0_op_fini() may already be called and this op may already be freed. An op cancelling flag and a semaphore are introduced to synchronize the op cancel and op finalization. So, take the semaphore before destroying it, and up the semaphore when the cancel AST finishes. Signed-off-by: Hua Huang --- motr/client.c | 7 +++++++ motr/client.h | 11 +++++++++++ motr/idx_dix.c | 8 ++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/motr/client.c b/motr/client.c index fbc742fbbc4..6461c858aa5 100644 --- a/motr/client.c +++ b/motr/client.c @@ -841,6 +841,8 @@ M0_INTERNAL int m0_op_init(struct m0_op *op, m0_mutex_init(&op->op_priv_lock); m0_mutex_init(&op->op_pending_tx_lock); spti_tlist_init(&op->op_pending_tx); + op->op_cancelling = false; + m0_semaphore_init(&op->op_sema, 0); return M0_RC(0); } @@ -859,6 +861,9 @@ void m0_op_fini(struct m0_op *op) M0_OS_FAILED))); M0_PRE(op->op_size >= sizeof *oc); + if (op->op_cancelling) + m0_semaphore_down(&op->op_sema); + oc = bob_of(op, struct m0_op_common, oc_op, &oc_bobtype); if (oc->oc_cb_fini != NULL) oc->oc_cb_fini(oc); @@ -876,6 +881,8 @@ void m0_op_fini(struct m0_op *op) m0_sm_group_unlock(grp); m0_sm_group_fini(grp); + op->op_cancelling = false; + m0_semaphore_fini(&op->op_sema); /* Finalise op's bob */ m0_op_bob_fini(op); diff --git a/motr/client.h b/motr/client.h index da982a4b960..41a6c05d500 100644 --- a/motr/client.h +++ b/motr/client.h @@ -709,6 +709,17 @@ struct m0_op { /* Operation's private data, can be used as arguments for callbacks.*/ void *op_datum; uint64_t op_count; + + 
/** + * This flag is set when there is an ongoing cancel operation. + * There is no refcount in this op. But the op cancelling AST + * needs this op to remain valid. The op cancelling AST will + * semaphore up when it is done. The m0_op_fini() checks this flag + * and semaphore down on it if needed. This will make sure the op + * is not freed before the op cancel is done. + */ + bool op_cancelling; + struct m0_semaphore op_sema; /** * Private field, to be used by internal implementation. */ diff --git a/motr/idx_dix.c b/motr/idx_dix.c index 7bee805a564..5e5ef3a0357 100644 --- a/motr/idx_dix.c +++ b/motr/idx_dix.c @@ -1178,7 +1178,7 @@ static void idx_op_cancel_ast(struct m0_sm_group *grp, struct m0_sm_ast *ast) M0_ENTRY(); if (oi->oi_in_completion) - return; + goto out; req = oi->oi_dix_req; if (idx_is_distributed(oi)) { @@ -1188,6 +1188,8 @@ static void idx_op_cancel_ast(struct m0_sm_group *grp, struct m0_sm_ast *ast) } else { cas_index_cancel(req); } +out: + m0_semaphore_up(&oi->oi_oc.oc_op.op_sema); M0_LEAVE(); } @@ -1214,8 +1216,10 @@ M0_INTERNAL int m0__idx_cancel(struct m0_op_idx *oi) if (!M0_IN(dreq->dr_type, (DIX_CREATE, DIX_DELETE, DIX_CCTGS_LOOKUP)) && - !oi->oi_in_completion) + !oi->oi_in_completion) { + oi->oi_oc.oc_op.op_cancelling = true; m0_sm_ast_post(oi->oi_sm_grp, op_ast); + } return M0_RC(0); }