Commit 66d44e2b authored by Dave Jiang, committed by Xiaochen Shen

dmaengine: idxd: rework descriptor free path on failure

mainline inclusion
from mainline-v5.17
commit 5d78abb6
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I596WO
CVE: NA

Intel-SIG: commit 5d78abb6 dmaengine: idxd: rework descriptor free path on failure.
Incremental backporting patches for DSA/IAA on Intel Xeon platform.

Deviation from upstream:
Merge commit 5cb664fb ("Merge branch 'fixes' into next").

--------------------------------

Refactor the completion function to allow skipping of descriptor freeing on
the submission failure path. This completely removes descriptor freeing from
the submit failure path and leaves the responsibility to the caller.
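
For illustration only, a sketch of the resulting calling convention distilled
from the hunks below (not code added by this patch; example_issue() is a
made-up caller). Submission failure now leaves the descriptor alive for the
caller to free, while completion and abort paths pass free_desc = true so
idxd_dma_complete_txd() frees it for them:

	/* Sketch only: example_issue() is hypothetical. */
	static int example_issue(struct idxd_wq *wq, struct idxd_desc *desc)
	{
		int rc = idxd_submit_desc(wq, desc);

		if (rc < 0) {
			/* Submission no longer frees on failure; the caller must. */
			idxd_free_desc(wq, desc);
			return rc;
		}
		return 0;
	}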
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163528416222.3925689.12859769271667814762.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
Parent b757600d
@@ -21,7 +21,8 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
 }
 
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type)
+			   enum idxd_complete_type comp_type,
+			   bool free_desc)
 {
 	struct dma_async_tx_descriptor *tx;
 	struct dmaengine_result res;
@@ -44,6 +45,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc,
 		tx->callback = NULL;
 		tx->callback_result = NULL;
 	}
+
+	if (free_desc)
+		idxd_free_desc(desc->wq, desc);
 }
 
 static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -174,8 +178,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 
 	rc = idxd_submit_desc(wq, desc);
-	if (rc < 0)
+	if (rc < 0) {
+		idxd_free_desc(wq, desc);
 		return rc;
+	}
 
 	return cookie;
 }
@@ -579,7 +579,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq);
 void idxd_unregister_dma_channel(struct idxd_wq *wq);
 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type);
+			   enum idxd_complete_type comp_type, bool free_desc);
 
 /* cdev */
 int idxd_cdev_register(void);
@@ -603,10 +603,4 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-	idxd_dma_complete_txd(desc, reason);
-	idxd_free_desc(desc->wq, desc);
-}
-
 #endif
@@ -717,10 +717,8 @@ static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
 	if (!head)
 		return;
 
-	llist_for_each_entry_safe(desc, itr, head, llnode) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
-	}
+	llist_for_each_entry_safe(desc, itr, head, llnode)
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 }
 
 static void idxd_flush_work_list(struct idxd_irq_entry *ie)
@@ -729,8 +727,7 @@ static void idxd_flush_work_list(struct idxd_irq_entry *ie)
 	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
 		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 	}
 }
@@ -195,11 +195,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 			 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 			 */
 			if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-				complete_desc(desc, IDXD_COMPLETE_ABORT);
+				idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 				continue;
 			}
 
-			complete_desc(desc, IDXD_COMPLETE_NORMAL);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 		} else {
 			spin_lock(&irq_entry->list_lock);
 			list_add_tail(&desc->list,
@@ -238,11 +238,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 			 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 			 */
 			if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-				complete_desc(desc, IDXD_COMPLETE_ABORT);
+				idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 				continue;
 			}
 
-			complete_desc(desc, IDXD_COMPLETE_NORMAL);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 		}
 	}
@@ -134,17 +134,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 	spin_unlock(&ie->list_lock);
 
 	if (found)
-		complete_desc(found, IDXD_COMPLETE_ABORT);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
 
 	/*
-	 * complete_desc() will return desc to allocator and the desc can be
-	 * acquired by a different process and the desc->list can be modified.
-	 * Delete desc from list so the list trasversing does not get corrupted
-	 * by the other process.
+	 * completing the descriptor will return desc to allocator and
+	 * the desc can be acquired by a different process and the
+	 * desc->list can be modified. Delete desc from list so the
+	 * list trasversing does not get corrupted by the other process.
 	 */
 	list_for_each_entry_safe(d, t, &flist, list) {
 		list_del_init(&d->list);
-		complete_desc(d, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true);
 	}
 }
@@ -155,15 +155,11 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	void __iomem *portal;
 	int rc;
 
-	if (idxd->state != IDXD_DEV_ENABLED) {
-		idxd_free_desc(wq, desc);
+	if (idxd->state != IDXD_DEV_ENABLED)
 		return -EIO;
-	}
 
-	if (!percpu_ref_tryget_live(&wq->wq_active)) {
-		idxd_free_desc(wq, desc);
+	if (!percpu_ref_tryget_live(&wq->wq_active))
 		return -ENXIO;
-	}
 
 	portal = idxd_wq_portal_addr(wq);
@@ -198,8 +194,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 		/* abort operation frees the descriptor */
 		if (ie)
 			llist_abort_desc(wq, ie, desc);
-		else
-			idxd_free_desc(wq, desc);
 		return rc;
 	}
 }