Commit e0884772 authored by Dave Jiang, committed by Vinod Koul

ioatdma: Removing hw bug workaround for CB3.x .2 and earlier

CB3.2 and earlier hardware has silicon bugs whose workarounds are no longer needed on the newer hardware: we no longer have to use a NULL op to signal an interrupt for RAID ops. This change makes sure the legacy workarounds only run on legacy hardware.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <djbw@fb.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent 42c91ee7
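The hunks below gate the legacy completion-descriptor workaround on the device version: on CB3.3 and later the PQ descriptor itself can raise the completion interrupt, while older parts still need the extra NULL descriptor (and the extra ring slot for it). A minimal standalone sketch of that gating pattern, using hypothetical simplified types rather than the real ioatdma structures, might look like this:

/*
 * Minimal sketch of the version-gating pattern this patch introduces.
 * The names (fake_pq_desc, extra_completion_descs, finish_pq_op) are
 * hypothetical stand-ins, not the driver's structures; the version
 * encoding below is an assumption made for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define VER_3_3 0x33	/* assumed "CB3.3" encoding, mirroring IOAT_VER_3_3 */

struct fake_pq_desc {
	bool int_en;		/* raise an interrupt when this descriptor completes */
	bool compl_write;	/* write completion status when done */
};

/* How many extra ring slots a PQ op needs just for completion signalling. */
static int extra_completion_descs(unsigned int hw_version)
{
	/* CB3.2 and earlier: one extra NULL descriptor carries the interrupt. */
	return (hw_version < VER_3_3) ? 1 : 0;
}

static void finish_pq_op(struct fake_pq_desc *pq, unsigned int hw_version,
			 bool want_interrupt)
{
	if (hw_version >= VER_3_3) {
		/* New hardware: the PQ descriptor itself signals completion. */
		pq->int_en = want_interrupt;
		pq->compl_write = true;
	} else {
		/*
		 * Legacy workaround (elided here): append a NULL descriptor
		 * that carries int_en/compl_write, as the old code always did.
		 */
	}
}

int main(void)
{
	struct fake_pq_desc pq = { false, false };

	printf("extra descs on CB3.2: %d\n", extra_completion_descs(0x32));
	printf("extra descs on CB3.3: %d\n", extra_completion_descs(0x33));
	finish_pq_op(&pq, 0x33, true);
	printf("int_en=%d compl_write=%d\n", (int)pq.int_en, (int)pq.compl_write);
	return 0;
}

Reserving num_descs + cb32 ring slots in the real patch follows the same idea: the slot for the NULL completion descriptor is only claimed when the legacy path will actually consume it.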
@@ -837,6 +837,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *device = chan->device;
 	struct ioat_ring_ent *compl_desc;
 	struct ioat_ring_ent *desc;
 	struct ioat_ring_ent *ext;
@@ -847,6 +848,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	u32 offset = 0;
 	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
 	int i, s, idx, with_ext, num_descs;
+	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
 
 	dev_dbg(to_dev(chan), "%s\n", __func__);
 	/* the engine requires at least two sources (we provide
@@ -872,7 +874,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	 * order.
 	 */
 	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat, num_descs+1) == 0)
+	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
 		idx = ioat->head;
 	else
 		return NULL;
@@ -926,6 +928,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
 	dump_pq_desc_dbg(ioat, desc, ext);
 
+	if (!cb32) {
+		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		pq->ctl_f.compl_write = 1;
+		compl_desc = desc;
+	} else {
 	/* completion descriptor carries interrupt bit */
 	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
 	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
@@ -936,6 +943,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	hw->ctl_f.compl_write = 1;
 	hw->size = NULL_DESC_BUFFER_SIZE;
 	dump_desc_dbg(ioat, compl_desc);
+	}
+
 	/* we leave the channel locked to ensure in order submission */
 	return &compl_desc->txd;
...