Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--	drivers/dma/ioat/dma.c		| 17
-rw-r--r--	drivers/dma/ioat/init.c		| 21
2 files changed, 18 insertions, 20 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
 #include "../dmaengine.h"
 
 static char *chanerr_str[] = {
+	"DMA Transfer Source Address Error",
 	"DMA Transfer Destination Address Error",
 	"Next Descriptor Address Error",
 	"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
 	"Result Guard Tag verification Error",
 	"Result Application Tag verification Error",
 	"Result Reference Tag verification Error",
-	NULL
 };
 
 static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
 {
 	int i;
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
 		if ((chanerr >> i) & 1) {
-			if (chanerr_str[i]) {
-				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
-					i, chanerr_str[i]);
-			} else
-				break;
+			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+				i, chanerr_str[i]);
 		}
 	}
 }
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
-	struct ioatdma_device *ioat_dma;
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
 	int chunk;
 	dma_addr_t phys;
 	u8 *pos;
 	off_t offs;
 
-	ioat_dma = to_ioatdma_device(chan->device);
-
 	chunk = idx / IOAT_DESCS_PER_2M;
 	idx &= (IOAT_DESCS_PER_2M - 1);
 	offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 
 		tx = &desc->txd;
 		if (tx->cookie) {
-			struct dmaengine_result res;
-
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			res.result = DMA_TRANS_NOERROR;
 			dmaengine_desc_get_callback_invoke(tx, NULL);
 			tx->callback = NULL;
 			tx->callback_result = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_src)) {
 		dev_err(dev, "mapping src buffer failed\n");
+		err = -ENOMEM;
 		goto free_resources;
 	}
 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, dma_dest)) {
 		dev_err(dev, "mapping dest buffer failed\n");
+		err = -ENOMEM;
 		goto unmap_src;
 	}
 	flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
+	if (dma_mapping_error(dev, dest_dma)) {
+		err = -ENOMEM;
 		goto free_resources;
+	}
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_srcs[i] = DMA_ERROR_CODE;
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
-	bool is_raid_device = false;
 	int err;
 	u16 val16;
 
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
 	if (ioat_dma->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
 		dma->max_xor = 8;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	}
 
 	if (ioat_dma->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
 
 		dma->device_prep_dma_pq = ioat_prep_pq;
 		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
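Below is a minimal, self-contained userspace sketch (not part of the patch) of the loop pattern the dma.c hunk adopts in ioat_print_chanerrs(): bounding the iteration with ARRAY_SIZE() instead of a hard-coded 32 plus a NULL sentinel. The helper name demo_print_errs(), the two-entry table, and the local ARRAY_SIZE macro are illustrative assumptions; in the kernel, ARRAY_SIZE() comes from the kernel headers and the messages go through dev_err().

```c
/*
 * Sketch of the ARRAY_SIZE()-bounded decode loop (hypothetical names,
 * userspace only).  Each set bit in "chanerr" selects one table entry.
 */
#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char *demo_err_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
};

static void demo_print_errs(uint32_t chanerr)
{
	size_t i;

	/* Only bits with a matching table entry are decoded. */
	for (i = 0; i < ARRAY_SIZE(demo_err_str); i++) {
		if ((chanerr >> i) & 1)
			printf("Err(%zu): %s\n", i, demo_err_str[i]);
	}
}

int main(void)
{
	demo_print_errs(0x3);	/* decodes both example bits */
	return 0;
}
```

Tying the loop bound to the table size means the string table and the iteration limit cannot drift apart, which is what lets a new entry such as "DMA Transfer Source Address Error" be added without touching the print loop or keeping a NULL terminator.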