From 16605e8d50898ac88b5b504a7fbd63ecdcf37702 Mon Sep 17 00:00:00 2001
From: Ashutosh Dixit
Date: Tue, 22 Dec 2015 19:35:23 -0800
Subject: [PATCH 1/3] dmaengine: Revert "dmaengine: mic_x100: add missing spin_unlock"

This reverts commit e958e079e254 ("dmaengine: mic_x100: add missing
spin_unlock").

The above patch is incorrect. There is nothing wrong with the original
code. The spin_lock is acquired in the "prep" functions and released
in "submit".

Signed-off-by: Ashutosh Dixit
Signed-off-by: Vinod Koul
---
 drivers/dma/mic_x100_dma.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index cddfa8dbf4bd..068e920ecb68 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,7 +317,6 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	struct device *dev = mic_dma_ch_to_device(mic_ch);
 	int result;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (!len && !flags)
 		return NULL;
@@ -325,13 +324,10 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	spin_lock(&mic_ch->prep_lock);
 	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
 	if (result >= 0)
-		tx = allocate_tx(mic_ch);
-
-	if (!tx)
-		dev_err(dev, "Error enqueueing dma, error=%d\n", result);
-
+		return allocate_tx(mic_ch);
+	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -339,14 +335,13 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	int ret;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	spin_lock(&mic_ch->prep_lock);
 	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
 	if (!ret)
-		tx = allocate_tx(mic_ch);
+		return allocate_tx(mic_ch);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 /* Return the status of the transaction */
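The locking scheme this revert restores is easy to misread because the lock and unlock deliberately live in different functions: a successful "prep" call returns with prep_lock held, and the lock is only dropped later in "submit" (or on the prep error path). Below is a minimal userspace sketch of that hand-off using pthreads as a stand-in for the kernel spinlock; the names prep_memcpy and submit_tx are illustrative assumptions, not the driver's real API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prep_lock = PTHREAD_MUTEX_INITIALIZER;

struct tx_desc { int cookie; };
static struct tx_desc the_tx;

/* On success, returns a descriptor and intentionally leaves prep_lock
 * held; only the failure path unlocks before returning. */
static struct tx_desc *prep_memcpy(int ring_has_space)
{
	pthread_mutex_lock(&prep_lock);
	if (ring_has_space)
		return &the_tx;		/* lock still held, on purpose */
	fprintf(stderr, "Error enqueueing dma\n");
	pthread_mutex_unlock(&prep_lock);
	return NULL;
}

/* The other half of the pair: submit drops the lock taken in prep. */
static int submit_tx(struct tx_desc *tx)
{
	int cookie = ++tx->cookie;
	pthread_mutex_unlock(&prep_lock);
	return cookie;
}

int main(void)
{
	struct tx_desc *tx = prep_memcpy(1);

	if (tx)
		printf("submitted, cookie=%d\n", submit_tx(tx));
	return 0;
}

Adding an unlock inside prep on the success path, as the reverted patch did, would make submit unlock a lock it does not hold, which is exactly why the original code was correct.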
From b02bab6b0f928d49dbfb03e1e4e9dd43647623d7 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 7 Jan 2016 11:02:34 +1100
Subject: [PATCH 2/3] async_tx: use GFP_NOWAIT rather than GFP_NOIO

These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
Use GFP_NOWAIT rather than GFP_NOIO.

Dan Williams writes: Longer term async_tx needs to be merged into md
directly, as we can allocate this unmap data statically per-stripe
rather than per request.

Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Cc: stable@vger.kernel.org (v3.13+)
Reported-and-tested-by: Stanislav Samsonov
Acked-by: Dan Williams
Signed-off-by: NeilBrown
Signed-off-by: Vinod Koul
---
 crypto/async_tx/async_memcpy.c      | 2 +-
 crypto/async_tx/async_pq.c          | 4 ++--
 crypto/async_tx/async_raid6_recov.c | 4 ++--
 crypto/async_tx/async_xor.c         | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..c0748bbd4c08 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
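For context on the constraint driving this change: GFP_NOIO still allows the allocator to sleep waiting for reclaim, while GFP_NOWAIT fails immediately, which is the only acceptable behaviour inside a preempt-disabled get_cpu()/put_cpu() region. The kernel-style sketch below shows the shape of the calling pattern; sketch_issue_xor is an illustrative assumption, not the raid5 source.

#include <linux/dmaengine.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Illustrative only: between get_cpu() and put_cpu() preemption is
 * disabled, so sleeping in the allocator (possible with GFP_NOIO)
 * would be a bug; GFP_NOWAIT fails fast instead. */
static void sketch_issue_xor(struct dma_device *device, int src_cnt)
{
	struct dmaengine_unmap_data *unmap = NULL;

	get_cpu();			/* preemption disabled from here */

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, src_cnt + 1,
						 GFP_NOWAIT);
	if (!unmap) {
		/* Allocation failed fast instead of sleeping; async_tx
		 * then falls back to the synchronous software path. */
	} else {
		dmaengine_unmap_put(unmap);	/* sketch: just drop the ref */
	}

	put_cpu();			/* preemption enabled again */
}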
From b0b79024627fcbd4b4531f4e2bc8e133c8fb6a95 Mon Sep 17 00:00:00 2001
From: Rameshwar Prasad Sahu
Date: Wed, 23 Dec 2015 18:28:15 +0530
Subject: [PATCH 3/3] dmaengine: xgene-dma: Fix double IRQ issue by setting IRQ_DISABLE_UNLAZY flag

For interrupt controllers that do not support irq_disable, and for
hardware with level-triggered interrupts, an extra interrupt can be
left pending. This patch fixes the issue by setting the
IRQ_DISABLE_UNLAZY flag for the interrupt line.

Reference: http://git.kernel.org/tip/e9849777d0e27cdd2902805be51da73e7c79578c

Signed-off-by: Rameshwar Prasad Sahu
Signed-off-by: Vinod Koul
---
 drivers/dma/xgene-dma.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9dfa2b0fa5da..9cb93c5b655d 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -29,6 +29,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 
@@ -1610,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
 	/* Register DMA channel rx irq */
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(chan->dev, chan->rx_irq,
 				       xgene_dma_chan_ring_isr,
 				       0, chan->name, chan);
@@ -1620,6 +1622,7 @@
 
 		for (j = 0; j < i; j++) {
 			chan = &pdma->chan[i];
+			irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 			devm_free_irq(chan->dev, chan->rx_irq, chan);
 		}
 
@@ -1640,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma)
 
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		devm_free_irq(chan->dev, chan->rx_irq, chan);
 	}
 }
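The pattern this patch applies generalizes to other drivers: set IRQ_DISABLE_UNLAZY before requesting a level-triggered interrupt whose controller cannot truly disable the line, so that disable_irq() masks it immediately instead of lazily, and clear the flag again wherever the irq is released. A hedged sketch of that pairing follows; sketch_request_irq and sketch_free_irq are illustrative helpers, not xgene-dma code.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Mark the line IRQ_DISABLE_UNLAZY before devm_request_irq(), and
 * clear the flag on every path that gives the irq back, mirroring
 * what the patch does per DMA channel. */
static int sketch_request_irq(struct device *dev, unsigned int irq,
			      irq_handler_t handler, const char *name,
			      void *data)
{
	int ret;

	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	ret = devm_request_irq(dev, irq, handler, 0, name, data);
	if (ret)
		irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return ret;
}

static void sketch_free_irq(struct device *dev, unsigned int irq, void *data)
{
	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	devm_free_irq(dev, irq, data);
}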