Commit 959297e2 authored by Tomasz Wlostowski

kernel: use synchronous DMA transfer instead of DMA complete interrupt. Workaround for the unpin_current_cpu()/spinlock related issue.
parent abe5de58
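
For context on the workaround: the removed scheme took dma_lock with spin_trylock() in the ft_dma_work work item and released it in the DMA-complete hard-IRQ handler. On PREEMPT_RT kernels spinlocks are sleeping locks that pin the task to its CPU while held (migrate_disable()/unpin_current_cpu()), so acquiring and releasing the lock from two different contexts like this breaks there. A reduced sketch of the removed pattern, reconstructed from the hunks below (the demo_* names are hypothetical, not the driver's):

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Reduced illustration of the removed scheme: the lock is taken in
 * workqueue context and released in hard-IRQ context, which is what
 * trips unpin_current_cpu() on PREEMPT_RT. */
struct demo_dev {
	spinlock_t dma_lock;
	struct work_struct dma_work;
};

static void demo_dma_work(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, dma_work);

	if (!spin_trylock(&dd->dma_lock))
		return;
	/* ... program and start the DMA transfer, then return with the
	 * lock still held; nothing releases it in this context ... */
}

static irqreturn_t demo_dma_complete_irq(int irq, void *dev_id)
{
	struct demo_dev *dd = dev_id;

	/* ... the matching unlock happens here, in another context. */
	spin_unlock(&dd->dma_lock);
	return IRQ_HANDLED;
}
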
@@ -206,8 +206,6 @@ struct fmctdc_dev {
 	struct zio_dma_sgt *zdma;
 	int dma_chan_mask;
-	/* interrupt lock to prevent triggering a DMA access while the previous one is still running */
-	spinlock_t dma_lock;
 };
 
 static inline u32 ft_ioread(struct fmctdc_dev *ft, unsigned long addr)
@@ -384,6 +382,7 @@ static inline enum gncore_dma_status gn4124_dma_wait_done(struct fmctdc_dev *ft,
 		}
 	}
+	return GENNUM_DMA_STA_ERROR;
 }
 
 /**
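
The hunk above only shows the new fall-through return; the body of gn4124_dma_wait_done() is not part of this diff. Presumably it polls the GN4124 status register until the transfer finishes or the timeout expires, roughly like the minimal sketch below. The loop structure, parameter name, and timeout unit are assumptions; dma_readl(), GENNUM_DMA_STA, and the status codes do appear elsewhere in this commit:

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Sketch only, not the driver's actual code: poll the GN4124 status
 * register until the transfer reports DONE or ERROR, or until the
 * timeout (assumed here to be in milliseconds) expires. Sleeping
 * between polls is fine because the caller, ft_dma_work(), runs from
 * a work item in process context. */
static inline enum gncore_dma_status
gn4124_dma_wait_done(struct fmctdc_dev *ft, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, deadline)) {
		uint32_t status = dma_readl(ft, GENNUM_DMA_STA);

		if (status == GENNUM_DMA_STA_DONE ||
		    status == GENNUM_DMA_STA_ERROR)
			return status;
		usleep_range(1000, 2000);
	}

	/* Timed out: report an error so the caller aborts the acquisition. */
	return GENNUM_DMA_STA_ERROR;
}
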
@@ -131,9 +131,6 @@ static inline uint32_t ft_chan_to_irq_mask(struct fmctdc_dev *ft, uint32_t chan_
 static void ft_eic_irq_disable(struct fmctdc_dev *ft, uint32_t mask)
 {
 	ft_iowrite(ft, mask, ft->ft_irq_base + TDC_EIC_REG_EIC_IDR);
-	printk(KERN_INFO "race69[disable_Save] %s:%d: ft = %p TDC_EIC_REG_EIC_IDR, mask = %x\n",
-	       __FILE__, __LINE__,
-	       ft, mask);
 }
 
 /**
@@ -144,9 +141,6 @@ static void ft_eic_irq_disable(struct fmctdc_dev *ft, uint32_t mask)
 static void ft_eic_irq_enable(struct fmctdc_dev *ft, uint32_t mask)
 {
 	ft_iowrite(ft, mask, ft->ft_irq_base + TDC_EIC_REG_EIC_IER);
-	printk(KERN_INFO "race69[enable_restore] %s:%d: ft = %p TDC_EIC_REG_EIC_IER, mask = %x\n",
-	       __FILE__, __LINE__,
-	       ft, mask);
 }
@@ -302,21 +296,6 @@ static void ft_dma_work(struct work_struct *work)
 	int i, err, transfer, n_block;
 	unsigned long *loop;
 
-	/* The code below prevents a race condition between ft_dma_work and the
-	   DMA-complete interrupt handler. On some systems (in particular multicore)
-	   and for some patterns of input timestamps, an IRQ for one of the TDC
-	   channels can be triggered while the DMA transfer is still in progress.
-	   In the worst case, the call to zio_dma_alloc_sg() a few lines below this
-	   comment will overwrite a pointer still in use (on a parallel CPU core) by
-	   ft_irq_handler_dma_complete(). Sometimes this would cause a kernel crash
-	   or produce incorrect timestamps. Since DMA transfers in the GN4124 cannot
-	   be queued, we prevent this with a simple, crude spinlock
-	   (David, Fede, fixme please ;-) */
-	if (!spin_trylock(&ft->dma_lock)) {
-		ft_eic_irq_enable(ft, TDC_EIC_EIC_IMR_TDC_DMA_MASK);
-		return;
-	}
-
 	irq_stat = ft_ioread(ft, ft->ft_irq_base + TDC_EIC_REG_EIC_ISR);
 	irq_stat &= TDC_EIC_EIC_IMR_TDC_DMA_MASK;
 	irq_stat >>= TDC_EIC_EIC_IMR_TDC_DMA_SHIFT;
@@ -342,6 +321,14 @@ static void ft_dma_work(struct work_struct *work)
 		n_block++;
 	}
 
+	/* An interrupt occurred (there is some data in the buffer) but the cset
+	   is not armed (acquisition off): just return, without emitting a scary
+	   warning. */
+	if (n_block == 0) {
+		ft_eic_irq_enable(ft, TDC_EIC_EIC_IMR_TDC_DMA_MASK);
+		return;
+	}
+
 	cset = &ft->zdev->cset[0]; /* ZIO is not really using the channel,
 				      and probably it should not */
 	ft->zdma = zio_dma_alloc_sg(cset->chan, ft->fmc->hwdev,
@@ -362,9 +349,9 @@ static void ft_dma_work(struct work_struct *work)
 	dma_sync_single_for_device(ft->fmc->hwdev, ft->zdma->dma_page_desc_pool,
 				   sizeof(struct gncore_dma_item) * ft->zdma->sgt.nents,
 				   DMA_TO_DEVICE);
-	ft_irq_enable_restore(ft);
 	gn4124_dma_start(ft);
-	gn4124_dma_wait_done(ft); /* what is this? *? */
+	err = gn4124_dma_wait_done(ft, 100); /* synchronous wait for DMA completion */
 
 	loop = (unsigned long *) &ft->dma_chan_mask;
 	if (WARN(!ft->zdma, "DMA not programmed correctly ")) {
@@ -378,9 +365,8 @@ static void ft_dma_work(struct work_struct *work)
 	for_each_set_bit(i, loop, FT_NUM_CHANNELS)
 		zio_cset_busy_clear(&ft->zdev->cset[i], 1);
 
-	if (irq_stat & DMA_EIC_EIC_IDR_DMA_ERROR) {
-		dev_err(ft->fmc->hwdev, "0x%X 0x%X",
-			irq_stat, dma_readl(ft, GENNUM_DMA_STA));
+	if (err != GENNUM_DMA_STA_DONE) {
+		dev_err(ft->fmc->hwdev, "DMA error/abort: 0x%X", err);
 		for_each_set_bit(i, loop, FT_NUM_CHANNELS)
 			ft_abort_acquisition(&ft->zdev->cset[i]);
@@ -389,7 +375,8 @@ static void ft_dma_work(struct work_struct *work)
 	/* perhaps WQ: it processes data */
 	for_each_set_bit(i, loop, FT_NUM_CHANNELS)
 		zio_trigger_data_done(&ft->zdev->cset[i]);
->>>>>>> try to move things to synchr dma_work
+
+	ft_eic_irq_enable(ft, TDC_EIC_EIC_IMR_TDC_DMA_MASK);
 	return;
 
 err_map:
@@ -402,7 +389,6 @@ err_alloc:
 		zio_trigger_abort_disable(&ft->zdev->cset[i], 0);
 	}
-	spin_unlock(&ft->dma_lock);
 	ft_eic_irq_enable(ft, TDC_EIC_EIC_IMR_TDC_DMA_MASK);
 }
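
With the completion interrupt gone, ft_dma_work() follows the usual "mask the source in the handler, defer to a work item, do the transfer synchronously, unmask when done" shape, which keeps all the locking in a single context. A generic, self-contained sketch of that pattern (the demo_* names are hypothetical, not the driver's):

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Generic sketch: the handler masks its own interrupt source and
 * defers to a work item; the work item performs the transfer
 * synchronously and unmasks the source only once everything has
 * been processed. */
struct demo_dev {
	struct work_struct dma_work;
	void (*irq_mask)(struct demo_dev *dd);
	void (*irq_unmask)(struct demo_dev *dd);
	int (*do_dma_sync)(struct demo_dev *dd);	/* start + wait */
};

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_dev *dd = dev_id;

	/* Mask the source so it cannot re-fire while the work runs. */
	dd->irq_mask(dd);
	schedule_work(&dd->dma_work);
	return IRQ_HANDLED;
}

static void demo_dma_work(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, dma_work);
	int err = dd->do_dma_sync(dd);

	/* On error the real driver aborts the acquisition per channel. */
	if (err)
		pr_err("demo: DMA failed (%d)\n", err);

	/* Re-arm the interrupt only after the data has been consumed. */
	dd->irq_unmask(dd);
}

Compared with the removed trylock scheme, any lock would now be taken and released inside the work item alone, which sidesteps the PREEMPT_RT pin/unpin problem, and it also serializes transfers naturally, which matters because GN4124 DMA transfers cannot be queued.
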
@@ -490,61 +476,6 @@ irq:
 	return;
 }
 
-static irqreturn_t ft_irq_handler_dma_complete(int irq, void *dev_id)
-{
-	struct fmc_device *fmc = dev_id;
-	struct fmctdc_dev *ft = fmc->mezzanine_data;
-	uint32_t irq_stat;
-
-	irq_stat = ft_ioread(ft, ft->ft_dma_eic_base + DMA_EIC_REG_EIC_ISR);
-	if (!irq_stat) {
-		spin_unlock(&ft->dma_lock);
-		return IRQ_NONE;
-	}
-
-	ft_iowrite(ft, irq_stat, ft->ft_dma_eic_base + TDC_EIC_REG_EIC_ISR);
-
-	/*
-	loop = (unsigned long *) &ft->dma_chan_mask;
-
-	if (WARN(!ft->zdma, "DMA not programmed correctly ")) {
-		for_each_set_bit(i, loop, FT_NUM_CHANNELS)
-			ft_abort_acquisition(&ft->zdev->cset[i]);
-		goto out;
-	}
-
-	zio_dma_unmap_sg(ft->zdma);
-	zio_dma_free_sg(ft->zdma);
-
-	for_each_set_bit(i, loop, FT_NUM_CHANNELS)
-		zio_cset_busy_clear(&ft->zdev->cset[i], 1);
-
-	if (irq_stat & DMA_EIC_EIC_IDR_DMA_ERROR) {
-		dev_err(ft->fmc->hwdev, "0x%X 0x%X",
-			irq_stat, dma_readl(ft, GENNUM_DMA_STA));
-		for_each_set_bit(i, loop, FT_NUM_CHANNELS)
-			ft_abort_acquisition(&ft->zdev->cset[i]);
-		goto out;
-	}
-
-	// perhaps WQ: it processes data
-	for_each_set_bit(i, loop, FT_NUM_CHANNELS)
-		zio_trigger_data_done(&ft->zdev->cset[i]);
-	*/
-	/* out: */
-	fmc_irq_ack(fmc);
-	spin_unlock(&ft->dma_lock);
-
-	/* Re-enable interrupts that were disabled in the IRQ handler */
-	/* ft_irq_enable_restore(ft); */
-
-	return IRQ_HANDLED;
-}
-
 static irqreturn_t ft_irq_handler_ts(int irq, void *dev_id)
 {
 	struct fmc_device *fmc = dev_id;
@@ -696,8 +627,6 @@ int ft_irq_init(struct fmctdc_dev *ft)
 {
 	int ret;
 
-	spin_lock_init(&ft->dma_lock);
-
 	ft_irq_coalescing_timeout_set(ft, -1, irq_timeout_ms_default);
 	ft_irq_coalescing_size_set(ft, -1, 40);
@@ -721,43 +650,13 @@ int ft_irq_init(struct fmctdc_dev *ft)
 		return ret;
 	}
 
-	if (ft->mode == FT_ACQ_TYPE_DMA) {
-		/*
-		 * DMA completion interrupt (from the GN4124 core), like in
-		 * the FMCAdc design
-		 */
-		ft->fmc->irq = ft->ft_irq_base + 1;
-		ret = fmc_irq_request(ft->fmc, ft_irq_handler_dma_complete,
-				      "fmc-tdc-dma", 0);
-	}
-
 	/* disable & kick off the interrupts (fixme: possible issue with the HDL) */
 	ft_iowrite(ft, ~0, ft->ft_irq_base + TDC_EIC_REG_EIC_IDR);
-	printk(KERN_INFO "race69 %s:%d: ft = %p TDC_EIC_REG_EIC_IDR, ft->irq_imr = %x\n",
-	       __FILE__, __LINE__,
-	       ft, 0);
+	ft_iowrite(ft, ~0, ft->ft_dma_eic_base + DMA_EIC_REG_EIC_IER);
 	fmc_irq_ack(ft->fmc);
 
-	/*
-	 * We enable interrupts on all channels, but if we do not enable the
-	 * channel we should not receive anything. So even if ZIO is not ready
-	 * to receive data at this time, we should not see any trouble.
-	 * If we have problems here, the HDL is broken!
-	 */
-	if (ft->mode == FT_ACQ_TYPE_DMA) {
-		ft_iowrite(ft,
-			   DMA_EIC_EIC_IER_DMA_DONE | DMA_EIC_EIC_IER_DMA_ERROR,
-			   ft->ft_dma_eic_base + DMA_EIC_REG_EIC_IER);
-		printk(KERN_INFO "race69 %s:%d: ft = %p DMA_EIC_REG_EIC_IER, DMA_EIC_EIC_IER_DMA_DONE|DMA_EIC_EIC_IER_DMA_ERROR = %x\n",
-		       __FILE__, __LINE__,
-		       ft, DMA_EIC_EIC_IER_DMA_DONE | DMA_EIC_EIC_IER_DMA_ERROR);
-	}
-
 	ft_iowrite(ft, ft_chan_to_irq_mask(ft, 0x1F),
 		   ft->ft_irq_base + TDC_EIC_REG_EIC_IER);
-	printk(KERN_INFO "race69 %s:%d: ft = %p TDC_EIC_REG_EIC_IER, ft_chan_to_irq_mask(ft, 0x1F) = %x\n",
-	       __FILE__, __LINE__,
-	       ft, ft_chan_to_irq_mask(ft, 0x1F));
 
 	return 0;
 }
@@ -765,23 +664,7 @@ int ft_irq_init(struct fmctdc_dev *ft)
 void ft_irq_exit(struct fmctdc_dev *ft)
 {
 	ft_iowrite(ft, ~0, ft->ft_irq_base + TDC_EIC_REG_EIC_IDR);
-	printk(KERN_INFO "race69 %s:%d: ft = %p TDC_EIC_REG_EIC_IDR, ft->irq_imr = %x\n",
-	       __FILE__, __LINE__,
-	       ft, 0);
-	if (ft->mode == FT_ACQ_TYPE_DMA) {
-		printk(KERN_INFO "race69 %s:%d: ft = %p DMA_EIC_REG_EIC_IER, DMA_EIC_EIC_IDR_DMA_DONE | DMA_EIC_EIC_IDR_DMA_ERROR = %x\n",
-		       __FILE__, __LINE__,
-		       ft, DMA_EIC_EIC_IDR_DMA_DONE | DMA_EIC_EIC_IDR_DMA_ERROR);
-		ft_iowrite(ft,
-			   DMA_EIC_EIC_IDR_DMA_DONE | DMA_EIC_EIC_IDR_DMA_ERROR,
-			   ft->ft_dma_eic_base + DMA_EIC_REG_EIC_IER);
-	}
 
 	ft->fmc->irq = ft->ft_irq_base;
 	fmc_irq_free(ft->fmc);
-
-	if (ft->mode == FT_ACQ_TYPE_DMA) {
-		ft->fmc->irq = ft->ft_irq_base + 1;
-		fmc_irq_free(ft->fmc);
-	}
 }