Commit 4d733007 authored by Federico Vaga

dma: do all transfers in a single run

Signed-off-by: Federico Vaga <federico.vaga@gmail.com>
parent 69bd8d73
@@ -28,21 +28,27 @@
*
* It calculates the number of necessary nents
*/
static int zfat_calculate_nents(struct zio_block *block)
static int zfat_calculate_nents(struct zfad_block *zfad_block,
unsigned int n_blocks)
{
int bytesleft = block->datalen;
void *bufp = block->data;
int i, bytesleft;
void *bufp;
int mapbytes;
int nents = 0;
while (bytesleft) {
nents++;
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
bufp += mapbytes;
bytesleft -= mapbytes;
for (i = 0; i < n_blocks; ++i) {
bytesleft = zfad_block[i].block->datalen;
bufp = zfad_block[i].block->data;
zfad_block[i].first_nent = nents;
while (bytesleft) {
nents++;
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
bufp += mapbytes;
bytesleft -= mapbytes;
}
}
return nents;
}
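The loop above splits every block at page boundaries, so a block contributes one scatterlist entry per page it touches. As a quick illustration, the same counting arithmetic can be exercised in user space; a minimal sketch where PAGE_SIZE and the example buffer offset/length are assumptions, not driver values:

/* Standalone sketch of the nent counting above; PAGE_SIZE and the
 * example buffer are illustrative assumptions, not driver code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

static int count_nents(unsigned long bufp, unsigned long bytesleft)
{
	unsigned long mapbytes;
	int nents = 0;

	while (bytesleft) {
		nents++;
		/* a scatterlist entry must not cross a page boundary */
		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
			mapbytes = bytesleft;
		else
			mapbytes = PAGE_SIZE - offset_in_page(bufp);
		bufp += mapbytes;
		bytesleft -= mapbytes;
	}
	return nents;
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into a page:
	 * 3996 + 4096 + 1908 bytes -> 3 entries */
	printf("%d\n", count_nents(100, 10000));
	return 0;
}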
@@ -53,17 +59,38 @@ static int zfat_calculate_nents(struct zio_block *block)
* Initialize each element of the scatter list
*/
static void zfad_setup_dma_scatter(struct fa_dev *fa,
struct zfad_block *zfad_block)
struct zfad_block *zfad_block,
unsigned int n_blocks)
{
struct scatterlist *sg;
int bytesleft = zfad_block->block->datalen;
void *bufp = zfad_block->block->data;
int bytesleft = 0;
void *bufp = NULL;
int mapbytes;
int i;
int i, i_blk;
dev_dbg(&fa->zdev->head.dev, "Setup dma scatterlist for %zu bytes",
zfad_block->block->datalen);
for_each_sg(zfad_block->sgt.sgl, sg, zfad_block->sgt.nents, i) {
i_blk = 0;
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
if (i == zfad_block[i_blk].first_nent) {
WARN(bytesleft, "unmapped byte in block %i\n",
i_blk - 1);
/*
* Configure the DMA for a new block, reset index and
* data pointer
*/
bytesleft = zfad_block[i_blk].block->datalen;
bufp = zfad_block[i_blk].block->data;
i_blk++; /* index the next block */
if (unlikely(i_blk > n_blocks)) {
dev_err(&fa->zdev->head.dev,
"DMA map out of block\n");
BUG();
}
}
/*
* If there are less bytes left than what fits
* in the current page (plus page alignment offset)
@@ -87,8 +114,6 @@ static void zfad_setup_dma_scatter(struct fa_dev *fa,
virt_to_page(bufp), offset_in_page(bufp),
mapbytes, bytesleft);
}
WARN(bytesleft, "Something Wrong");
}
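The per-entry body elided by the hunk above follows the usual kernel pattern for filling one scatterlist entry; this is a sketch of that assumed pattern using the real sg_set_page() helper, not the exact hunk body:

/* Assumed shape of the elided per-entry setup: clamp the chunk to the
 * page boundary, then record the page, length and in-page offset. */
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
	mapbytes = bytesleft;
else
	mapbytes = PAGE_SIZE - offset_in_page(bufp);
sg_set_page(sg, virt_to_page(bufp), mapbytes, offset_in_page(bufp));
bufp += mapbytes;
bytesleft -= mapbytes;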
/*
@@ -100,58 +125,76 @@ static void zfad_setup_dma_scatter(struct fa_dev *fa,
* The DMA controller can store a single item, but more than one transfer
* could be necessary
*/
int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block)
int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block,
unsigned int n_blocks)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct zio_block *block = zfad_block->block;
struct scatterlist *sg;
struct dma_item *items;
uint32_t dev_mem_ptr;
unsigned int i, pages, sglen, size;
unsigned int i, pages, sglen, size, i_blk;
dma_addr_t tmp;
int err;
pages = zfat_calculate_nents(block);
pages = zfat_calculate_nents(zfad_block, n_blocks);
if (!pages) {
dev_err(&cset->head.dev, "No pages to transfer (datalen=%li)\n",
block->datalen);
dev_err(&cset->head.dev, "No pages to transfer %i\n",
n_blocks);
return -EINVAL;
}
dev_dbg(&cset->head.dev, "using %d pages for transfer\n", pages);
dev_dbg(&cset->head.dev, "using %d pages to transfer %i blocks\n",
pages, n_blocks);
/* Create sglists for the transfers */
err = sg_alloc_table(&zfad_block->sgt, pages, GFP_ATOMIC);
err = sg_alloc_table(&fa->sgt, pages, GFP_ATOMIC);
if (err) {
dev_err(&cset->head.dev, "cannot allocate sg table\n");
goto out;
}
/* Limited to 32-bit (kernel limit) */
size = sizeof(struct dma_item) * zfad_block->sgt.nents;
size = sizeof(struct dma_item) * fa->sgt.nents;
items = kzalloc(size, GFP_ATOMIC);
if (!items) {
dev_err(fa->fmc->hwdev, "cannot allocate coherent dma memory\n");
goto out_mem;
}
zfad_block->items = items;
zfad_block->dma_list_item = dma_map_single(fa->fmc->hwdev, items, size,
fa->items = items;
fa->dma_list_item = dma_map_single(fa->fmc->hwdev, items, size,
DMA_FROM_DEVICE);
if (!zfad_block->dma_list_item) {
if (!fa->dma_list_item) {
goto out_free;
}
/* Setup the scatter list for the provided block */
zfad_setup_dma_scatter(fa, zfad_block);
zfad_setup_dma_scatter(fa, zfad_block, n_blocks);
/* Map DMA buffers */
sglen = dma_map_sg(fa->fmc->hwdev, zfad_block->sgt.sgl,
zfad_block->sgt.nents, DMA_FROM_DEVICE);
sglen = dma_map_sg(fa->fmc->hwdev, fa->sgt.sgl,
fa->sgt.nents, DMA_FROM_DEVICE);
if (!sglen) {
dev_err(fa->fmc->hwdev, "cannot map dma memory\n");
goto out_map;
}
/* Configure DMA items */
dev_mem_ptr = zfad_block->dev_mem_ptr;
for_each_sg(zfad_block->sgt.sgl, sg, zfad_block->sgt.nents, i) {
i_blk = 0;
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
if (i == zfad_block[i_blk].first_nent) {
/*
* FIXME if we trust our configuration, dev_mem_ptr is
* useless in multishot
*/
dev_mem_ptr = zfad_block[i_blk].dev_mem_ptr;
i_blk++; /* index the next block */
if (unlikely(i_blk > n_blocks)) {
dev_err(&fa->zdev->head.dev,
"DMA map out of block\n");
BUG();
}
}
dev_dbg(&cset->head.dev, "configure DMA item %d"
"(addr: 0x%llx len: %d)(dev off: 0x%x)\n",
i, sg_dma_address(sg), sg_dma_len(sg), dev_mem_ptr);
@@ -163,7 +206,7 @@ int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block)
dev_mem_ptr += items[i].dma_len;
if (!sg_is_last(sg)) {/* more transfers */
/* uint64_t so it works on 32 and 64 bit */
tmp = zfad_block->dma_list_item;
tmp = fa->dma_list_item;
tmp += (sizeof(struct dma_item) * (i + 1));
items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
items[i].next_addr_h = ((uint64_t)tmp) >> 32;
@@ -171,6 +214,8 @@ int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block)
} else {
items[i].attribute = 0x0; /* last item */
}
/* The first item is written on the device */
if (i == 0) {
zfa_common_conf_set(fa, ZFA_DMA_ADDR,
@@ -194,12 +239,12 @@ int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block)
return 0;
out_map:
dma_unmap_single(fa->fmc->hwdev, zfad_block->dma_list_item, size,
dma_unmap_single(fa->fmc->hwdev, fa->dma_list_item, size,
DMA_FROM_DEVICE);
out_free:
kfree(zfad_block->items);
kfree(fa->items);
out_mem:
sg_free_table(&zfad_block->sgt);
sg_free_table(&fa->sgt);
out:
return -ENOMEM;
}
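Each dma_item stores the bus address of its successor split into two 32-bit halves, so the same code runs on 32-bit and 64-bit hosts. A minimal sketch of that link arithmetic; list_dma stands in for fa->dma_list_item and the standalone names are illustrative:

/* Sketch: link item i to item i + 1 in the device-visible list.
 * list_dma is the bus address returned by dma_map_single() for the
 * items array (fa->dma_list_item in the driver). */
uint64_t next = (uint64_t)list_dma + sizeof(struct dma_item) * (i + 1);

items[i].next_addr_l = (uint32_t)(next & 0xFFFFFFFF);
items[i].next_addr_h = (uint32_t)(next >> 32);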
@@ -218,14 +263,14 @@ void zfad_unmap_dma(struct zio_cset *cset, struct zfad_block *zfad_block)
unsigned int size;
dev_dbg(fa->fmc->hwdev, "unmap DMA\n");
size = sizeof(struct dma_item) * zfad_block->sgt.nents;
dma_unmap_single(fa->fmc->hwdev, zfad_block->dma_list_item, size,
DMA_FROM_DEVICE);
dma_unmap_sg(fa->fmc->hwdev, zfad_block->sgt.sgl, zfad_block->sgt.nents,
size = sizeof(struct dma_item) * fa->sgt.nents;
dma_unmap_single(fa->fmc->hwdev, fa->dma_list_item, size,
DMA_FROM_DEVICE);
dma_unmap_sg(fa->fmc->hwdev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
kfree(zfad_block->items);
zfad_block->items = NULL;
zfad_block->dma_list_item = 0;
sg_free_table(&zfad_block->sgt);
kfree(fa->items);
fa->items = NULL;
fa->dma_list_item = 0;
sg_free_table(&fa->sgt);
}
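zfad_unmap_dma() releases the resources in the reverse order of their acquisition in zfad_map_dma(); the assumed pairing, as a summary:

/* Assumed map/unmap pairing (summary, not new driver code):
 *   sg_alloc_table(&fa->sgt, ...)   <->  sg_free_table(&fa->sgt)
 *   kzalloc(items)                  <->  kfree(fa->items)
 *   dma_map_single(items, ...)      <->  dma_unmap_single(...)
 *   dma_map_sg(fa->sgt.sgl, ...)    <->  dma_unmap_sg(...)
 */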
@@ -673,93 +673,100 @@ static void zfat_get_irq_status(struct fa_dev *fa,
zfa_common_conf_set(fa, ZFA_IRQ_MULTI, *irq_multi);
}
/*
* zfad_start_dma
/**
* It maps the ZIO blocks with an sg table, then it starts the DMA transfer
* from the ADC to the host memory.
*
* When all data from all triggers are ready, this function starts the DMA.
* We get the first block from the list and start the acquisition on it.
* When the DMA ends, the interrupt handler invokes this function again
* until the list is empty
* @param cset
*/
static void zfad_start_dma(struct zio_cset *cset)
static void zfad_dma_start(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *zfad_block = interleave->priv_d;
struct zio_ti *ti = cset->ti;
int err;
if (fa->n_trans == fa->n_fires) {
/*
* All DMA transfers done! Inform the trigger about this, so
* it can store blocks into the buffer
*/
zio_trigger_data_done(cset);
dev_dbg(fa->fmc->hwdev, "%i blocks transfered\n", fa->n_trans);
fa->n_trans = 0;
/*
* we can safely re-enable triggers.
* Hardware trigger depends on the enable status
* of the trigger. Software trigger depends on the previous
* status taken from zio attributes (index 5 of the extended ones)
* If the user is using a software trigger, enable the software
* trigger.
*/
if (cset->trig == &zfat_type) {
zfa_common_conf_set(fa, ZFAT_CFG_HW_EN,
(ti->flags & ZIO_STATUS ? 0 : 1));
zfa_common_conf_set(fa, ZFAT_CFG_SW_EN,
ti->zattr_set.ext_zattr[5].value);
} else {
dev_dbg(&cset->head.dev, "Software acquisition over");
zfa_common_conf_set(fa, ZFAT_CFG_SW_EN, 1);
}
/* Map ZIO block for DMA acquisition */
err = zfad_map_dma(cset, zfad_block, fa->n_shots);
if (err)
return;
/* Automatically start the next acquisition */
if (enable_auto_start) {
dev_dbg(fa->fmc->hwdev, "Automatic start\n");
zfad_fsm_command(fa, ZFA_START);
}
/* Start DMA transfer */
zfa_common_conf_set(fa, ZFA_DMA_CTL_START, 1);
dev_dbg(fa->fmc->hwdev, "Start DMA transfer\n");
}
/**
* It completes a DMA transfer.
* It tells the ZIO framework that all blocks are done. Then, it re-enables
* the trigger for the next acquisition. If the device is configured for
* continuous acquisition, the function automatically starts the next
* acquisition
*
* @param cset
*/
static void zfad_dma_done(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *zfad_block = interleave->priv_d;
struct zio_ti *ti = cset->ti;
zfad_unmap_dma(cset, zfad_block);
/*
* All DMA transfers done! Inform the trigger about this, so
* it can store blocks into the buffer
*/
zio_trigger_data_done(cset);
dev_dbg(fa->fmc->hwdev, "%i blocks transfered\n", fa->n_shots);
/*
* we can safely re-enable triggers.
* Hardware trigger depends on the enable status
* of the trigger. Software trigger depends on the previous
* status taken from zio attributes (index 5 of the extended ones)
* If the user is using a software trigger, enable the software
* trigger.
*/
if (cset->trig == &zfat_type) {
zfa_common_conf_set(fa, ZFAT_CFG_HW_EN,
(ti->flags & ZIO_STATUS ? 0 : 1));
zfa_common_conf_set(fa, ZFAT_CFG_SW_EN,
ti->zattr_set.ext_zattr[5].value);
} else {
err = zfad_map_dma(cset, &zfad_block[fa->n_trans++]);
if (err)
return;
dev_dbg(&cset->head.dev, "Software acquisition over");
zfa_common_conf_set(fa, ZFAT_CFG_SW_EN, 1);
}
/* Start DMA transfer */
zfa_common_conf_set(fa, ZFA_DMA_CTL_START, 1);
dev_dbg(fa->fmc->hwdev, "Start DMA transfer\n");
/* Automatically start the next acquisition */
if (enable_auto_start) {
dev_dbg(fa->fmc->hwdev, "Automatic start\n");
zfad_fsm_command(fa, ZFA_START);
}
}
/*
* zfat_irq_dma_done
/**
* It handles the error condition of a DMA transfer.
* The function turns off the state machine by sending the STOP command
*
* The DMA may finish due to an error or successfully; we must handle both
* conditions. If the DMA is aborted by an error we abort the acquisition,
* so all the data is lost. If the DMA completes successfully, we can call
* zio_trigger_data_done, which will complete the transfer
* @param cset
*/
static void zfat_irq_dma_done(struct zio_cset *cset, int status)
static void zfad_dma_error(struct zio_cset *cset)
{
struct zfad_block *zfad_block = cset->interleave->priv_d;
struct fa_dev *fa = cset->zdev->priv_d;
struct zfad_block *zfad_block = cset->interleave->priv_d;
uint32_t val;
/* unmap the previous acquisition, if any */
if (fa->n_trans > 0)
zfad_unmap_dma(cset, &zfad_block[fa->n_trans - 1]);
if (status & ZFAT_DMA_DONE) { /* DMA done*/
dev_dbg(fa->fmc->hwdev, "DMA transfer done\n");
zfad_unmap_dma(cset, zfad_block);
zfad_start_dma(cset);
} else { /* DMA error */
zfa_common_info_get(fa, ZFA_DMA_STA, &val);
dev_err(fa->fmc->hwdev,
"DMA error (status 0x%x). All acquisition lost\n", val);
zfad_fsm_command(fa, ZFA_STOP);
fa->n_dma_err++;
}
zfa_common_info_get(fa, ZFA_DMA_STA, &val);
dev_err(fa->fmc->hwdev,
"DMA error (status 0x%x). All acquisition lost\n", val);
zfad_fsm_command(fa, ZFA_STOP);
fa->n_dma_err++;
}
@@ -845,6 +852,11 @@ static void zfat_irq_acq_end(struct zio_cset *cset)
int try = 5;
dev_dbg(fa->fmc->hwdev, "Acquisition done\n");
if (fa->n_fires != fa->n_shots) {
dev_err(fa->fmc->hwdev,
"Expected %i trigger fires, but %i occurs\n",
fa->n_shots, fa->n_fires);
}
/*
* All programmed triggers fired, so the acquisition has ended.
* If the state machine is _idle_ we can start the DMA transfer.
@@ -870,8 +882,9 @@ static void zfat_irq_acq_end(struct zio_cset *cset)
*/
zfa_common_conf_set(fa, ZFAT_CFG_HW_EN, 0);
zfa_common_conf_set(fa, ZFAT_CFG_SW_EN, 0);
/* Start DMA acquisition */
zfad_start_dma(cset);
/* Start the DMA transfer */
zfad_dma_start(cset);
}
@@ -915,8 +928,11 @@ static irqreturn_t zfad_irq(int irq, void *ptr)
* It cannot happen that DMA_DONE is in the multi register,
* and it should never happen ...
*/
if (status & (ZFAT_DMA_DONE | ZFAT_DMA_ERR)) {
zfat_irq_dma_done(cset, status);
if (status & ZFAT_DMA_DONE) {
zfad_dma_done(cset);
}
if (unlikely((status | multi) & ZFAT_DMA_ERR)) {
zfad_dma_error(cset);
return IRQ_HANDLED;
}
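Splitting the old zfat_irq_dma_done() into separate done and error paths leaves a linear flow through the interrupt handler; a sketch of the assumed sequence:

/* Assumed acquisition flow after this change (condensed):
 *
 *   zfat_irq_acq_end()                      all shots fired, ADC idle
 *     -> zfad_dma_start()                   map all blocks, start one DMA
 *   zfad_irq(), status & ZFAT_DMA_DONE
 *     -> zfad_dma_done()                    unmap, zio_trigger_data_done(),
 *                                           re-enable triggers
 *   zfad_irq(), status/multi & ZFAT_DMA_ERR
 *     -> zfad_dma_error()                   report, ZFA_STOP, n_dma_err++
 */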
......
@@ -84,9 +84,12 @@ struct dma_item {
*
* @n_shots: total number of programmed shots for an acquisition
* @n_fires: number of trigger fire occurred within an acquisition
* @n_trans: number of DMA transfers done for an acquisition
*
* @n_dma_err: number of DMA errors
*
* @sgt is the scatter/gather table that describes the DMA acquisition
* @items is a list of dma_item structures describing the DMA transfer
* @dma_list_item is a DMA address pointer to the dma_item list
*/
struct fa_dev {
struct fmc_device *fmc;
@@ -96,7 +99,6 @@ struct fa_dev {
/* Acquisition */
unsigned int n_shots;
unsigned int n_fires;
unsigned int n_trans;
/* Statistical information */
unsigned int n_dma_err;
@@ -109,28 +111,29 @@ struct fa_dev {
/* Calibration Data */
struct fa_calibration_data adc_cal_data[3];
struct fa_calibration_data dac_cal_data[3];
/* DMA attributes */
struct sg_table sgt;
struct dma_item *items;
dma_addr_t dma_list_item;
};
/*
* zfad_block
* @block is the zio_block which contains data and metadata from a single shot
* @sgt is the scatter/gather table that describes the DMA acquisition
* @items is a list of dma_item structures describing the DMA transfer
* @dma_list_item is a DMA address pointer to the dma_item list
* @dev_mem_ptr is a pointer to the ADC internal memory. It points to the
* first sample of the stored shot
* @first_nent is the index of the first nent used for this block
*/
struct zfad_block {
struct zio_block *block;
struct sg_table sgt;
struct dma_item *items;
dma_addr_t dma_list_item;
uint32_t dev_mem_ptr;
unsigned int first_nent;
};
extern int zfad_map_dma(struct zio_cset *cset,
struct zfad_block *zfad_block);
struct zfad_block *zfad_block,
unsigned int n_blocks);
extern void zfad_unmap_dma(struct zio_cset *cset,
struct zfad_block *zfad_block);
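With the scatter/gather state moved from struct zfad_block into struct fa_dev, a single mapping covers the whole multi-shot acquisition. A hypothetical call site under the new signatures; the locals and the surrounding context are illustrative:

/* Hypothetical caller sketch for the new interface */
struct fa_dev *fa = cset->zdev->priv_d;
struct zfad_block *blocks = cset->interleave->priv_d;
int err;

err = zfad_map_dma(cset, blocks, fa->n_shots);	/* map every shot at once */
if (err)
	return;
/* ... the hardware DMA runs; then, on the DMA_DONE interrupt: */
zfad_unmap_dma(cset, blocks);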
......