Commit 1eb6fa3b authored by Federico Vaga

fa-dma.c: fix SG DMA

Signed-off-by: Federico Vaga <federico.vaga@gmail.com>
parent 4ad2a5af
@@ -30,7 +30,7 @@ static void zfad_setup_dma_scatter(struct spec_fa *fa, struct zio_block *block)
int mapbytes;
int i;
pr_info("%s:%d\n", __func__, __LINE__);
dev_dbg(&fa->zdev->head.dev, "Setup dma scatterlist");
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
/*
* If there are less bytes left than what fits
@@ -43,7 +43,7 @@ static void zfad_setup_dma_scatter(struct spec_fa *fa, struct zio_block *block)
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
sg_set_buf(sg, bufp, mapbytes);
/* Configure next values */
bufp += mapbytes;
bytesleft -= mapbytes;
}
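
For context, the two hunks above belong to zfad_setup_dma_scatter(), which walks the block buffer one page-sized chunk at a time and binds each chunk to a scatterlist entry. A minimal, self-contained sketch of that pattern, with a hypothetical helper name (fill_sg_from_buffer is not part of this driver), is:

#include <linux/scatterlist.h>
#include <linux/mm.h>

/* Sketch only: page-granular scatterlist fill, modelled on the loop in
 * zfad_setup_dma_scatter(). */
static void fill_sg_from_buffer(struct sg_table *sgt, void *buf, size_t len)
{
        struct scatterlist *sg;
        size_t bytesleft = len;
        void *bufp = buf;
        int mapbytes;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                /* The last chunk may be shorter than a full page */
                if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
                        mapbytes = bytesleft;
                else
                        mapbytes = PAGE_SIZE - offset_in_page(bufp);
                sg_set_buf(sg, bufp, mapbytes);
                bufp += mapbytes;
                bytesleft -= mapbytes;
        }
}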
@@ -63,42 +63,57 @@ int zfad_map_dma(struct zio_cset *cset)
struct zio_block *block = cset->interleave->active_block;
struct dma_item *items;
unsigned int i, pages, sglen, dev_data_mem = 0;
dma_addr_t tmp;
int err;
pr_info("%s:%d\n", __func__, __LINE__);
/* Create sglists for the transfers, PAGE_SIZE granularity */
pages = DIV_ROUND_UP(block->datalen, PAGE_SIZE);
dev_dbg(&cset->head.dev, "using %d pages for transfer\n", pages);
/* Create sglists for the transfers */
err = sg_alloc_table(&fa->sgt, pages, GFP_ATOMIC);
if (err)
if (err) {
dev_err(&cset->head.dev, "cannot allocate sg table\n");
goto out;
}
items = kzalloc(sizeof(struct dma_item) * fa->sgt.nents, GFP_ATOMIC);
/* FIXME maybe dma_pool */
/* Limited to 32-bit (kernel limit) */
fa->items = dma_alloc_coherent(fa->fmc->hwdev,
sizeof(struct dma_item) * fa->sgt.nents,
&fa->dma_list_item, GFP_ATOMIC);
if (!items)
goto out_mem;
/* Setup the scatter list for the provided block */
zfad_setup_dma_scatter(fa, block);
/* Map DMA buffers */
sglen = dma_map_sg(fa->fmc->hwdev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
if (!sglen)
goto out_free;
/* Configure DMA items */
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
dev_dbg(&cset->head.dev, "configure DMA item %d(%p)"
"(addr: 0x%p, len: %d)\n",
i, sg, sg_dma_address(sg), sg_dma_len(sg));
/* Prepare DMA item */
items[i].start_addr = dev_data_mem;
items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
items[i].dma_addr_h = sg_dma_address(sg) >> 32;
items[i].dma_len = sg_dma_len(sg);
dev_data_mem += items[i].dma_len;
if (i < fa->sgt.nents - 1) {/* more transfers */
if (!sg_is_last(sg)) {/* more transfers */
/* uint64_t so it works on 32 and 64 bit */
items[i].next_addr_l = ((uint64_t)&items[i+1]) & 0xFFFFFFFF;
items[i].next_addr_h = ((uint64_t)&items[i+1]) >> 32;
tmp = fa->dma_list_item;
tmp += (sizeof(struct dma_item) * ( i+ 1 ));
items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
items[i].next_addr_h = ((uint64_t)tmp) >> 32;
items[i].attribute = 0x1; /* more items */
} else {
items[i].attribute = 0x0; /* last item */
}
/* The first item is written on the device */
if (i == 0) {
zfa_common_conf_set(&cset->head.dev,
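
The core of the fix is visible in this hunk: the dma_item list now lives in DMA-coherent memory, and each descriptor's next pointer is computed from the bus address returned by dma_alloc_coherent() (fa->dma_list_item) rather than from the kernel virtual address of items[i+1], which the DMA engine cannot dereference. A hedged sketch of that chaining pattern, using a simplified descriptor layout and hypothetical names instead of the driver's struct dma_item, could read:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Sketch only: simplified linked descriptor, not the driver's struct dma_item */
struct desc {
        uint32_t next_addr_l;
        uint32_t next_addr_h;
        uint32_t attribute;
};

static struct desc *build_desc_chain(struct device *dev, int n,
                                     dma_addr_t *dma_handle)
{
        struct desc *items;
        dma_addr_t next;
        int i;

        items = dma_alloc_coherent(dev, sizeof(*items) * n,
                                   dma_handle, GFP_ATOMIC);
        if (!items)
                return NULL;

        for (i = 0; i < n; i++) {
                if (i < n - 1) {
                        /* Bus address of the next descriptor, not &items[i + 1] */
                        next = *dma_handle + sizeof(*items) * (i + 1);
                        items[i].next_addr_l = lower_32_bits(next);
                        items[i].next_addr_h = upper_32_bits(next);
                        items[i].attribute = 0x1;       /* more items follow */
                } else {
                        items[i].attribute = 0x0;       /* last item */
                }
        }
        return items;
}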
@@ -126,17 +141,10 @@ int zfad_map_dma(struct zio_cset *cset)
}
}
/* Map DMA buffers */
sglen = dma_map_sg(&fa->spec->pdev->dev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
if (!sglen)
goto out_free;
cset->priv_d = items;
return 0;
out_free:
kfree(items);
kfree(fa->items);
out_mem:
sg_free_table(&fa->sgt);
out:
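
The second dma_map_sg() block above corresponds to the old placement, after the configuration loop; the commit maps the scatterlist before that loop, since sg_dma_address() and sg_dma_len() only yield valid bus addresses after a successful mapping (the coherent descriptor list itself is paired with dma_free_coherent() in the unmap path below). A self-contained sketch of that map-then-configure ordering, with hypothetical names and a reduced per-segment record, is:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Sketch only: reduced per-segment record, not the driver's struct dma_item */
struct bus_seg {
        uint32_t addr_l;
        uint32_t addr_h;
        uint32_t len;
};

/* Map first, then read the bus addresses of each mapped segment */
static int fill_bus_segments(struct device *dev, struct sg_table *sgt,
                             struct bus_seg *segs)
{
        struct scatterlist *sg;
        int sglen, i;

        sglen = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
        if (!sglen)
                return -ENOMEM;

        /* Iterate over the mapped-segment count returned by dma_map_sg() */
        for_each_sg(sgt->sgl, sg, sglen, i) {
                /* Valid only after dma_map_sg() succeeded */
                segs[i].addr_l = lower_32_bits(sg_dma_address(sg));
                segs[i].addr_h = upper_32_bits(sg_dma_address(sg));
                segs[i].len    = sg_dma_len(sg);
        }
        return 0;
}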
@@ -149,9 +157,14 @@ void zfad_unmap_dma(struct zio_cset *cset)
{
struct spec_fa *fa = cset->zdev->priv_d;
pr_info("%s:%d\n", __func__, __LINE__);
dma_unmap_sg(&fa->spec->pdev->dev, fa->sgt.sgl, fa->sgt.nents,
dev_dbg(fa->fmc->hwdev, "unmap DMA\n");
dma_unmap_sg(fa->fmc->hwdev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
kfree(cset->priv_d);
dma_free_coherent(fa->fmc->hwdev,
sizeof(struct dma_item) * fa->sgt.nents,
fa->items, fa->dma_list_item);
fa->items = NULL;
fa->dma_list_item = 0;
sg_free_table(&fa->sgt);
}
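
The unmap path above releases resources in the reverse order of the new setup path. A short annotated sketch of the same ordering (assuming the transfer has already completed, and using the field names introduced by this commit) shows why each step has to come before the next:

/* Sketch only: release order mirroring zfad_unmap_dma() */
static void release_dma_resources(struct spec_fa *fa)
{
        /* 1. Hand the data buffers back to the CPU; needs sgt.sgl/nents */
        dma_unmap_sg(fa->fmc->hwdev, fa->sgt.sgl, fa->sgt.nents,
                     DMA_FROM_DEVICE);
        /* 2. Free the descriptor list with the same (size, cpu pointer,
         *    bus handle) triple used at allocation time; the size still
         *    depends on sgt.nents */
        dma_free_coherent(fa->fmc->hwdev,
                          sizeof(struct dma_item) * fa->sgt.nents,
                          fa->items, fa->dma_list_item);
        fa->items = NULL;
        fa->dma_list_item = 0;
        /* 3. Only now drop the scatter table itself */
        sg_free_table(&fa->sgt);
}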
@@ -22,21 +22,6 @@
/* ADC DDR memory */
#define FA_MAX_ACQ_BYTE 0x10000000 /* 256MB */
struct spec_fa {
struct fmc_device *fmc;
struct spec_dev *spec;
struct zio_device *zdev;
struct zio_device *hwzdev;
struct sg_table sgt; /* scatter/gather table */
unsigned char __iomem *base; /* regs files are byte-oriented */
/* one-wire */
uint8_t ds18_id[8];
unsigned long next_t;
int temp; /* temperature: scaled by 4 bits */
};
/* The information about a DMA transfer */
struct dma_item {
uint32_t start_addr; /* 0x00 */
@@ -52,6 +37,25 @@ struct dma_item {
*/
};
struct spec_fa {
struct fmc_device *fmc;
struct spec_dev *spec;
struct zio_device *zdev;
struct zio_device *hwzdev;
/* DMA variable */
struct sg_table sgt; /* scatter/gather table */
struct dma_item *items; /* items for DMA transfers */
dma_addr_t dma_list_item; /* DMA address for items */
unsigned char __iomem *base; /* regs files are byte-oriented */
/* one-wire */
uint8_t ds18_id[8];
unsigned long next_t;
int temp; /* temperature: scaled by 4 bits */
};
extern int zfad_map_dma(struct zio_cset *cset);
extern void zfad_unmap_dma(struct zio_cset *cset);
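
Finally, the header hunk groups the new DMA state (the items list and its bus-address handle dma_list_item) inside struct spec_fa and exposes the two helpers above. Based only on those prototypes, a hedged usage sketch of how a caller might bracket a transfer with them (the surrounding trigger and interrupt context is assumed, not shown in this commit) is:

/* Hypothetical usage sketch, not driver code */
static int start_acquisition_dma(struct zio_cset *cset)
{
        int err;

        err = zfad_map_dma(cset);       /* build sg table + descriptor list */
        if (err)
                return err;

        /* ... program the carrier's DMA engine with the first descriptor
         *     (fa->dma_list_item) and start the transfer ... */
        return 0;
}

static void acquisition_dma_done(struct zio_cset *cset)
{
        /* Runs once the hardware signals completion */
        zfad_unmap_dma(cset);           /* release everything map set up */
        /* ... hand the filled block back to ZIO ... */
}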