Commit 0d2c60fd authored by Federico Vaga's avatar Federico Vaga

Merge branch 'feature/debugfs-dma' into develop

parents 8c641a3b 282dfa29
......@@ -14,4 +14,5 @@ Module.symvers
GTAGS
GPATH
GRTAGS
Makefile.specific
\ No newline at end of file
Makefile.specific
compile_commands.json
......@@ -12,9 +12,9 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
sys.path.insert(0, os.path.abspath('../software/PySPEC/PySPEC'))
# -- Project information -----------------------------------------------------
......@@ -39,7 +39,8 @@ release = 'v1.4'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
......@@ -171,3 +172,7 @@ epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
autodoc_default_options = {
'member-order': 'bysource',
}
......@@ -27,6 +27,7 @@ You can clone the GIT project with the following command::
hdl-spec-base
sw-driver
sw-python
.. _`Open HardWare Repository`: https://ohwr.org/
.. _`SPEC project`: https://ohwr.org/project/spec
......@@ -34,7 +34,7 @@ GN4124 GPIO
GN4124 FCL
This driver provides support for the GN4124 FCL (FPGA Configuration Loader).
It uses the `FPGA manager interface`_ to program the FPGA at runtime.
It uses the `FPGA manager interface`_ to program the FPGA at run-time.
If the SPEC based application is using the :ref:`SPEC
base<spec_hdl_spec_base>` component then it can profit from the
......@@ -220,7 +220,7 @@ attributes. Here we focus only on those.
Miscellaneous information about the card status: IRQ mapping.
``<pci-id>/fpga_firmware`` [W]
It configure the FPGA with a bitstream which name is provided as input.
It configures the FPGA with a bitstream which name is provided as input.
Remember that firmware images are installed in ``/lib/firmware``; alternatively,
you can provide your own search path by setting it in
``/sys/module/firmware_class/parameters/path``.
......@@ -231,3 +231,9 @@ attributes. Here we focus only on those.
``<pci-id>/spec-<pci-id>/build_info`` [R]
It shows the FPGA configuration synthesis information
``<pci-id>/spec-<pci-id>/dma`` [RW]
It exports DMA capabilities to user-space. The user can ``open(2)``
and ``close(2)`` to request and release a DMA engine channel. Then,
the user can use ``lseek(2)`` to set the offset in the DDR, and
``read(2)``/``write(2)`` to start the DMA transfer.
..
SPDX-License-Identifier: CC-BY-SA-4.0
SPDX-FileCopyrightText: 2019-2020 CERN
.. _spec_python:
SPEC Python: PySPEC
===================
.. autoclass:: PySPEC.PySPEC
:members:
"""
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: 2020 CERN
"""
import pytest
from PySPEC import PySPEC
@pytest.fixture(scope="function")
def spec():
    """Yield a fresh PySPEC handle for the card selected via --pci-id."""
    yield PySPEC(pytest.pci_id)
def pytest_addoption(parser):
    """Declare the mandatory --pci-id command line option."""
    parser.addoption("--pci-id", required=True,
                     help="SPEC PCI Identifier")
def pytest_configure(config):
    """Publish the selected PCI identifier through the pytest namespace."""
    pci_id = config.getoption("--pci-id")
    pytest.pci_id = pci_id
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2020 CERN
[pytest]
addopts = -v -p no:cacheprovider
\ No newline at end of file
"""
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: 2020 CERN
"""
import pytest
import random
import math
from PySPEC import PySPEC
random_repetitions = 0
class TestDma(object):
    """
    Exercise the DMA interface exported by the SPEC driver through the
    debugfs "dma" file (via the PySPEC wrapper and the ``spec`` fixture).
    """

    def test_acquisition_release(self, spec):
        """
        Users can open and close the DMA channel
        """
        with spec.dma() as dma:
            pass

    def test_acquisition_release_contention(self, spec):
        """
        Refuse simultaneous DMA transfers
        """
        with spec.dma() as dma:
            # While the channel is taken, a second handle must be refused
            spec_c = PySPEC(spec.pci_id)
            with pytest.raises(OSError) as error:
                with spec_c.dma() as dma2:
                    pass

    def test_dma_no_buffer(self, spec):
        """
        The read/write will return immediately if asked to perform a
        0-length transfer.
        """
        with spec.dma() as dma:
            data = dma.read(0, 0)
            assert len(data) == 0
        with spec.dma() as dma:
            count = dma.write(0, b"")
            assert count == 0

    @pytest.mark.parametrize("buffer_size",
                             [2**i for i in range(3, 22)])
    def test_dma_read(self, spec, buffer_size):
        """
        We just want to see if the DMA engine reports errors. Test the
        engine with different sizes, but same offset (default:
        0x0). On the engine side we will get several transfers
        (scatterlist) depending on the size.
        """
        with spec.dma() as dma:
            data1 = dma.read(0, buffer_size)
            data2 = dma.read(0, buffer_size)
            assert len(data1) == buffer_size
            assert len(data2) == buffer_size
            # Reading the same region twice must yield the same content
            assert data1 == data2

    @pytest.mark.parametrize("buffer_size",
                             [2**i for i in range(3, 22)])
    def test_dma_write(self, spec, buffer_size):
        """
        We just want to see if the DMA engine reports errors. Test the
        engine with different sizes, but same offset (default:
        0x0). On the engine side we will get several transfers
        (scatterlist) depending on the size.
        """
        with spec.dma() as dma:
            count = dma.write(0, b"\x00" * buffer_size)
            assert count == buffer_size

    @pytest.mark.parametrize("ddr_offset",
                             [2**i for i in range(2, int(math.log2(PySPEC.DDR_SIZE)))])
    @pytest.mark.parametrize("unaligned", range(1, PySPEC.DDR_ALIGN))
    def test_dma_unaligned_offset_read(self, spec, ddr_offset, unaligned):
        """
        The DDR access is 4-byte aligned: a read from a misaligned offset
        must be rejected by the driver.
        """
        with spec.dma() as dma:
            with pytest.raises(OSError) as error:
                dma.read(ddr_offset + unaligned, 16)

    @pytest.mark.parametrize("ddr_offset",
                             [2**i for i in range(2, int(math.log2(PySPEC.DDR_SIZE)))])
    @pytest.mark.parametrize("unaligned", range(1, PySPEC.DDR_ALIGN))
    def test_dma_unaligned_offset_write(self, spec, ddr_offset, unaligned):
        """
        The DDR access is 4-byte aligned: a write to a misaligned offset
        must be rejected by the driver.
        """
        with spec.dma() as dma:
            with pytest.raises(OSError) as error:
                dma.write(ddr_offset + unaligned, b"\x00" * 16)

    @pytest.mark.parametrize("ddr_offset",
                             [2**i for i in range(2, int(math.log2(PySPEC.DDR_SIZE)))])
    @pytest.mark.parametrize("unaligned", range(1, PySPEC.DDR_ALIGN))
    def test_dma_unaligned_size_read(self, spec, ddr_offset, unaligned):
        """
        The DDR access is 4-byte aligned: a read of a misaligned size
        must be rejected by the driver.
        """
        with spec.dma() as dma:
            with pytest.raises(OSError) as error:
                dma.read(ddr_offset, (16 + unaligned))

    @pytest.mark.parametrize("ddr_offset",
                             [2**i for i in range(2, int(math.log2(PySPEC.DDR_SIZE)))])
    @pytest.mark.parametrize("unaligned", range(1, PySPEC.DDR_ALIGN))
    def test_dma_unaligned_size_write(self, spec, ddr_offset, unaligned):
        """
        The DDR access is 4-byte aligned: a write of a misaligned size
        must be rejected by the driver.
        """
        with spec.dma() as dma:
            with pytest.raises(OSError) as error:
                dma.write(ddr_offset, b"\x00" * (16 + unaligned))

    @pytest.mark.parametrize("split", [2**i for i in range(3, 14)])
    @pytest.mark.parametrize("ddr_offset", [0x0, ])
    @pytest.mark.parametrize("buffer_size", [2**14, ])
    def test_dma_split_read(self, spec, buffer_size, ddr_offset, split, capsys):
        """
        Write and read back buffers using DMA. We test different combinations
        of offset and size. Here we artificially split the **read** in small
        pieces.

        In the past we had problems with transfers greater than 4KiB. Be sure
        that we can perform transfers (read) of "any" size.
        """
        data = bytes([random.randrange(0, 0xFF, 1) for i in range(buffer_size)])
        with spec.dma() as dma:
            dma.write(ddr_offset, data)
            data_rb = b""
            # Read the buffer back in `split`-sized chunks
            for offset in range(0, buffer_size, split):
                data_rb += dma.read(ddr_offset + offset, split)
        assert data == data_rb

    @pytest.mark.parametrize("split", [2**i for i in range(3, 14)])
    @pytest.mark.parametrize("ddr_offset", [0x0, ])
    @pytest.mark.parametrize("buffer_size", [2**14, ])
    def test_dma_split_write(self, spec, buffer_size, ddr_offset, split):
        """
        Write and read back buffers using DMA. We test different combinations
        of offset and size. Here we artificially split the **write** in small
        pieces.

        In the past we had problems with transfers greater than 4KiB. Be sure
        that we can perform transfers (write) of "any" size.
        """
        data = bytes([random.randrange(0, 0xFF, 1) for i in range(buffer_size)])
        with spec.dma() as dma:
            # Write the buffer in `split`-sized chunks, read back in one go
            for offset in range(0, buffer_size, split):
                dma.write(offset, data[offset:min(offset+split, buffer_size)])
            data_rb = dma.read(0x0, buffer_size)
        assert data == data_rb

    @pytest.mark.parametrize("split", [2**i for i in range(3, 14)])
    @pytest.mark.parametrize("buffer_size", [2**14, ])
    def test_dma_split(self, spec, buffer_size, split):
        """
        Write and read back buffers using DMA. We test different combinations
        of offset and size. Here we artificially split transfers in small
        pieces.

        In the past we had problems with transfers greater than 4KiB. Be sure
        that we can perform transfers of "any" size.
        """
        data = bytes([random.randrange(0, 0xFF, 1) for i in range(buffer_size)])
        with spec.dma() as dma:
            for offset in range(0, buffer_size, split):
                dma.write(offset, data[offset:min(offset+split, buffer_size)])
            # Compare chunk by chunk while reading back
            for offset in range(0, buffer_size, split):
                data_rb = dma.read(offset, split)
                assert data[offset:min(offset+split, buffer_size)] == data_rb

    @pytest.mark.parametrize("ddr_offset",
                             [0x0] + \
                             [2**i for i in range(int(math.log2(PySPEC.DDR_ALIGN)), int(math.log2(PySPEC.DDR_SIZE)))] + \
                             [random.randrange(0, PySPEC.DDR_SIZE, PySPEC.DDR_ALIGN) for x in range(random_repetitions)])
    @pytest.mark.parametrize("buffer_size",
                             [2**i for i in range(int(math.log2(PySPEC.DDR_ALIGN)) + 1, 22)] + \
                             [random.randrange(PySPEC.DDR_ALIGN * 2, 4096, PySPEC.DDR_ALIGN) for x in range(random_repetitions)])
    def test_dma(self, spec, ddr_offset, buffer_size):
        """
        Write and read back buffers using DMA. We test different combinations
        of offset and size. Here we try to perform transfers as large as
        possible (short scatterlist)
        """
        # Skip combinations that would run past the end of the DDR
        if ddr_offset + buffer_size >= PySPEC.DDR_SIZE:
            pytest.skip("DDR Overflow!")
        data = bytes([random.randrange(0, 0xFF, 1) for i in range(buffer_size)])
        with spec.dma() as dma:
            dma.write(ddr_offset, data)
            data_rb = dma.read(ddr_offset, buffer_size)
        assert data == data_rb
# SPDX-License-Identifier: CC0-1.0
#
# SPDX-FileCopyrightText: 2020 CERN
*.pyc
MANIFEST
# SPDX-License-Identifier: CC0-1.0
#
# SPDX-FileCopyrightText: 2020 CERN
-include Makefile.specific
all:
clean:
install:
python setup.py install
.PHONY: all clean install
"""
@package docstring
@author: Federico Vaga <federico.vaga@cern.ch>
SPDX-License-Identifier: LGPL-3.0-or-later
SPDX-FileCopyrightText: 2020 CERN (home.cern)
"""
import os
from contextlib import contextmanager
class PySPEC:
    """
    This class gives access to SPEC features.
    """

    #: SPEC DDR size, in bytes
    DDR_SIZE = 256 * 1024 * 1024
    #: SPEC DDR access alignment, in bytes
    DDR_ALIGN = 4

    def __init__(self, pci_id):
        """
        Create a new SPEC handle.

        :var pci_id: PCI identifier of the card as "<bus>:<dev>.<fn>"
                     (e.g. "06:00.0"), without the leading PCI domain
        """
        self.pci_id = pci_id
        self.debugfs = "/sys/kernel/debug/0000:{:s}".format(self.pci_id)
        self.debugfs_fpga = os.path.join(self.debugfs,
                                         "spec-0000:{:s}".format(self.pci_id))

    @contextmanager
    def dma(self):
        r"""
        Create a DMA context from which users can do DMA
        transfers. Within this context the user can use
        PySPECDMA.read() and PySPECDMA.write(). Here an example.

        >>> from PySPEC import PySPEC
        >>> spec = PySPEC("06:00.0")
        >>> with spec.dma() as dma:
        >>>     cnt = dma.write(0, b"\x00" * 16)
        >>>     buffer = dma.read(0, 16)

        Which is equivalent to:

        >>> from PySPEC import PySPEC
        >>> spec = PySPEC("06:00.0")
        >>> spec_dma = PySPEC.PySPECDMA(spec)
        >>> spec_dma.request()
        >>> cnt = spec_dma.write(0, b"\x00" * 16)
        >>> buffer = spec_dma.read(0, 16)
        >>> spec_dma.release()
        """
        spec_dma = self.PySPECDMA(self)
        spec_dma.request()
        try:
            yield spec_dma
        finally:
            # Always give the channel back, even if the transfer raised
            spec_dma.release()

    class PySPECDMA:
        r"""
        This class wraps DMA features in a single object.

        The SPEC has only one DMA channel. On request() the user will
        get exclusive access. The user must release() the DMA channel as
        soon as possible to let other users or drivers access it. For
        this reason, avoid using this class directly. Instead, use the
        DMA context from the PySPEC class which is less error prone.

        >>> from PySPEC import PySPEC
        >>> spec = PySPEC("06:00.0")
        >>> with spec.dma() as dma:
        >>>     cnt = dma.write(0, b"\x00" * 16)
        >>>     buffer = dma.read(0, 16)
        >>> print(buffer)
        """

        def __init__(self, spec):
            """
            Create a new instance.

            :var spec: a valid PySPEC instance
            """
            self.spec = spec

        def request(self):
            """
            Open a DMA file descriptor.

            :raise OSError: if the open(2) or the driver fails
            """
            # Unbuffered, so each read()/write() maps to one syscall and
            # hence one DMA transfer request to the driver.
            self.dma_file = open(os.path.join(self.spec.debugfs_fpga, "dma"),
                                 "rb+", buffering=0)

        def release(self):
            """
            Close the DMA file descriptor.

            :raise OSError: if the close(2) or the driver fails
            """
            if hasattr(self, "dma_file"):
                self.dma_file.close()

        def read(self, offset, size):
            """
            Trigger a *device to memory* DMA transfer.

            :var offset: offset within the DDR
            :var size: number of bytes to be transferred
            :return: the data transferred as a bytes() array; it may be
                     shorter than ``size`` if the device reports EOF
            :raise OSError: if the read(2) or the driver fails
            """
            self.__seek(offset)
            data = bytearray()
            while len(data) < size:
                chunk = self.dma_file.read(size - len(data))
                if not chunk:
                    # EOF (or no data): bail out instead of looping forever
                    break
                data.extend(chunk)
            return bytes(data)

        def write(self, offset, data):
            """
            Trigger a *memory to device* DMA transfer.

            :var offset: offset within the DDR
            :var data: the bytes to be transferred
            :return: the number of transferred bytes
            :raise OSError: if the write(2) or the driver fails
            """
            self.__seek(offset)
            start = 0
            while start < len(data):
                written = self.dma_file.write(bytes(data[start:]))
                if not written:
                    # Device accepted nothing: avoid an endless loop
                    break
                start += written
            return start

        def __seek(self, offset):
            """
            Change the DDR offset for the next transfer.

            :var offset: offset within the DDR
            :raise OSError: if lseek(2) fails or the driver
            """
            self.dma_file.seek(offset)
"""
@package docstring
@author: Federico Vaga <federico.vaga@cern.ch>
SPDX-License-Identifier: LGPL-3.0-or-later
SPDX-FileCopyrightText: 2020 CERN (home.cern)
"""
from .PySPEC import PySPEC
# Public API of the PySPEC package: only the PySPEC class is exported.
__all__ = (
    "PySPEC",
)
#!/usr/bin/env python
"""
SPDX-License-Identifier: CC0-1.0
SPDX-FileCopyrightText: 2020 CERN
"""
from distutils.core import setup
# Package metadata for the PySPEC Python module.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# switching to setuptools.setup, which accepts the same arguments.
setup(name='PySPEC',
      version='1.4.14',
      description='Python Module to handle SPEC cards',
      author='Federico Vaga',
      author_email='federico.vaga@cern.ch',
      maintainer="Federico Vaga",
      maintainer_email="federico.vaga@cern.ch",
      url='https://www.ohwr.org/project/spec',
      packages=['PySPEC'],
      license='LGPL-3.0-or-later',
      )
This diff is collapsed.
......@@ -16,7 +16,10 @@
#include <linux/bitops.h>
#include <linux/fmc.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include "linux/printk.h"
#include "spec.h"
#include "spec-compat.h"
......@@ -122,6 +125,245 @@ static const struct file_operations spec_fpga_dbg_bld_ops = {
.release = single_release,
};
/*
 * State of one open instance of the debugfs "dma" file.
 */
struct spec_fpga_dbg_dma {
	struct spec_fpga *spec_fpga;	/* owning FPGA device */
	struct dma_chan *dchan;		/* exclusive DMA engine channel */
	size_t datalen;			/* size of the bounce buffer */
	void *data;			/* CPU address of the bounce buffer */
	dma_addr_t datadma;		/* DMA address of the bounce buffer */
	struct dmaengine_result dma_res;	/* result of the last transfer */
	struct completion compl;	/* signalled by the DMA callback */
};
/*
 * NOTE(review): this structure is not referenced by any code visible
 * here, and "fmca" looks like a typo for "fpga" — confirm it is actually
 * used before keeping it.
 */
struct spec_fmca_dbg_dma_tx_ctxt {
	struct dmaengine_result dma_res;
};
/*
 * DMA engine completion callback: store the transfer result and wake up
 * the waiter in spec_fpga_dbg_dma_transfer().
 */
static void spec_fmca_dbg_dma_tx_complete(void *arg,
					  const struct dmaengine_result *result)
{
	struct spec_fpga_dbg_dma *dbgdma = arg;

	memcpy(&dbgdma->dma_res, result, sizeof(*result));
	complete(&dbgdma->compl);
}
/*
 * Run one DMA transfer between the bounce buffer and the DDR, and wait
 * (up to 60 seconds) for its completion.
 *
 * @dbgdma: debugfs DMA context (bounce buffer and channel)
 * @dir: DMA_DEV_TO_MEM (read) or DMA_MEM_TO_DEV (write)
 * @count: number of bytes to transfer (caller clamps it to the buffer)
 * @offset: offset within the DDR
 *
 * Return: 0 on success, a negative errno code on failure.
 */
static int spec_fpga_dbg_dma_transfer(struct spec_fpga_dbg_dma *dbgdma,
				      enum dma_transfer_direction dir,
				      size_t count, loff_t offset)
{
	int err;
	struct dma_slave_config sconfig;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	size_t max_segment;
	unsigned int nents;
	struct sg_table sgt;
	struct scatterlist *sg;
	int i;

	dev_dbg(dbgdma->dchan->device->dev,
		"arg: {dir: %d, size: %zu, offset: 0x%08llx}\n",
		dir, count, offset);

	if (!count)	/* nothing to do, and sg_alloc_table(0) would fail */
		return 0;

	max_segment = dma_get_max_seg_size(dbgdma->dchan->device->dev) & PAGE_MASK;
	/*
	 * DIV_ROUND_UP() avoids programming a spurious zero-length trailing
	 * segment when count is an exact multiple of max_segment (the old
	 * "count / max_segment + 1" always added one extra entry).
	 */
	nents = DIV_ROUND_UP(count, max_segment);
	err = sg_alloc_table(&sgt, nents, GFP_KERNEL);
	if (err)
		goto err_sgt;

	for_each_sg(sgt.sgl, sg, sgt.nents, i) {
		sg_dma_address(sg) = dbgdma->datadma + (i * max_segment);
		if (sg_is_last(sg))
			/* whatever remains of the buffer (<= max_segment) */
			sg_dma_len(sg) = count - (i * max_segment);
		else
			sg_dma_len(sg) = max_segment;
	}

	/*
	 * The device-side address goes in src_addr for both directions —
	 * NOTE(review): this matches the gn412x slave_config alignment
	 * check on src_addr; confirm against the engine driver.
	 */
	memset(&sconfig, 0, sizeof(sconfig));
	sconfig.direction = dir;
	sconfig.src_addr = offset;
	err = dmaengine_slave_config(dbgdma->dchan, &sconfig);
	if (err)
		goto err_cfg;

	tx = dmaengine_prep_slave_sg(dbgdma->dchan, sgt.sgl, sgt.nents,
				     dir, 0);
	if (!tx) {
		err = -EINVAL;
		goto err_prep;
	}

	/* Setup the DMA completion callback */
	dbgdma->dma_res.result = DMA_TRANS_NOERROR;
	dbgdma->dma_res.residue = 0;
	tx->callback_result = spec_fmca_dbg_dma_tx_complete;
	tx->callback_param = (void *)dbgdma;

	cookie = dmaengine_submit(tx);
	if (cookie < 0) {
		err = cookie;
		goto err_sub;
	}
	dma_async_issue_pending(dbgdma->dchan);

	/* <0: interrupted; 0: timeout; >0: completed */
	err = wait_for_completion_interruptible_timeout(
		&dbgdma->compl, msecs_to_jiffies(60000));
	if (err == 0)
		err = -ETIMEDOUT;
	if (err > 0) {
		/* Completed: map the dmaengine result onto an errno */
		switch (dbgdma->dma_res.result) {
		case DMA_TRANS_NOERROR:
			err = 0;
			break;
		default:
			err = -EIO;
			break;
		}
	}

err_sub:
err_prep:
err_cfg:
	sg_free_table(&sgt);
err_sgt:
	return err;
}
/*
 * read(2) handler for the debugfs "dma" file: DMA from the DDR (at
 * *ppos) into the bounce buffer, then copy to user space.
 *
 * Return: the number of bytes read, or a negative errno code.
 */
static ssize_t spec_fpga_dbg_dma_read(struct file *file, char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct spec_fpga_dbg_dma *dbgdma = file->private_data;
	int err;

	if (*ppos >= SPEC_DDR_SIZE)
		return -EINVAL;

	/* Clamp to the bounce buffer size and to the end of the DDR */
	count = min(dbgdma->datalen, count);
	count = min_t(size_t, count, SPEC_DDR_SIZE - *ppos);

	err = spec_fpga_dbg_dma_transfer(dbgdma, DMA_DEV_TO_MEM,
					 count, *ppos);
	if (err)
		return err;

	/*
	 * copy_to_user() returns the number of bytes NOT copied, not an
	 * errno; returning it directly would look like a success to the
	 * caller.
	 */
	if (copy_to_user(buf, dbgdma->data, count))
		return -EFAULT;

	*ppos += count;

	return count;
}
/*
 * write(2) handler for the debugfs "dma" file: copy from user space into
 * the bounce buffer, then DMA towards the DDR (at *ppos).
 *
 * Return: the number of bytes written, or a negative errno code.
 */
static ssize_t spec_fpga_dbg_dma_write(struct file *file,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	struct spec_fpga_dbg_dma *dbgdma = file->private_data;
	int err;

	if (*ppos >= SPEC_DDR_SIZE)
		return -EINVAL;

	/* Clamp to the bounce buffer size and to the end of the DDR */
	count = min(dbgdma->datalen, count);
	count = min_t(size_t, count, SPEC_DDR_SIZE - *ppos);

	/*
	 * copy_from_user() returns the number of bytes NOT copied, not an
	 * errno; returning it directly would look like a success to the
	 * caller.
	 */
	if (copy_from_user(dbgdma->data, buf, count))
		return -EFAULT;

	err = spec_fpga_dbg_dma_transfer(dbgdma, DMA_MEM_TO_DEV,
					 count, *ppos);
	if (err)
		return err;

	*ppos += count;

	return count;
}
/*
 * dma_request_channel() filter: accept only channels belonging to the
 * DMA device of this card (passed as @arg).
 */
static bool spec_fpga_dbg_dma_filter(struct dma_chan *dchan, void *arg)
{
	return dchan->device == arg;
}
/*
 * open(2) handler for the debugfs "dma" file: allocate the bounce buffer
 * and take exclusive ownership of a DMA engine channel for this user.
 */
static int spec_fpga_dbg_dma_open(struct inode *inode, struct file *file)
{
	struct spec_fpga_dbg_dma *dbgdma;
	struct spec_fpga *spec_fpga = inode->i_private;
	dma_cap_mask_t dma_mask;
	int err;

	if (!spec_fpga->dma_pdev) {
		dev_warn(&spec_fpga->dev,
			 "Not able to find DMA engine: platform_device missing\n");
		return -ENODEV;
	}

	dbgdma = kzalloc(sizeof(*dbgdma), GFP_KERNEL);
	if (!dbgdma)
		return -ENOMEM;
	init_completion(&dbgdma->compl);
	dbgdma->spec_fpga = spec_fpga;
	/* 4 MiB coherent bounce buffer: read/write sizes are clamped to it */
	dbgdma->datalen = 4 * 1024 *1024;
	dbgdma->data = dma_alloc_coherent(dbgdma->spec_fpga->dev.parent,
					  dbgdma->datalen, &dbgdma->datadma,
					  GFP_KERNEL);
	if (!dbgdma->data) {
		err = -ENOMEM;
		goto err_dma_alloc;
	}

	/* DMA_PRIVATE grants this user exclusive ownership of the channel */
	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_SLAVE, dma_mask);
	dma_cap_set(DMA_PRIVATE, dma_mask);
	dbgdma->dchan = dma_request_channel(dma_mask, spec_fpga_dbg_dma_filter,
					    platform_get_drvdata(spec_fpga->dma_pdev));
	if (!dbgdma->dchan) {
		/* e.g. another user already holds the only channel */
		dev_dbg(&spec_fpga->dev,
			"DMA transfer Failed: can't request channel\n");
		err = -EBUSY;
		goto err_req;
	}

	file->private_data = dbgdma;

	return 0;

err_req:
	dma_free_coherent(dbgdma->spec_fpga->dev.parent,
			  dbgdma->datalen, dbgdma->data, dbgdma->datadma);
err_dma_alloc:
	kfree(dbgdma);
	return err;
}
/*
 * Nothing to sync on close(2); all cleanup happens in the release
 * callback.
 */
static int spec_fpga_dbg_dma_flush(struct file *file, fl_owner_t id)
{
	return 0;
}
/*
 * release(2) handler: free the bounce buffer and return the DMA channel.
 * NOTE(review): the buffer is freed before the channel is released —
 * confirm no transfer can still be in flight at this point.
 */
static int spec_fpga_dbg_dma_release(struct inode *inode, struct file *file)
{
	struct spec_fpga_dbg_dma *dbgdma = file->private_data;

	dma_free_coherent(dbgdma->spec_fpga->dev.parent,
			  dbgdma->datalen, dbgdma->data, dbgdma->datadma);
	dma_release_channel(dbgdma->dchan);
	kfree(dbgdma);

	return 0;
}
/* File operations for the debugfs "dma" file (one DMA context per open) */
static const struct file_operations spec_fpga_dbg_dma_ops = {
	.owner = THIS_MODULE,
	.llseek = default_llseek,	/* lseek(2) sets the DDR offset */
	.read = spec_fpga_dbg_dma_read,
	.write = spec_fpga_dbg_dma_write,
	.open = spec_fpga_dbg_dma_open,
	.flush = spec_fpga_dbg_dma_flush,
	.release = spec_fpga_dbg_dma_release,
};
static int spec_fpga_dbg_init(struct spec_fpga *spec_fpga)
{
struct pci_dev *pdev = to_pci_dev(spec_fpga->dev.parent);
......@@ -142,13 +384,13 @@ static int spec_fpga_dbg_init(struct spec_fpga *spec_fpga)
spec_fpga->dbg_csr_reg.nregs = ARRAY_SIZE(spec_fpga_debugfs_reg32);
spec_fpga->dbg_csr_reg.base = spec_fpga->fpga;
spec_fpga->dbg_csr = debugfs_create_regset32(SPEC_DBG_CSR_NAME, 0200,
spec_fpga->dbg_dir_fpga,
&spec_fpga->dbg_csr_reg);
spec_fpga->dbg_dir_fpga,
&spec_fpga->dbg_csr_reg);
if (IS_ERR_OR_NULL(spec_fpga->dbg_csr)) {
err = PTR_ERR(spec_fpga->dbg_csr);
dev_warn(&spec_fpga->dev,
"Cannot create debugfs file \"%s\" (%d)\n",
SPEC_DBG_CSR_NAME, err);
"Cannot create debugfs file \"%s\" (%d)\n",
SPEC_DBG_CSR_NAME, err);
goto err;
}
......@@ -165,6 +407,19 @@ static int spec_fpga_dbg_init(struct spec_fpga *spec_fpga)
goto err;
}
spec_fpga->dbg_dma = debugfs_create_file(SPEC_DBG_DMA_NAME,
0444,
spec_fpga->dbg_dir_fpga,
spec_fpga,
&spec_fpga_dbg_dma_ops);
if (IS_ERR_OR_NULL(spec_fpga->dbg_dma)) {
err = PTR_ERR(spec_fpga->dbg_dma);
dev_err(&spec_fpga->dev,
"Cannot create debugfs file \"%s\" (%d)\n",
SPEC_DBG_DMA_NAME, err);
goto err;
}
return 0;
err:
debugfs_remove_recursive(spec_fpga->dbg_dir_fpga);
......
......@@ -144,6 +144,7 @@ enum gn412x_dma_state {
};
#define GN412X_DMA_STAT_ACK BIT(2)
#define GN412X_DMA_DDR_ALIGN 4
/**
* Transfer descriptor for a hardware transfer
......@@ -349,7 +350,6 @@ static int gn412x_dma_alloc_chan_resources(struct dma_chan *dchan)
struct gn412x_dma_chan *chan = to_gn412x_dma_chan(dchan);
memset(&chan->sconfig, 0, sizeof(struct dma_slave_config));
chan->sconfig.direction = DMA_DEV_TO_MEM;
return 0;
}
......@@ -389,8 +389,8 @@ static void gn412x_dma_prep_fixup(struct gn412x_dma_tx_hw *tx_hw,
}
static void gn412x_dma_prep(struct gn412x_dma_tx_hw *tx_hw,
struct scatterlist *sg,
dma_addr_t start_addr)
struct scatterlist *sg, dma_addr_t start_addr,
enum dma_transfer_direction direction)
{
tx_hw->start_addr = start_addr & 0xFFFFFFFF;
tx_hw->dma_addr_l = sg_dma_address(sg);
......@@ -401,8 +401,10 @@ static void gn412x_dma_prep(struct gn412x_dma_tx_hw *tx_hw,
tx_hw->next_addr_l = 0x00000000;
tx_hw->next_addr_h = 0x00000000;
tx_hw->attribute = 0x0;
if (direction == DMA_MEM_TO_DEV)
tx_hw->attribute |= GN412X_DMA_ATTR_DIR_MEM_TO_DEV;
if (!sg_is_last(sg))
tx_hw->attribute = GN412X_DMA_ATTR_CHAIN;
tx_hw->attribute |= GN412X_DMA_ATTR_CHAIN;
}
static struct dma_async_tx_descriptor *gn412x_dma_prep_slave_sg(
......@@ -417,12 +419,6 @@ static struct dma_async_tx_descriptor *gn412x_dma_prep_slave_sg(
dma_addr_t src_addr;
int i;
if (unlikely(direction != DMA_DEV_TO_MEM)) {
dev_err(&chan->dev->device,
"Support only DEV -> MEM transfers\n");
goto err;
}
if (unlikely(sconfig->direction != direction)) {
dev_err(&chan->dev->device,
"Transfer and slave configuration disagree on DMA direction\n");
......@@ -458,7 +454,14 @@ static struct dma_async_tx_descriptor *gn412x_dma_prep_slave_sg(
if (sg_dma_len(sg) > dma_get_max_seg_size(chan->device->dev)) {
dev_err(&chan->dev->device,
"Maximum transfer size %d, got %d on transfer %d\n",
0x3FFF, sg_dma_len(sg), i);
dma_get_max_seg_size(chan->device->dev),
sg_dma_len(sg), i);
goto err_alloc_pool;
}
if (sg_dma_len(sg) & (GN412X_DMA_DDR_ALIGN - 1)) {
dev_err(&chan->dev->device,
"Transfer size must be aligne to %d Bytes, got %d Bytes\n",
GN412X_DMA_DDR_ALIGN, sg_dma_len(sg));
goto err_alloc_pool;
}
gn412x_dma_tx->sgl_hw[i] = dma_pool_alloc(gn412x_dma->pool,
......@@ -478,7 +481,8 @@ static struct dma_async_tx_descriptor *gn412x_dma_prep_slave_sg(
} else {
gn412x_dma_tx->tx.phys = phys;
}
gn412x_dma_prep(gn412x_dma_tx->sgl_hw[i], sg, src_addr);
gn412x_dma_prep(gn412x_dma_tx->sgl_hw[i], sg, src_addr,
direction);
src_addr += sg_dma_len(sg);
}
......@@ -588,12 +592,16 @@ static int gn412x_dma_slave_config(struct dma_chan *chan,
sizeof(struct dma_slave_config));
spin_unlock_irqrestore(&gn412x_dma_chan->lock, flags);
if (gn412x_dma_chan->sconfig.src_addr & (GN412X_DMA_DDR_ALIGN - 1))
return -EINVAL;
return 0;
}
static int gn412x_dma_terminate_all(struct dma_chan *chan)
{
struct gn412x_dma_device *gn412x_dma;
struct gn412x_dma_tx *tx;
gn412x_dma = to_gn412x_dma_device(chan->device);
gn412x_dma_ctrl_abort(gn412x_dma);
......@@ -604,6 +612,15 @@ static int gn412x_dma_terminate_all(struct dma_chan *chan)
return -EINVAL;
}
tx = to_gn412x_dma_chan(chan)->tx_curr;
if (tx && tx->tx.callback_result) {
const struct dmaengine_result result = {
.result = DMA_TRANS_ABORTED,
.residue = 0,
};
tx->tx.callback_result(tx->tx.callback_param, &result);
}
return 0;
}
......@@ -651,12 +668,27 @@ static irqreturn_t gn412x_dma_irq_handler(int irq, void *arg)
switch (state) {
case GN412X_DMA_STAT_IDLE:
dma_cookie_complete(&tx->tx);
if (tx->tx.callback)
if (tx->tx.callback_result) {
const struct dmaengine_result result = {
.result = DMA_TRANS_NOERROR,
.residue = 0,
};
tx->tx.callback_result(tx->tx.callback_param, &result);
} else if (tx->tx.callback) {
tx->tx.callback(tx->tx.callback_param);
}
break;
case GN412X_DMA_STAT_ERROR:
dev_err(&gn412x_dma->pdev->dev,
"DMA transfer failed: error\n");
if (tx->tx.callback_result) {
const struct dmaengine_result result = {
.result = DMA_TRANS_READ_FAILED,
.residue = 0,
};
tx->tx.callback_result(tx->tx.callback_param, &result);
}
dev_err(&gn412x_dma->pdev->dev, "DMA transfer failed: error\n");
break;
default:
dev_err(&gn412x_dma->pdev->dev,
......
......@@ -39,6 +39,8 @@
#define GN4124_GPIO_SCL 5
#define GN4124_GPIO_SDA 4
#define SPEC_DDR_SIZE (256 * 1024 * 1024)
/**
* @SPEC_FPGA_SELECT_FPGA_FLASH: (default) the FPGA is an SPI master that can
* access the flash (at boot it takes its
......@@ -125,6 +127,8 @@ struct spec_fpga {
struct debugfs_regset32 dbg_csr_reg;
#define SPEC_DBG_BLD_INFO_NAME "build_info"
struct dentry *dbg_bld;
#define SPEC_DBG_DMA_NAME "dma"
struct dentry *dbg_dma;
};
/**
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment