Commit 685175b6 authored by Wesley W. Terpstra

Merge remote branch 'remotes/pcie/master'

parents c407c89d ab51d74c
#export PATH=$PATH:/share/eldk/bin:/share/eldk/usr/bin
#export CROSS_COMPILE=ppc_4xxFP-
#export ARCH=powerpc
# This is useful if cross-compiling. Taken from kernel Makefile (CC changed)
#AS =$(CROSS_COMPILE)as
#LD =$(CROSS_COMPILE)ld
#CC =$(CROSS_COMPILE)gcc
#CPP =$(CC) -E
#AR =$(CROSS_COMPILE)ar
#NM =$(CROSS_COMPILE)nm
#STRIP =$(CROSS_COMPILE)strip
#OBJCOPY =$(CROSS_COMPILE)objcopy
#OBJDUMP =$(CROSS_COMPILE)objdump
KERNELVER ?= $(shell uname -r)
KERNELDIR ?= /lib/modules/$(KERNELVER)/build
ifneq ($(KERNELRELEASE),)
obj-m := pcie_wb.o wishbone.o spec_wb.o
else
PWD := $(shell pwd)
all:
	$(MAKE) -C $(KERNELDIR) M=$(PWD)
install:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules_install
endif
clean:
	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
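
# Example invocations (a sketch assuming a standard kbuild setup; not part of
# the original commit):
#   make                                    # build the modules against the running kernel
#   make KERNELVER=<installed-version>      # build against another installed kernel's headers
#   sudo make install                       # run modules_install for the built modules
# For cross-compiling, export the variables commented out above, e.g.:
#   make ARCH=powerpc CROSS_COMPILE=ppc_4xxFP- KERNELDIR=/path/to/kernel/tree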
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/aer.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <asm/io.h>
#include <asm/spinlock.h>
#include <asm/byteorder.h>
#include "pcie_wb.h"
#include "wishbone.h"
#if defined(__BIG_ENDIAN)
#define endian_addr(width, shift) ((sizeof(wb_data_t)-(width))-(shift))
#elif defined(__LITTLE_ENDIAN)
#define endian_addr(width, shift) shift
#else
#error "unknown machine byte order (endian)"
#endif
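/*
 * Worked example (added for clarity, not part of the original source):
 * for byteenable 0x2 the driver selects width=1, shift=8 and
 * low_addr = endian_addr(1, 1).  On a little-endian host this evaluates
 * to 1, on a big-endian host to (4-1)-1 = 2, so the single byte carrying
 * data bits 15:8 lands on the correct byte address within the 32-bit word
 * for either host byte order.
 */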
static unsigned int debug = 0;
static void wb_cycle(struct wishbone* wb, int on)
{
struct pcie_wb_dev* dev;
unsigned char* control;
dev = container_of(wb, struct pcie_wb_dev, wb);
control = dev->pci_res[0].addr;
if (on) mutex_lock(&dev->mutex);
if (unlikely(debug))
printk(KERN_ALERT PCIE_WB ": cycle(%d)\n", on);
iowrite32(on?0x80000000UL:0, control + CONTROL_REGISTER_HIGH);
if (!on) mutex_unlock(&dev->mutex);
}
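/*
 * Note (added): cycle(1) takes the per-device mutex and sets bit 31 of
 * CONTROL_REGISTER_HIGH, which apparently asserts the wishbone cycle line
 * on the bridge; cycle(0) clears the bit and drops the mutex, so only one
 * requester can hold an open cycle at a time.
 */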
static void wb_byteenable(struct wishbone* wb, unsigned char be)
{
struct pcie_wb_dev* dev;
dev = container_of(wb, struct pcie_wb_dev, wb);
switch (be) {
case 0x1:
dev->width = 1;
dev->shift = 0;
dev->low_addr = endian_addr(1, 0);
break;
case 0x2:
dev->width = 1;
dev->shift = 8;
dev->low_addr = endian_addr(1, 1);
break;
case 0x4:
dev->width = 1;
dev->shift = 16;
dev->low_addr = endian_addr(1, 2);
break;
case 0x8:
dev->width = 1;
dev->shift = 24;
dev->low_addr = endian_addr(1, 3);
break;
case 0x3:
dev->width = 2;
dev->shift = 0;
dev->low_addr = endian_addr(2, 0);
break;
case 0xC:
dev->width = 2;
dev->shift = 16;
dev->low_addr = endian_addr(2, 2);
break;
case 0xF:
dev->width = 4;
dev->shift = 0;
dev->low_addr = endian_addr(4, 0);
break;
default:
/* noop -- ignore the strange bitmask */
break;
}
}
static void wb_write(struct wishbone* wb, wb_addr_t addr, wb_data_t data)
{
struct pcie_wb_dev* dev;
unsigned char* control;
unsigned char* window;
wb_addr_t window_offset;
dev = container_of(wb, struct pcie_wb_dev, wb);
control = dev->pci_res[0].addr;
window = dev->pci_res[1].addr;
window_offset = addr & WINDOW_HIGH;
if (window_offset != dev->window_offset) {
iowrite32(window_offset, control + WINDOW_OFFSET_LOW);
dev->window_offset = window_offset;
}
switch (dev->width) {
case 4:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite32(0x%x, 0x%x)\n", data, addr & ~3);
iowrite32(data, window + (addr & WINDOW_LOW));
break;
case 2:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite16(0x%x, 0x%x)\n", data >> dev->shift, (addr & ~3) + dev->low_addr);
iowrite16(data >> dev->shift, window + (addr & WINDOW_LOW) + dev->low_addr);
break;
case 1:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite8(0x%x, 0x%x)\n", data >> dev->shift, (addr & ~3) + dev->low_addr);
iowrite8 (data >> dev->shift, window + (addr & WINDOW_LOW) + dev->low_addr);
break;
}
}
static wb_data_t wb_read(struct wishbone* wb, wb_addr_t addr)
{
wb_data_t out;
struct pcie_wb_dev* dev;
unsigned char* control;
unsigned char* window;
wb_addr_t window_offset;
dev = container_of(wb, struct pcie_wb_dev, wb);
control = dev->pci_res[0].addr;
window = dev->pci_res[1].addr;
window_offset = addr & WINDOW_HIGH;
if (window_offset != dev->window_offset) {
iowrite32(window_offset, control + WINDOW_OFFSET_LOW);
dev->window_offset = window_offset;
}
switch (dev->width) {
case 4:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread32(0x%x)\n", addr & ~3);
out = ((wb_data_t)ioread32(window + (addr & WINDOW_LOW)));
break;
case 2:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread16(0x%x)\n", (addr & ~3) + dev->low_addr);
out = ((wb_data_t)ioread16(window + (addr & WINDOW_LOW) + dev->low_addr)) << dev->shift;
break;
case 1:
if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread8(0x%x)\n", (addr & ~3) + dev->low_addr);
out = ((wb_data_t)ioread8 (window + (addr & WINDOW_LOW) + dev->low_addr)) << dev->shift;
break;
default: /* technically unreachable */
out = 0;
break;
}
mb(); /* ensure serial ordering of non-posted operations for wishbone */
return out;
}
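/*
 * Worked example of the windowing scheme (added for clarity): BAR1 exposes
 * a 64 KiB window into the wishbone address space.  For addr = 0x00123458,
 * window_offset = addr & WINDOW_HIGH = 0x00120000 is written to
 * WINDOW_OFFSET_LOW in the control BAR (only when it changes), and the
 * access itself goes to window + (addr & WINDOW_LOW) = window + 0x3458.
 */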
static wb_data_t wb_read_cfg(struct wishbone *wb, wb_addr_t addr)
{
wb_data_t out;
struct pcie_wb_dev* dev;
unsigned char* control;
dev = container_of(wb, struct pcie_wb_dev, wb);
control = dev->pci_res[0].addr;
switch (addr) {
case 0: out = ioread32(control + ERROR_FLAG_HIGH); break;
case 4: out = ioread32(control + ERROR_FLAG_LOW); break;
case 8: out = ioread32(control + SDWB_ADDRESS_HIGH); break;
case 12: out = ioread32(control + SDWB_ADDRESS_LOW); break;
default: out = 0; break;
}
mb(); /* ensure serial ordering of non-posted operations for wishbone */
return out;
}
static const struct wishbone_operations wb_ops = {
.cycle = wb_cycle,
.byteenable = wb_byteenable,
.write = wb_write,
.read = wb_read,
.read_cfg = wb_read_cfg,
};
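/*
 * These callbacks are driven by the wishbone core (presumably wishbone.c,
 * whose diff is collapsed below): cycle(wb, 1) opens a cycle, byteenable/
 * write/read perform the individual accesses, and cycle(wb, 0) closes it
 * again, releasing the mutex taken in wb_cycle().
 */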
#if 0
static irqreturn_t irq_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
#endif
static int setup_bar(struct pci_dev* pdev, struct pcie_wb_resource* res, int bar)
{
/* request and remap one BAR */
res->start = pci_resource_start(pdev, bar);
res->end = pci_resource_end(pdev, bar);
res->size = res->end - res->start + 1;
if (debug)
printk(KERN_ALERT PCIE_WB "/BAR%d 0x%lx - 0x%lx\n", bar, res->start, res->end);
// is_mem = pci_resource_flags(pdev, 0);
// is_mem = is_mem & IORESOURCE_MEM;
if (!request_mem_region(res->start, res->size, PCIE_WB)) {
printk(KERN_ALERT PCIE_WB "/BAR%d: request_mem_region failed\n", bar);
return -ENOMEM;
}
res->addr = ioremap_nocache(res->start, res->size);
if (!res->addr) {
printk(KERN_ALERT PCIE_WB "/BAR%d: ioremap failed\n", bar);
release_mem_region(res->start, res->size);
return -ENOMEM;
}
if (debug)
printk(KERN_ALERT PCIE_WB "/BAR%d: ioremap to %lx\n", bar, (unsigned long)res->addr);
return 0;
}
static void destroy_bar(struct pcie_wb_resource* res)
{
if (debug)
printk(KERN_ALERT "released io 0x%lx\n", res->start);
iounmap(res->addr);
release_mem_region(res->start, res->size);
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
/* Do probing type stuff here.
* Like calling request_region();
* reading BARs
* reading IRQ
* register char dev
*/
u8 revision;
struct pcie_wb_dev *dev;
unsigned char* control;
pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
if (revision != 0x01) {
printk(KERN_ALERT PCIE_WB ": revision ID wrong!\n");
goto fail_out;
}
dev = kmalloc(sizeof(struct pcie_wb_dev), GFP_KERNEL);
if (!dev) {
printk(KERN_ALERT PCIE_WB ": could not allocate memory for pcie_wb_dev structure!\n");
goto fail_out;
}
/* Initialize structure */
dev->pci_dev = pdev;
dev->wb.wops = &wb_ops;
strcpy(dev->wb.name, PCIE_WB "%d");
dev->wb.parent = &pdev->dev;
mutex_init(&dev->mutex);
dev->window_offset = 0;
dev->low_addr = 0;
dev->width = 4;
dev->shift = 0;
pci_set_drvdata(pdev, dev);
/* enable message signaled interrupts */
if (pci_enable_msi(pdev) != 0) {
/* no legacy interrupt fallback; abort the probe */
printk(KERN_ALERT PCIE_WB ": could not enable MSI interrupts\n");
goto fail_free;
}
if (setup_bar(pdev, &dev->pci_res[0], 0) < 0) goto fail_msi;
if (setup_bar(pdev, &dev->pci_res[1], 1) < 0) goto fail_bar0;
if (wishbone_register(&dev->wb) < 0) {
printk(KERN_ALERT PCIE_WB ": could not register wishbone bus\n");
goto fail_bar1;
}
/* Initialize device */
control = dev->pci_res[0].addr;
iowrite32(0, control + WINDOW_OFFSET_LOW);
iowrite32(0, control + CONTROL_REGISTER_HIGH);
return pci_enable_device(pdev);
/* cleaning up */
fail_bar1:
destroy_bar(&dev->pci_res[1]);
fail_bar0:
destroy_bar(&dev->pci_res[0]);
fail_msi:
pci_disable_msi(pdev);
fail_free:
kfree(dev);
fail_out:
return -EIO;
}
static void remove(struct pci_dev *pdev)
{
/* clean up any allocated resources and stuff here.
* like call release_mem_region();
*/
struct pcie_wb_dev *dev;
dev = pci_get_drvdata(pdev);
wishbone_unregister(&dev->wb);
destroy_bar(&dev->pci_res[1]);
destroy_bar(&dev->pci_res[0]);
pci_disable_msi(pdev);
kfree(dev);
}
static struct pci_device_id ids[] = {
{ PCI_DEVICE(PCIE_WB_VENDOR_ID, PCIE_WB_DEVICE_ID), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
static struct pci_driver pcie_wb_driver = {
.name = PCIE_WB,
.id_table = ids,
.probe = probe,
.remove = remove,
};
static int __init pcie_wb_init(void)
{
return pci_register_driver(&pcie_wb_driver);
}
static void __exit pcie_wb_exit(void)
{
pci_unregister_driver(&pcie_wb_driver);
}
MODULE_AUTHOR("Stefan Rauch <s.rauch@gsi.de>");
MODULE_DESCRIPTION("GSI Altera-Wishbone bridge driver");
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debugging information");
MODULE_LICENSE("GPL");
MODULE_VERSION(PCIE_WB_VERSION);
module_init(pcie_wb_init);
module_exit(pcie_wb_exit);
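/*
 * Usage note (assumption, not part of this commit): load with
 * "modprobe pcie_wb debug=1" (or insmod on the built .ko) to enable the
 * debug printks; because the parameter is registered with mode 0644 it can
 * also be changed at runtime via /sys/module/pcie_wb/parameters/debug.
 */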
#ifndef PCIE_WB_DRIVER_H
#define PCIE_WB_DRIVER_H
#include "wishbone.h"
#define PCIE_WB "pcie_wb"
#define PCIE_WB_VERSION "0.1"
#define PCIE_WB_VENDOR_ID 0x1172
#define PCIE_WB_DEVICE_ID 0x0004
#define CONTROL_REGISTER_HIGH 0
#define CONTROL_REGISTER_LOW 4
#define ERROR_FLAG_HIGH 8
#define ERROR_FLAG_LOW 12
#define WINDOW_OFFSET_HIGH 16
#define WINDOW_OFFSET_LOW 20
#define SDWB_ADDRESS_HIGH 24
#define SDWB_ADDRESS_LOW 28
#define WINDOW_HIGH 0xFFFF0000UL
#define WINDOW_LOW 0x0000FFFCUL
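/*
 * The offsets above are byte offsets of 32-bit registers in the control BAR
 * (BAR0).  WINDOW_HIGH/WINDOW_LOW split a wishbone address into the page
 * written to WINDOW_OFFSET_LOW and the 32-bit-aligned offset used inside
 * the 64 KiB data window (BAR1); see pcie_wb.c above.
 */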
/* One per BAR */
struct pcie_wb_resource {
unsigned long start; /* start addr of BAR */
unsigned long end; /* end addr of BAR */
unsigned long size; /* size of BAR */
void *addr; /* remapped addr */
};
/* One per physical card */
struct pcie_wb_dev {
struct pci_dev* pci_dev;
struct pcie_wb_resource pci_res[2];
int pci_irq[4];
struct wishbone wb;
struct mutex mutex; /* only one user can open a cycle at a time */
unsigned int window_offset;
unsigned int low_addr, width, shift;
};
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/aer.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <asm/io.h>
#include <asm/spinlock.h>
#include <asm/byteorder.h>
#include "spec_wb.h"
#include "wishbone.h"
#if defined(__BIG_ENDIAN)
#define endian_addr(width, shift) ((sizeof(wb_data_t)-(width))-(shift))
#elif defined(__LITTLE_ENDIAN)
#define endian_addr(width, shift) shift
#else
#error "unknown machine byte order (endian)"
#endif
static unsigned int debug = 0;
static void wb_cycle(struct wishbone* wb, int on)
{
struct spec_wb_dev* dev;
dev = container_of(wb, struct spec_wb_dev, wb);
if (on) mutex_lock(&dev->mutex);
if (unlikely(debug))
printk(KERN_ALERT SPEC_WB ": cycle(%d)\n", on);
if (!on) mutex_unlock(&dev->mutex);
}
static void wb_byteenable(struct wishbone* wb, unsigned char be)
{
struct spec_wb_dev* dev;
dev = container_of(wb, struct spec_wb_dev, wb);
switch (be) {
case 0x1:
dev->width = 1;
dev->shift = 0;
dev->low_addr = endian_addr(1, 0);
break;
case 0x2:
dev->width = 1;
dev->shift = 8;
dev->low_addr = endian_addr(1, 1);
break;
case 0x4:
dev->width = 1;
dev->shift = 16;
dev->low_addr = endian_addr(1, 2);
break;
case 0x8:
dev->width = 1;
dev->shift = 24;
dev->low_addr = endian_addr(1, 3);
break;
case 0x3:
dev->width = 2;
dev->shift = 0;
dev->low_addr = endian_addr(2, 0);
break;
case 0xC:
dev->width = 2;
dev->shift = 16;
dev->low_addr = endian_addr(2, 2);
break;
case 0xF:
dev->width = 4;
dev->shift = 0;
dev->low_addr = endian_addr(4, 0);
break;
default:
/* noop -- ignore the strange bitmask */
break;
}
}
static void wb_write(struct wishbone* wb, wb_addr_t addr, wb_data_t data)
{
struct spec_wb_dev* dev;
unsigned char* window;
dev = container_of(wb, struct spec_wb_dev, wb);
window = (unsigned char*)dev->pci_res[WB_BAR].addr + WB_OFFSET;
addr = (addr & WB_LOW) + dev->low_addr;
switch (dev->width) {
case 4:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite32(0x%x, 0x%x)\n", data, addr);
iowrite32(data, window + addr);
break;
case 2:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite16(0x%x, 0x%x)\n", data >> dev->shift, addr);
iowrite16(data >> dev->shift, window + addr);
break;
case 1:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite8(0x%x, 0x%x)\n", data >> dev->shift, addr);
iowrite8 (data >> dev->shift, window + addr);
break;
}
}
static wb_data_t wb_read(struct wishbone* wb, wb_addr_t addr)
{
wb_data_t out;
struct spec_wb_dev* dev;
unsigned char* window;
dev = container_of(wb, struct spec_wb_dev, wb);
window = (unsigned char*)dev->pci_res[WB_BAR].addr + WB_OFFSET;
addr = (addr & WB_LOW) + dev->low_addr;
switch (dev->width) {
case 4:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread32(0x%x)\n", addr);
out = ((wb_data_t)ioread32(window + addr));
break;
case 2:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread16(0x%x)\n", addr);
out = ((wb_data_t)ioread16(window + addr)) << dev->shift;
break;
case 1:
if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread8(0x%x)\n", addr);
out = ((wb_data_t)ioread8 (window + addr)) << dev->shift;
break;
default: /* technically unreachable */
out = 0;
break;
}
mb(); /* ensure serial ordering of non-posted operations for wishbone */
return out;
}
static wb_data_t wb_read_cfg(struct wishbone *wb, wb_addr_t addr)
{
wb_data_t out;
switch (addr) {
case 0: out = 0; break;
case 4: out = 0; break;
case 8: out = 0; break;
case 12: out = 0x30000; break;
default: out = 0; break;
}
mb(); /* ensure serial ordering of non-posted operations for wishbone */
return out;
}
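/*
 * Note (added): the SPEC has no bridge config registers, so wb_read_cfg()
 * returns constants.  By analogy with the pcie_wb config layout above,
 * offset 12 would be the low word of the SDWB descriptor address, here
 * hard-wired to 0x30000; the error-flag words read back as zero.
 */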
static const struct wishbone_operations wb_ops = {
.cycle = wb_cycle,
.byteenable = wb_byteenable,
.write = wb_write,
.read = wb_read,
.read_cfg = wb_read_cfg,
};
#if 0
static irqreturn_t irq_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
#endif
static int setup_bar(struct pci_dev* pdev, struct spec_wb_resource* res, int bar)
{
/* request and remap one BAR */
res->start = pci_resource_start(pdev, bar);
res->end = pci_resource_end(pdev, bar);
res->size = res->end - res->start + 1;
if (debug)
printk(KERN_ALERT SPEC_WB "/BAR%d 0x%lx - 0x%lx\n", bar, res->start, res->end);
// is_mem = pci_resource_flags(pdev, 0);
// is_mem = is_mem & IORESOURCE_MEM;
if (!request_mem_region(res->start, res->size, SPEC_WB)) {
printk(KERN_ALERT SPEC_WB "/BAR%d: request_mem_region failed\n", bar);
return -ENOMEM;
}
res->addr = ioremap_nocache(res->start, res->size);
if (!res->addr) {
printk(KERN_ALERT SPEC_WB "/BAR%d: ioremap failed\n", bar);
release_mem_region(res->start, res->size);
return -ENOMEM;
}
if (debug)
printk(KERN_ALERT SPEC_WB "/BAR%d: ioremap to %lx\n", bar, (unsigned long)res->addr);
return 0;
}
static void destroy_bar(struct spec_wb_resource* res)
{
if (debug)
printk(KERN_ALERT "released io 0x%lx\n", res->start);
iounmap(res->addr);
release_mem_region(res->start, res->size);
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
/* Do probing type stuff here.
* Like calling request_region();
* reading BARs
* reading IRQ
* register char dev
*/
u8 revision;
struct spec_wb_dev *dev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
if (revision != 0x03) {
printk(KERN_ALERT SPEC_WB ": revision ID wrong!\n");
goto fail_out;
}
dev = kmalloc(sizeof(struct spec_wb_dev), GFP_KERNEL);
if (!dev) {
printk(KERN_ALERT SPEC_WB ": could not allocate memory for spec_wb_dev structure!\n");
goto fail_out;
}
/* Initialize structure */
dev->pci_dev = pdev;
dev->wb.wops = &wb_ops;
strcpy(dev->wb.name, SPEC_WB "%d");
dev->wb.parent = &pdev->dev;
mutex_init(&dev->mutex);
dev->window_offset = 0;
dev->low_addr = 0;
dev->width = 4;
dev->shift = 0;
pci_set_drvdata(pdev, dev);
/* enable message signaled interrupts */
if (pci_enable_msi(pdev) != 0) {
/* no legacy interrupt fallback; abort the probe */
printk(KERN_ALERT SPEC_WB ": could not enable MSI interrupts\n");
goto fail_free;
}
if (setup_bar(pdev, &dev->pci_res[0], 0) < 0) goto fail_msi;
if (setup_bar(pdev, &dev->pci_res[1], 2) < 0) goto fail_bar0;
if (setup_bar(pdev, &dev->pci_res[2], 4) < 0) goto fail_bar1;
if (wishbone_register(&dev->wb) < 0) {
printk(KERN_ALERT SPEC_WB ": could not register wishbone bus\n");
goto fail_bar2;
}
return pci_enable_device(pdev);
/* cleaning up */
fail_bar2:
destroy_bar(&dev->pci_res[2]);
fail_bar1:
destroy_bar(&dev->pci_res[1]);
fail_bar0:
destroy_bar(&dev->pci_res[0]);
fail_msi:
pci_disable_msi(pdev);
fail_free:
kfree(dev);
fail_out:
return -EIO;
}
static void remove(struct pci_dev *pdev)
{
/* clean up any allocated resources and stuff here.
* like call release_mem_region();
*/
struct spec_wb_dev *dev;
dev = pci_get_drvdata(pdev);
wishbone_unregister(&dev->wb);
destroy_bar(&dev->pci_res[2]);
destroy_bar(&dev->pci_res[1]);
destroy_bar(&dev->pci_res[0]);
pci_disable_msi(pdev);
kfree(dev);
}
static struct pci_device_id ids[] = {
{ PCI_DEVICE(SPEC_WB_VENDOR_ID, SPEC_WB_DEVICE_ID), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
static struct pci_driver spec_wb_driver = {
.name = SPEC_WB,
.id_table = ids,
.probe = probe,
.remove = remove,
};
static int __init spec_wb_init(void)
{
return pci_register_driver(&spec_wb_driver);
}
static void __exit spec_wb_exit(void)
{
pci_unregister_driver(&spec_wb_driver);
}
MODULE_AUTHOR("Wesley W. Terpstra <w.tersptra@gsi.de>");
MODULE_DESCRIPTION("CERN SPEC card bridge");
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debugging information");
MODULE_LICENSE("GPL");
MODULE_VERSION(SPEC_WB_VERSION);
module_init(spec_wb_init);
module_exit(spec_wb_exit);
#ifndef SPEC_WB_DRIVER_H
#define SPEC_WB_DRIVER_H
#include "wishbone.h"
#define SPEC_WB "spec_wb"
#define SPEC_WB_VERSION "0.1"
#define SPEC_WB_VENDOR_ID 0x10dc
#define SPEC_WB_DEVICE_ID 0x018d
#define WB_BAR 0
#define WB_OFFSET 0x80000
#define WB_LOW 0x3fffc
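/*
 * Unlike the pcie_wb bridge, the SPEC exposes a fixed 256 KiB wishbone
 * aperture: WB_BAR selects the BAR, WB_OFFSET is the start of the aperture
 * within it, and WB_LOW masks the wishbone address down to a 32-bit-aligned
 * offset inside that aperture (0x40000 bytes); no window register is needed.
 */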
/* One per BAR */
struct spec_wb_resource {
unsigned long start; /* start addr of BAR */
unsigned long end; /* end addr of BAR */
unsigned long size; /* size of BAR */
void *addr; /* remapped addr */
};
/* One per physical card */
struct spec_wb_dev {
struct pci_dev* pci_dev;
struct spec_wb_resource pci_res[3];
int pci_irq[4];
struct wishbone wb;
struct mutex mutex; /* only one user can open a cycle at a time */
unsigned int window_offset;
unsigned int low_addr, width, shift;
};
#endif
[Collapsed diff not shown here.]
#ifndef WISHBONE_H
#define WISHBONE_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cdev.h>
#define WISHBONE_VERSION "0.1"
#define WISHONE_MAX_DEVICES 32 /* default only */
#define ETHERBONE_BCA 0x80
#define ETHERBONE_RCA 0x40
#define ETHERBONE_RFF 0x20
#define ETHERBONE_CYC 0x08
#define ETHERBONE_WCA 0x04
#define ETHERBONE_WFF 0x02
/* Implementation assumes these have the same size: */
typedef unsigned int wb_addr_t;
typedef unsigned int wb_data_t;
struct wishbone;
struct wishbone_operations
{
void (*cycle)(struct wishbone* wb, int on);
void (*byteenable)(struct wishbone* wb, unsigned char mask);
void (*write)(struct wishbone* wb, wb_addr_t addr, wb_data_t);
wb_data_t (*read)(struct wishbone* wb, wb_addr_t addr);
wb_data_t (*read_cfg)(struct wishbone* wb, wb_addr_t addr);
};
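/*
 * A backend driver embeds a struct wishbone in its per-device state, fills
 * in wops/name/parent and registers it, as pcie_wb.c and spec_wb.c above do.
 * Minimal sketch (hypothetical backend "foo_wb"; all names illustrative):
 *
 *   static const struct wishbone_operations foo_wb_ops = {
 *       .cycle      = foo_wb_cycle,
 *       .byteenable = foo_wb_byteenable,
 *       .write      = foo_wb_write,
 *       .read       = foo_wb_read,
 *       .read_cfg   = foo_wb_read_cfg,
 *   };
 *
 *   dev->wb.wops = &foo_wb_ops;
 *   strcpy(dev->wb.name, "foo_wb%d");
 *   dev->wb.parent = &pdev->dev;
 *   if (wishbone_register(&dev->wb) < 0)
 *       goto fail;
 */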
/* One per wishbone backend hardware */
struct wishbone
{
char name[32];
const struct wishbone_operations* wops;
struct device *parent;
/* internal: */
dev_t dev;
struct cdev cdev;
struct list_head list;
struct device *device;
};
#define RING_SIZE 8192
#define RING_INDEX(x) ((x) & (RING_SIZE-1))
#define RING_POS(x) ((x) & (RING_SIZE*2-1))
/* One per open of character device */
struct etherbone_context
{
struct wishbone* wishbone;
struct fasync_struct *fasync;
struct mutex mutex;
wait_queue_head_t waitq;
enum { header, idle, cycle } state;
unsigned int sent, processed, received; /* sent <= processed <= received */
unsigned char buf[RING_SIZE]; /* Ring buffer */
};
#define RING_READ_LEN(ctx) RING_POS((ctx)->processed - (ctx)->sent)
#define RING_PROC_LEN(ctx) RING_POS((ctx)->received - (ctx)->processed)
#define RING_WRITE_LEN(ctx) RING_POS((ctx)->sent + RING_SIZE - (ctx)->received)
#define RING_POINTER(ctx, idx) (&(ctx)->buf[RING_INDEX((ctx)->idx)])
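/*
 * Ring arithmetic (added note): sent, processed and received are free-running
 * positions counted modulo 2*RING_SIZE, so a completely full buffer
 * (received - sent == RING_SIZE) stays distinguishable from an empty one.
 * Example: with sent = processed = 0 and received = 8192, RING_PROC_LEN is
 * 8192 (everything still to be processed) and RING_WRITE_LEN is 0 (no room
 * for further writes); RING_INDEX maps a position to its slot in buf[].
 */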
int wishbone_register(struct wishbone* wb);
int wishbone_unregister(struct wishbone* wb);
#endif