diff --git a/pcie-wb/Makefile b/pcie-wb/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..927c8b3ee5ef3c4f61e36dc75127fffd52ee7823
--- /dev/null
+++ b/pcie-wb/Makefile
@@ -0,0 +1,35 @@
+#export PATH=$PATH:/share/eldk/bin:/share/eldk/usr/bin
+#export CROSS_COMPILE=ppc_4xxFP-
+#export ARCH=powerpc
+
+# This is useful if cross-compiling. Taken from kernel Makefile (CC changed)
+#AS      =$(CROSS_COMPILE)as
+#LD      =$(CROSS_COMPILE)ld
+#CC      =$(CROSS_COMPILE)gcc
+#CPP     =$(CC) -E
+#AR      =$(CROSS_COMPILE)ar
+#NM      =$(CROSS_COMPILE)nm
+#STRIP   =$(CROSS_COMPILE)strip
+#OBJCOPY =$(CROSS_COMPILE)objcopy
+#OBJDUMP =$(CROSS_COMPILE)objdump
+
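+# Builds against the running kernel by default; override with e.g.
+#   make KERNELVER=3.2.0 KERNELDIR=/path/to/kernel/build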
+
+KERNELVER ?= $(shell uname -r)
+KERNELDIR ?= /lib/modules/$(KERNELVER)/build
+
+ifneq ($(KERNELRELEASE),)
+	obj-m	:= pcie_wb.o wishbone.o spec_wb.o
+else
+	PWD       := $(shell pwd)
+
+all:
+	$(MAKE) -C $(KERNELDIR) M=$(PWD)
+
+install:
+	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules_install
+
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
diff --git a/pcie-wb/pcie_wb.c b/pcie-wb/pcie_wb.c
new file mode 100644
index 0000000000000000000000000000000000000000..f5536a692ad9e60478a2bf8afcd7d97e139f205e
--- /dev/null
+++ b/pcie-wb/pcie_wb.c
@@ -0,0 +1,364 @@
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/aer.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "pcie_wb.h"
+#include "wishbone.h"
+
+#if defined(__BIG_ENDIAN)
+#define endian_addr(width, shift) ((sizeof(wb_data_t)-(width))-(shift))
+#elif defined(__LITTLE_ENDIAN)
+#define endian_addr(width, shift) (shift)
+#else
+#error "unknown machine byte order (endian)"
+#endif
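+
+/* Byte offset of a sub-word access within an aligned 32-bit word, given its
+ * width and byte shift, for either host endianness: e.g. a 16-bit access to
+ * byte lanes 2-3 lives at offset 2 on little-endian, 0 on big-endian. */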
+
+static unsigned int debug = 0;
+
+static void wb_cycle(struct wishbone* wb, int on)
+{
+	struct pcie_wb_dev* dev;
+	unsigned char* control;
+	
+	dev = container_of(wb, struct pcie_wb_dev, wb);
+	control = dev->pci_res[0].addr;
+	
+	if (on) mutex_lock(&dev->mutex);
+	
+	if (unlikely(debug))
+		printk(KERN_ALERT PCIE_WB ": cycle(%d)\n", on);
+	
+	iowrite32(on?0x80000000UL:0, control + CONTROL_REGISTER_HIGH);
+	
+	if (!on) mutex_unlock(&dev->mutex);
+}
+
+static void wb_byteenable(struct wishbone* wb, unsigned char be)
+{
+	struct pcie_wb_dev* dev;
+	
+	dev = container_of(wb, struct pcie_wb_dev, wb);
+	
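+	/* Decode the wishbone SEL mask into an access width, the bit shift
+	 * of the selected lanes within the 32-bit word, and the byte offset
+	 * of the low lane; non-contiguous masks are ignored. */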
+	switch (be) {
+	case 0x1:
+		dev->width = 1;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(1, 0);
+		break;
+	case 0x2:
+		dev->width = 1;
+		dev->shift = 8;
+		dev->low_addr = endian_addr(1, 1);
+		break;
+	case 0x4:
+		dev->width = 1;
+		dev->shift = 16;
+		dev->low_addr = endian_addr(1, 2);
+		break;
+	case 0x8:
+		dev->width = 1;
+		dev->shift = 24;
+		dev->low_addr = endian_addr(1, 3);
+		break;
+	case 0x3:
+		dev->width = 2;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(2, 0);
+		break;
+	case 0xC:
+		dev->width = 2;
+		dev->shift = 16;
+		dev->low_addr = endian_addr(2, 2);
+		break;
+	case 0xF:
+		dev->width = 4;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(4, 0);
+		break;
+	default:
+		/* noop -- ignore the strange bitmask */
+		break;
+	}
+}
+
+static void wb_write(struct wishbone* wb, wb_addr_t addr, wb_data_t data)
+{
+	struct pcie_wb_dev* dev;
+	unsigned char* control;
+	unsigned char* window;
+	wb_addr_t window_offset;
+	
+	dev = container_of(wb, struct pcie_wb_dev, wb);
+	control = dev->pci_res[0].addr;
+	window = dev->pci_res[1].addr;
+	
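+	/* BAR1 is a sliding window into the wishbone bus: the high address
+	 * bits are programmed into the window-offset register and the low
+	 * bits index inside the 64 KiB window. Skip the register write if
+	 * the window is already positioned correctly. */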
+	window_offset = addr & WINDOW_HIGH;
+	if (window_offset != dev->window_offset) {
+		iowrite32(window_offset, control + WINDOW_OFFSET_LOW);
+		dev->window_offset = window_offset;
+	}
+	
+	switch (dev->width) {
+	case 4:	
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite32(0x%x, 0x%x)\n", data, addr & ~3);
+		iowrite32(data, window + (addr & WINDOW_LOW)); 
+		break;
+	case 2: 
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite16(0x%x, 0x%x)\n", data >> dev->shift, (addr & ~3) + dev->low_addr);
+		iowrite16(data >> dev->shift, window + (addr & WINDOW_LOW) + dev->low_addr); 
+		break;
+	case 1: 
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": iowrite8(0x%x, 0x%x)\n", data >> dev->shift, (addr & ~3) + dev->low_addr);
+		iowrite8 (data >> dev->shift, window + (addr & WINDOW_LOW) + dev->low_addr); 
+		break;
+	}
+}
+
+static wb_data_t wb_read(struct wishbone* wb, wb_addr_t addr)
+{
+	wb_data_t out;
+	struct pcie_wb_dev* dev;
+	unsigned char* control;
+	unsigned char* window;
+	wb_addr_t window_offset;
+	
+	dev = container_of(wb, struct pcie_wb_dev, wb);
+	control = dev->pci_res[0].addr;
+	window = dev->pci_res[1].addr;
+	
+	window_offset = addr & WINDOW_HIGH;
+	if (window_offset != dev->window_offset) {
+		iowrite32(window_offset, control + WINDOW_OFFSET_LOW);
+		dev->window_offset = window_offset;
+	}
+	
+	switch (dev->width) {
+	case 4:	
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread32(0x%x)\n", addr & ~3);
+		out = ((wb_data_t)ioread32(window + (addr & WINDOW_LOW)));
+		break;
+	case 2: 
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread16(0x%x)\n", (addr & ~3) + dev->low_addr);
+		out = ((wb_data_t)ioread16(window + (addr & WINDOW_LOW) + dev->low_addr)) << dev->shift;
+		break;
+	case 1: 
+		if (unlikely(debug)) printk(KERN_ALERT PCIE_WB ": ioread8(0x%x)\n", (addr & ~3) + dev->low_addr);
+		out = ((wb_data_t)ioread8 (window + (addr & WINDOW_LOW) + dev->low_addr)) << dev->shift;
+		break;
+	default: /* unreachable: width is always 1, 2 or 4 */
+		out = 0;
+		break;
+	}
+
+	mb(); /* ensure serial ordering of non-posted operations for wishbone */
+	
+	return out;
+}
+
+static wb_data_t wb_read_cfg(struct wishbone *wb, wb_addr_t addr)
+{
+	wb_data_t out;
+	struct pcie_wb_dev* dev;
+	unsigned char* control;
+	
+	dev = container_of(wb, struct pcie_wb_dev, wb);
+	control = dev->pci_res[0].addr;
+	
+	switch (addr) {
+	case 0:  out = ioread32(control + ERROR_FLAG_HIGH);   break;
+	case 4:  out = ioread32(control + ERROR_FLAG_LOW);    break;
+	case 8:  out = ioread32(control + SDWB_ADDRESS_HIGH); break;
+	case 12: out = ioread32(control + SDWB_ADDRESS_LOW);  break;
+	default: out = 0; break;
+	}
+	
+	mb(); /* ensure serial ordering of non-posted operations for wishbone */
+	
+	return out;
+}
+
+static const struct wishbone_operations wb_ops = {
+	.cycle      = wb_cycle,
+	.byteenable = wb_byteenable,
+	.write      = wb_write,
+	.read       = wb_read,
+	.read_cfg   = wb_read_cfg,
+};
+
+#if 0
+static irq_handler_t irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	return (irq_handler_t)IRQ_HANDLED;
+}
+#endif
+
+static int setup_bar(struct pci_dev* pdev, struct pcie_wb_resource* res, int bar)
+{
+	/* Read this BAR's address range from PCI config space */
+	res->start = pci_resource_start(pdev, bar);
+	res->end = pci_resource_end(pdev, bar);
+	res->size = res->end - res->start + 1;
+	
+	if (debug)
+		printk(KERN_ALERT PCIE_WB "/BAR%d  0x%lx - 0x%lx\n", bar, res->start, res->end);
+
+	if (!request_mem_region(res->start, res->size, PCIE_WB)) {
+		printk(KERN_ALERT PCIE_WB "/BAR%d: request_mem_region failed\n", bar);
+		return -ENOMEM;
+	}
+	
+	res->addr = ioremap_nocache(res->start, res->size);
+	if (!res->addr) {
+		printk(KERN_ALERT PCIE_WB "/BAR%d: ioremap_nocache failed\n", bar);
+		release_mem_region(res->start, res->size);
+		return -ENOMEM;
+	}
+	if (debug)
+		printk(KERN_ALERT PCIE_WB "/BAR%d: ioremap to %lx\n", bar, (unsigned long)res->addr);
+	
+	return 0;
+}
+
+static void destroy_bar(struct pcie_wb_resource* res)
+{
+	if (debug)
+		printk(KERN_ALERT "released io 0x%lx\n", res->start);
+		
+	iounmap(res->addr);
+	release_mem_region(res->start, res->size);
+}
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	/* Enable the device, verify the gateware revision, map the BARs,
+	 * and register the wishbone bus with its character device.
+	 */
+	u8 revision;
+	struct pcie_wb_dev *dev;
+	unsigned char* control;
+
+	if (pci_enable_device(pdev) < 0) {
+		printk(KERN_ALERT PCIE_WB ": could not enable device\n");
+		goto fail_out;
+	}
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+	if (revision != 0x01) {
+		printk(KERN_ALERT PCIE_WB ": unsupported revision ID 0x%02x (expected 0x01)\n", revision);
+		goto fail_disable;
+	}
+
+	dev = kmalloc(sizeof(struct pcie_wb_dev), GFP_KERNEL);
+	if (!dev) {
+		printk(KERN_ALERT PCIE_WB ": could not allocate memory for pcie_wb_dev structure!\n");
+		goto fail_disable;
+	}
+	
+	/* Initialize structure */
+	dev->pci_dev = pdev;
+	dev->wb.wops = &wb_ops;
+	strcpy(dev->wb.name, PCIE_WB "%d");
+	dev->wb.parent = &pdev->dev;
+	mutex_init(&dev->mutex);
+	dev->window_offset = 0;
+	dev->low_addr = 0;
+	dev->width = 4;
+	dev->shift = 0;
+	pci_set_drvdata(pdev, dev);
+	
+	/* enable message signaled interrupts */
+	if (pci_enable_msi(pdev) != 0) {
+		/* resort to legacy interrupts */
+		printk(KERN_ALERT PCIE_WB ": could not enable MSI interrupting\n");
+		goto fail_free;
+	}
+
+	if (setup_bar(pdev, &dev->pci_res[0], 0) < 0) goto fail_msi;
+	if (setup_bar(pdev, &dev->pci_res[1], 1) < 0) goto fail_bar0;
+	
+	if (wishbone_register(&dev->wb) < 0) {
+		printk(KERN_ALERT PCIE_WB ": could not register wishbone bus\n");
+		goto fail_bar1;
+	}
+	
+	/* Initialize device */
+	control = dev->pci_res[0].addr;
+	iowrite32(0, control + WINDOW_OFFSET_LOW);
+	iowrite32(0, control + CONTROL_REGISTER_HIGH);
+
+	return 0;
+
+	/* cleaning up */
+fail_bar1:
+	destroy_bar(&dev->pci_res[1]);
+fail_bar0:
+	destroy_bar(&dev->pci_res[0]);
+fail_msi:	
+	pci_disable_msi(pdev);
+fail_free:
+	kfree(dev);
+fail_disable:
+	pci_disable_device(pdev);
+fail_out:
+	return -EIO;
+}
+
+static void remove(struct pci_dev *pdev)
+{
+	/* Tear everything down in the reverse order of probe() */
+	struct pcie_wb_dev *dev;
+	
+	dev = pci_get_drvdata(pdev);
+	wishbone_unregister(&dev->wb);
+	
+	destroy_bar(&dev->pci_res[1]);
+	destroy_bar(&dev->pci_res[0]);
+	
+	pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+
+	kfree(dev);
+}
+
+static struct pci_device_id ids[] = {
+	{ PCI_DEVICE(PCIE_WB_VENDOR_ID, PCIE_WB_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+static struct pci_driver pcie_wb_driver = {
+	.name = PCIE_WB,
+	.id_table = ids,
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init pcie_wb_init(void)
+{
+	return pci_register_driver(&pcie_wb_driver);
+}
+
+static void __exit pcie_wb_exit(void)
+{	
+	pci_unregister_driver(&pcie_wb_driver);
+}
+
+MODULE_AUTHOR("Stefan Rauch <s.rauch@gsi.de>");
+MODULE_DESCRIPTION("GSI Altera-Wishbone bridge driver");
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "Enable debugging information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PCIE_WB_VERSION);
+
+module_init(pcie_wb_init);
+module_exit(pcie_wb_exit);
diff --git a/pcie-wb/pcie_wb.h b/pcie-wb/pcie_wb.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d51eca8ddf87a788288b6c86139910558a0d85b
--- /dev/null
+++ b/pcie-wb/pcie_wb.h
@@ -0,0 +1,44 @@
+#ifndef PCIE_WB_DRIVER_H
+#define PCIE_WB_DRIVER_H
+
+#include "wishbone.h"
+
+#define PCIE_WB "pcie_wb"
+#define PCIE_WB_VERSION	"0.1"
+
+#define PCIE_WB_VENDOR_ID	0x1172
+#define	PCIE_WB_DEVICE_ID	0x0004
+
+#define CONTROL_REGISTER_HIGH	0
+#define CONTROL_REGISTER_LOW	4
+#define ERROR_FLAG_HIGH		8
+#define ERROR_FLAG_LOW		12
+#define WINDOW_OFFSET_HIGH	16
+#define WINDOW_OFFSET_LOW	20
+#define SDWB_ADDRESS_HIGH	24
+#define SDWB_ADDRESS_LOW	28
+
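+/* Address split for the BAR1 sliding window: the WINDOW_HIGH bits are
+ * written to WINDOW_OFFSET_LOW to position the 64 KiB window, and the
+ * WINDOW_LOW bits (32-bit aligned) index within it. */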
+#define WINDOW_HIGH	0xFFFF0000UL
+#define WINDOW_LOW	0x0000FFFCUL
+
+/* One per BAR */
+struct pcie_wb_resource {
+	unsigned long start;			/* start addr of BAR */
+	unsigned long end;			/* end addr of BAR */
+	unsigned long size;			/* size of BAR */
+	void *addr;				/* remapped addr */
+};
+
+/* One per physical card */
+struct pcie_wb_dev {
+	struct pci_dev* pci_dev;
+	struct pcie_wb_resource pci_res[2];
+	int    pci_irq[4];
+	
+	struct wishbone wb;
+	struct mutex mutex; /* only one user can open a cycle at a time */
+	unsigned int window_offset;
+	unsigned int low_addr, width, shift;
+};
+
+#endif
diff --git a/pcie-wb/spec_wb.c b/pcie-wb/spec_wb.c
new file mode 100644
index 0000000000000000000000000000000000000000..174a543e546c98eb7202b3420785f265f25f60a0
--- /dev/null
+++ b/pcie-wb/spec_wb.c
@@ -0,0 +1,337 @@
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/aer.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "spec_wb.h"
+#include "wishbone.h"
+
+#if defined(__BIG_ENDIAN)
+#define endian_addr(width, shift) ((sizeof(wb_data_t)-(width))-(shift))
+#elif defined(__LITTLE_ENDIAN)
+#define endian_addr(width, shift) (shift)
+#else
+#error "unknown machine byte order (endian)"
+#endif
+
+static unsigned int debug = 0;
+
+static void wb_cycle(struct wishbone* wb, int on)
+{
+	struct spec_wb_dev* dev;
+	
+	dev = container_of(wb, struct spec_wb_dev, wb);
+	
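+	/* The SPEC bridge exposes no cycle line; holding the mutex for the
+	 * duration of the cycle is what keeps users from interleaving. */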
+	if (on) mutex_lock(&dev->mutex);
+	
+	if (unlikely(debug))
+		printk(KERN_ALERT SPEC_WB ": cycle(%d)\n", on);
+	
+	if (!on) mutex_unlock(&dev->mutex);
+}
+
+static void wb_byteenable(struct wishbone* wb, unsigned char be)
+{
+	struct spec_wb_dev* dev;
+	
+	dev = container_of(wb, struct spec_wb_dev, wb);
+	
+	switch (be) {
+	case 0x1:
+		dev->width = 1;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(1, 0);
+		break;
+	case 0x2:
+		dev->width = 1;
+		dev->shift = 8;
+		dev->low_addr = endian_addr(1, 1);
+		break;
+	case 0x4:
+		dev->width = 1;
+		dev->shift = 16;
+		dev->low_addr = endian_addr(1, 2);
+		break;
+	case 0x8:
+		dev->width = 1;
+		dev->shift = 24;
+		dev->low_addr = endian_addr(1, 3);
+		break;
+	case 0x3:
+		dev->width = 2;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(2, 0);
+		break;
+	case 0xC:
+		dev->width = 2;
+		dev->shift = 16;
+		dev->low_addr = endian_addr(2, 2);
+		break;
+	case 0xF:
+		dev->width = 4;
+		dev->shift = 0;
+		dev->low_addr = endian_addr(4, 0);
+		break;
+	default:
+		/* noop -- ignore the strange bitmask */
+		break;
+	}
+}
+
+static void wb_write(struct wishbone* wb, wb_addr_t addr, wb_data_t data)
+{
+	struct spec_wb_dev* dev;
+	unsigned char* window;
+	
+	dev = container_of(wb, struct spec_wb_dev, wb);
+	window = (unsigned char*)dev->pci_res[WB_BAR].addr + WB_OFFSET;
+	addr = (addr & WB_LOW) + dev->low_addr;
+	
+	switch (dev->width) {
+	case 4:
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite32(0x%x, 0x%x)\n", data, addr);
+		iowrite32(data, window + addr);
+		break;
+	case 2: 
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite16(0x%x, 0x%x)\n", data >> dev->shift, addr);
+		iowrite16(data >> dev->shift, window + addr); 
+		break;
+	case 1: 
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": iowrite8(0x%x, 0x%x)\n", data >> dev->shift, addr);
+		iowrite8 (data >> dev->shift, window + addr); 
+		break;
+	}
+}
+
+static wb_data_t wb_read(struct wishbone* wb, wb_addr_t addr)
+{
+	wb_data_t out;
+	struct spec_wb_dev* dev;
+	unsigned char* window;
+	
+	dev = container_of(wb, struct spec_wb_dev, wb);
+	window = (unsigned char*)dev->pci_res[WB_BAR].addr + WB_OFFSET;
+	addr = (addr & WB_LOW) + dev->low_addr;
+	
+	switch (dev->width) {
+	case 4:
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread32(0x%x)\n", addr);
+		out = ((wb_data_t)ioread32(window + addr));
+		break;
+	case 2: 
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread16(0x%x)\n", addr);
+		out = ((wb_data_t)ioread16(window + addr)) << dev->shift;
+		break;
+	case 1: 
+		if (unlikely(debug)) printk(KERN_ALERT SPEC_WB ": ioread8(0x%x)\n", addr);
+		out = ((wb_data_t)ioread8 (window + addr)) << dev->shift;
+		break;
+	default: /* unreachable: width is always 1, 2 or 4 */
+		out = 0;
+		break;
+	}
+
+	mb(); /* ensure serial ordering of non-posted operations for wishbone */
+	
+	return out;
+}
+
+static wb_data_t wb_read_cfg(struct wishbone *wb, wb_addr_t addr)
+{
+	wb_data_t out;
+	
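+	/* Stub config space: no error flags and, presumably, the SDWB
+	 * descriptor table located at 0x30000 on the wishbone bus. */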
+	switch (addr) {
+	case 0:  out = 0; break;
+	case 4:  out = 0; break;
+	case 8:  out = 0; break;
+	case 12: out = 0x30000;  break;
+	default: out = 0; break;
+	}
+	
+	mb(); /* ensure serial ordering of non-posted operations for wishbone */
+	
+	return out;
+}
+
+static const struct wishbone_operations wb_ops = {
+	.cycle      = wb_cycle,
+	.byteenable = wb_byteenable,
+	.write      = wb_write,
+	.read       = wb_read,
+	.read_cfg   = wb_read_cfg,
+};
+
+#if 0
+static irq_handler_t irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	return (irq_handler_t)IRQ_HANDLED;
+}
+#endif
+
+static int setup_bar(struct pci_dev* pdev, struct spec_wb_resource* res, int bar)
+{
+	/* Read this BAR's address range from PCI config space */
+	res->start = pci_resource_start(pdev, bar);
+	res->end = pci_resource_end(pdev, bar);
+	res->size = res->end - res->start + 1;
+	
+	if (debug)
+		printk(KERN_ALERT SPEC_WB "/BAR%d  0x%lx - 0x%lx\n", bar, res->start, res->end);
+
+	if (!request_mem_region(res->start, res->size, SPEC_WB)) {
+		printk(KERN_ALERT SPEC_WB "/BAR%d: request_mem_region failed\n", bar);
+		return -ENOMEM;
+	}
+	
+	res->addr = ioremap_nocache(res->start, res->size);
+	if (!res->addr) {
+		printk(KERN_ALERT SPEC_WB "/BAR%d: ioremap_nocache failed\n", bar);
+		release_mem_region(res->start, res->size);
+		return -ENOMEM;
+	}
+	if (debug)
+		printk(KERN_ALERT SPEC_WB "/BAR%d: ioremap to %lx\n", bar, (unsigned long)res->addr);
+	
+	return 0;
+}
+
+static void destroy_bar(struct spec_wb_resource* res)
+{
+	if (debug)
+		printk(KERN_ALERT "released io 0x%lx\n", res->start);
+		
+	iounmap(res->addr);
+	release_mem_region(res->start, res->size);
+}
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	/* Enable the device, verify the gateware revision, map the BARs,
+	 * and register the wishbone bus with its character device.
+	 */
+	u8 revision;
+	struct spec_wb_dev *dev;
+
+	if (pci_enable_device(pdev) < 0) {
+		printk(KERN_ALERT SPEC_WB ": could not enable device\n");
+		goto fail_out;
+	}
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+	if (revision != 0x03) {
+		printk(KERN_ALERT SPEC_WB ": unsupported revision ID 0x%02x (expected 0x03)\n", revision);
+		goto fail_disable;
+	}
+
+	dev = kmalloc(sizeof(struct spec_wb_dev), GFP_KERNEL);
+	if (!dev) {
+		printk(KERN_ALERT SPEC_WB ": could not allocate memory for spec_wb_dev structure!\n");
+		goto fail_disable;
+	}
+	
+	/* Initialize structure */
+	dev->pci_dev = pdev;
+	dev->wb.wops = &wb_ops;
+	strcpy(dev->wb.name, SPEC_WB "%d");
+	dev->wb.parent = &pdev->dev;
+	mutex_init(&dev->mutex);
+	dev->window_offset = 0;
+	dev->low_addr = 0;
+	dev->width = 4;
+	dev->shift = 0;
+	pci_set_drvdata(pdev, dev);
+	
+	/* enable message signaled interrupts */
+	if (pci_enable_msi(pdev) != 0) {
+		/* resort to legacy interrupts */
+		printk(KERN_ALERT SPEC_WB ": could not enable MSI interrupting\n");
+		goto fail_free;
+	}
+
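+	/* Map the card's three memory regions; they sit at BAR indexes
+	 * 0, 2 and 4 (even indexes, as 64-bit BARs occupy two slots). */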
+	if (setup_bar(pdev, &dev->pci_res[0], 0) < 0) goto fail_msi;
+	if (setup_bar(pdev, &dev->pci_res[1], 2) < 0) goto fail_bar0;
+	if (setup_bar(pdev, &dev->pci_res[2], 4) < 0) goto fail_bar1;
+	
+	if (wishbone_register(&dev->wb) < 0) {
+		printk(KERN_ALERT SPEC_WB ": could not register wishbone bus\n");
+		goto fail_bar2;
+	}
+	
+	return 0;
+
+	/* cleaning up */
+fail_bar2:
+	destroy_bar(&dev->pci_res[2]);
+fail_bar1:
+	destroy_bar(&dev->pci_res[1]);
+fail_bar0:
+	destroy_bar(&dev->pci_res[0]);
+fail_msi:	
+	pci_disable_msi(pdev);
+fail_free:
+	kfree(dev);
+fail_disable:
+	pci_disable_device(pdev);
+fail_out:
+	return -EIO;
+}
+
+static void remove(struct pci_dev *pdev)
+{
+	/* Tear everything down in the reverse order of probe() */
+	struct spec_wb_dev *dev;
+	
+	dev = pci_get_drvdata(pdev);
+	wishbone_unregister(&dev->wb);
+	
+	destroy_bar(&dev->pci_res[2]);
+	destroy_bar(&dev->pci_res[1]);
+	destroy_bar(&dev->pci_res[0]);
+	
+	pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+
+	kfree(dev);
+}
+
+static struct pci_device_id ids[] = {
+	{ PCI_DEVICE(SPEC_WB_VENDOR_ID, SPEC_WB_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+static struct pci_driver spec_wb_driver = {
+	.name = SPEC_WB,
+	.id_table = ids,
+	.probe = probe,
+	.remove = remove,
+};
+
+static int __init spec_wb_init(void)
+{
+	return pci_register_driver(&spec_wb_driver);
+}
+
+static void __exit spec_wb_exit(void)
+{	
+	pci_unregister_driver(&spec_wb_driver);
+}
+
+MODULE_AUTHOR("Wesley W. Terpstra <w.tersptra@gsi.de>");
+MODULE_DESCRIPTION("CERN SPEC card bridge");
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "Enable debugging information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPEC_WB_VERSION);
+
+module_init(spec_wb_init);
+module_exit(spec_wb_exit);
diff --git a/pcie-wb/spec_wb.h b/pcie-wb/spec_wb.h
new file mode 100644
index 0000000000000000000000000000000000000000..53bfaeb522624f9a16b990034bc4ed2eb80e538a
--- /dev/null
+++ b/pcie-wb/spec_wb.h
@@ -0,0 +1,36 @@
+#ifndef SPEC_WB_DRIVER_H
+#define SPEC_WB_DRIVER_H
+
+#include "wishbone.h"
+
+#define SPEC_WB "spec_wb"
+#define SPEC_WB_VERSION	"0.1"
+
+#define SPEC_WB_VENDOR_ID	0x10dc
+#define	SPEC_WB_DEVICE_ID	0x018d
+
+#define WB_BAR		0
+#define WB_OFFSET	0x80000
+#define WB_LOW		0x3fffc
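+
+/* The wishbone bus appears as a fixed window in BAR0 at offset 0x80000;
+ * WB_LOW masks the 32-bit-aligned offset within that 256 KiB window. */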
+
+/* One per BAR */
+struct spec_wb_resource {
+	unsigned long start;			/* start addr of BAR */
+	unsigned long end;			/* end addr of BAR */
+	unsigned long size;			/* size of BAR */
+	void *addr;				/* remapped addr */
+};
+
+/* One per physical card */
+struct spec_wb_dev {
+	struct pci_dev* pci_dev;
+	struct spec_wb_resource pci_res[3];
+	int    pci_irq[4];
+	
+	struct wishbone wb;
+	struct mutex mutex; /* only one user can open a cycle at a time */
+	unsigned int window_offset;
+	unsigned int low_addr, width, shift;
+};
+
+#endif
diff --git a/pcie-wb/wishbone.c b/pcie-wb/wishbone.c
new file mode 100644
index 0000000000000000000000000000000000000000..e2d4c3157c088de7bd909ad75bfa19f574145fb7
--- /dev/null
+++ b/pcie-wb/wishbone.c
@@ -0,0 +1,433 @@
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/aio.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/socket.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#include "wishbone.h"
+
+/* Module parameters */
+static unsigned int max_devices = WISHBONE_MAX_DEVICES;
+
+/* Module globals */
+static LIST_HEAD(wishbone_list); /* Sorted by ascending minor number */
+static DEFINE_MUTEX(wishbone_mutex);
+static struct class *wishbone_class;
+static dev_t wishbone_dev_first;
+
+/* Compiler should be able to optimize this to one inlined instruction */
+static inline wb_data_t eb_to_cpu(unsigned char* x)
+{
+	switch (sizeof(wb_data_t)) {
+	case 8: return be64_to_cpu(*(wb_data_t*)x);
+	case 4: return be32_to_cpu(*(wb_data_t*)x);
+	case 2: return be16_to_cpu(*(wb_data_t*)x);
+	case 1: return *(wb_data_t*)x;
+	}
+}
+
+/* Compiler should be able to optimize this to one inlined instruction */
+static inline void eb_from_cpu(unsigned char* x, wb_data_t dat)
+{
+	switch (sizeof(wb_data_t)) {
+	case 8: *(wb_data_t*)x = cpu_to_be64(dat); break;
+	case 4: *(wb_data_t*)x = cpu_to_be32(dat); break;
+	case 2: *(wb_data_t*)x = cpu_to_be16(dat); break;
+	case 1: *(wb_data_t*)x = dat;              break;
+	}
+}
+
+static void etherbone_process(struct etherbone_context* context)
+{
+	struct wishbone *wb;
+	const struct wishbone_operations *wops;
+	unsigned int size, left, i, record_len;
+	unsigned char *buf;
+	
+	if (context->state == header) {
+		if (context->received < 8) {
+			/* no-op */
+			return;
+		}
+		
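+		/* Build the 8-byte probe response over the start of the
+		 * buffer: magic 0x4E6F, version 1 + probe-response flag,
+		 * and our address/data port widths packed into one byte. */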
+		context->buf[0] = 0x4E;
+		context->buf[1] = 0x6F;
+		context->buf[2] = 0x12; /* V.1 Probe-Response */
+		context->buf[3] = (sizeof(wb_addr_t)<<4) | sizeof(wb_data_t);
+		/* Echo back bytes 4-7, the probe identifier */
+		context->processed = 8;
+		context->state = idle;
+	}
+	
+	buf = &context->buf[0];
+	wb = context->wishbone;
+	wops = wb->wops;
+	
+	i = RING_INDEX(context->processed);
+	size = RING_PROC_LEN(context);
+	
+	for (left = size; left >= 4; left -= record_len) {
+		unsigned char flags, be, wcount, rcount;
+		
+		/* Determine record size */
+		flags  = buf[i+0];
+		be     = buf[i+1];
+		wcount = buf[i+2];
+		rcount = buf[i+3];
+		
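+		/* Record length in words: 1 header, a base write address if
+		 * wcount > 0, wcount write data, a base return address if
+		 * rcount > 0, and rcount read addresses. */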
+		record_len = 1 + wcount + rcount + (wcount > 0) + (rcount > 0);
+		record_len *= sizeof(wb_data_t);
+		
+		if (left < record_len) break;
+		
+		/* Configure byte enable and raise cycle line */
+		if (context->state == idle) {
+			wops->cycle(wb, 1);
+			context->state = cycle;
+		}
+		wops->byteenable(wb, be);
+
+		/* Process the writes */
+		if (wcount > 0) {
+			wb_addr_t base_address;
+			unsigned char j;
+			int wff = flags & ETHERBONE_WFF;
+			int wca = flags & ETHERBONE_WCA;
+			
+			/* Erase the header */
+			eb_from_cpu(buf+i, 0);
+			i = RING_INDEX(i + sizeof(wb_data_t));
+			base_address = eb_to_cpu(buf+i);
+			
+			if (wca) {
+				for (j = wcount; j > 0; --j) {
+					eb_from_cpu(buf+i, 0);
+					i = RING_INDEX(i + sizeof(wb_data_t));
+				}
+			} else {
+				for (j = wcount; j > 0; --j) {
+					eb_from_cpu(buf+i, 0);
+					i = RING_INDEX(i + sizeof(wb_data_t));
+					wops->write(wb, base_address, eb_to_cpu(buf+i));
+					
+					if (!wff) base_address += sizeof(wb_data_t);
+				}
+			}
+		}
+		
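+		/* Rewrite the header in place as the response record: the
+		 * reads below go back to the requester as writes, so RFF/BCA
+		 * map onto WFF/WCA and rcount becomes the reply's wcount. */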
+		buf[i+0] = (flags & ETHERBONE_CYC) | 
+		           (((flags & ETHERBONE_RFF) != 0) ? ETHERBONE_WFF : 0) |
+		           (((flags & ETHERBONE_BCA) != 0) ? ETHERBONE_WCA : 0);
+		buf[i+1] = be;
+		buf[i+2] = rcount; /* rcount -> wcount */
+		buf[i+3] = 0;
+		
+		if (rcount > 0) {
+			unsigned char j;
+			int rca = flags & ETHERBONE_RCA;
+			
+			/* Move past header, and leave BaseRetAddr intact */
+			i = RING_INDEX(i + sizeof(wb_data_t) + sizeof(wb_data_t));
+			
+			if (rca) {
+				for (j = rcount; j > 0; --j) {
+					eb_from_cpu(buf+i, wops->read_cfg(wb, eb_to_cpu(buf+i)));
+					i = RING_INDEX(i + sizeof(wb_data_t));
+				}
+			} else {
+				for (j = rcount; j > 0; --j) {
+					eb_from_cpu(buf+i, wops->read(wb, eb_to_cpu(buf+i)));
+					i = RING_INDEX(i + sizeof(wb_data_t));
+				}
+			}
+		} else {
+			i = RING_INDEX(i + sizeof(wb_data_t));
+		}
+		
+		if ((flags & ETHERBONE_CYC) != 0) {
+			wops->cycle(wb, 0);
+			context->state = idle;
+		}
+	}
+	
+	context->processed = RING_POS(context->processed + size - left);
+}
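+
+/*
+ * Worked example (a sketch): after the 8-byte probe exchange, a client that
+ * writes the 20-byte record
+ *   flags=0x00 be=0x0f wcount=1 rcount=1
+ *   0x00000100 (write base) 0x12345678 (write datum)
+ *   0x00008000 (base return address) 0x00000200 (read address)
+ * triggers a 32-bit write of 0x12345678 to wishbone address 0x100 and a
+ * read of address 0x200. The reply that read() returns carries a record
+ * with wcount=1 addressed to 0x8000 whose datum is the value read; the
+ * words preceding it are zeroed in place and act as no-op padding records.
+ */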
+
+static int char_open(struct inode *inode, struct file *filep)
+{	
+	struct etherbone_context *context;
+	
+	context = kmalloc(sizeof(struct etherbone_context), GFP_KERNEL);
+	if (!context) return -ENOMEM;
+	
+	context->wishbone = container_of(inode->i_cdev, struct wishbone, cdev);
+	context->fasync = 0;
+	mutex_init(&context->mutex);
+	init_waitqueue_head(&context->waitq);
+	context->state = header;
+	context->sent = 0;
+	context->processed = 0;
+	context->received = 0;
+	
+	filep->private_data = context;
+	
+	return 0;
+}
+
+static int char_release(struct inode *inode, struct file *filep)
+{
+	struct etherbone_context *context = filep->private_data;
+	
+	/* Did the bad user forget to drop the cycle line? */
+	if (context->state == cycle) {
+		context->wishbone->wops->cycle(context->wishbone, 0);
+	}
+	
+	kfree(context);
+	return 0;
+}
+
+static ssize_t char_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
+{
+	struct file *filep = iocb->ki_filp;
+	struct etherbone_context *context = filep->private_data;
+	unsigned int len, iov_len, ring_len, buf_len;
+	
+	iov_len = iov_length(iov, nr_segs);
+	if (unlikely(iov_len == 0)) return 0;
+	
+	mutex_lock(&context->mutex);
+	
+	ring_len = RING_READ_LEN(context);
+	len = min_t(unsigned int, ring_len, iov_len);
+	
+	/* How far till we must wrap?  */
+	buf_len = sizeof(context->buf) - RING_INDEX(context->sent);
+	
+	if (buf_len < len) {
+		memcpy_toiovecend(iov, RING_POINTER(context, sent), 0, buf_len);
+		memcpy_toiovecend(iov, &context->buf[0],            buf_len, len-buf_len);
+	} else {
+		memcpy_toiovecend(iov, RING_POINTER(context, sent), 0, len);
+	}
+	context->sent = RING_POS(context->sent + len);
+	
+	mutex_unlock(&context->mutex);
+	
+	/* Wake-up polling descriptors */
+	wake_up_interruptible(&context->waitq);
+	kill_fasync(&context->fasync, SIGIO, POLL_OUT);
+	
+	if (len == 0 && (filep->f_flags & O_NONBLOCK) != 0)
+		return -EAGAIN;
+	
+	return len;
+}
+
+static ssize_t char_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
+{
+	struct file *filep = iocb->ki_filp;
+	struct etherbone_context *context = filep->private_data;
+	unsigned int len, iov_len, ring_len, buf_len;
+	
+	iov_len = iov_length(iov, nr_segs);
+	if (unlikely(iov_len == 0)) return 0;
+	
+	mutex_lock(&context->mutex);
+	
+	ring_len = RING_WRITE_LEN(context);
+	len = min_t(unsigned int, ring_len, iov_len);
+	
+	/* How far till we must wrap?  */
+	buf_len = sizeof(context->buf) - RING_INDEX(context->received);
+	
+	if (buf_len < len) {
+		memcpy_fromiovecend(RING_POINTER(context, received), iov, 0, buf_len);
+		memcpy_fromiovecend(&context->buf[0],                iov, buf_len, len-buf_len);
+	} else {
+		memcpy_fromiovecend(RING_POINTER(context, received), iov, 0, len);
+	}
+	context->received = RING_POS(context->received + len);
+	
+	/* Process buffers */
+	etherbone_process(context);
+	
+	mutex_unlock(&context->mutex);
+	
+	/* Wake-up polling descriptors */
+	wake_up_interruptible(&context->waitq);
+	kill_fasync(&context->fasync, SIGIO, POLL_IN);
+	
+	if (len == 0 && (filep->f_flags & O_NONBLOCK) != 0)
+		return -EAGAIN;
+	
+	return len;
+}
+
+static unsigned int char_poll(struct file *filep, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct etherbone_context *context = filep->private_data;
+	
+	poll_wait(filep, &context->waitq, wait);
+	
+	mutex_lock(&context->mutex);
+	
+	if (RING_READ_LEN (context) != 0) mask |= POLLIN  | POLLRDNORM;
+	if (RING_WRITE_LEN(context) != 0) mask |= POLLOUT | POLLWRNORM;
+	
+	mutex_unlock(&context->mutex);
+	
+	return mask;
+}
+
+static int char_fasync(int fd, struct file *file, int on)
+{
+	struct etherbone_context* context;
+	context = file->private_data;
+
+	/* No locking - fasync_helper does its own locking */
+	return fasync_helper(fd, file, on, &context->fasync);
+}
+
+static const struct file_operations etherbone_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.read           = do_sync_read,
+	.aio_read       = char_aio_read,
+	.write          = do_sync_write,
+	.aio_write      = char_aio_write,
+	.open           = char_open,
+	.poll           = char_poll,
+	.release        = char_release,
+	.fasync         = char_fasync,
+};
+
+int wishbone_register(struct wishbone* wb)
+{
+	struct list_head *list_pos;
+	struct device *device;
+	dev_t dev;
+	
+	INIT_LIST_HEAD(&wb->list);
+	
+	mutex_lock(&wishbone_mutex);
+	
+	/* Search the list for gaps, stopping past the gap.
+	 * If we overflow the list (i.e. no gaps), the minor already points past the end.
+	 */
+	dev = wishbone_dev_first;
+	list_for_each(list_pos, &wishbone_list) {
+		struct wishbone *entry =
+			container_of(list_pos, struct wishbone, list);
+		
+		if (entry->dev != dev) {
+			/* We found a gap! */
+			break;
+		} else {
+			/* Run out of minors? */
+			if (MINOR(dev) - MINOR(wishbone_dev_first) == max_devices-1) goto fail_out;
+			
+			/* Try the next minor */
+			dev = MKDEV(MAJOR(dev), MINOR(dev) + 1);
+		}
+	}
+	
+	/* Connect the file operations with the cdev */
+	cdev_init(&wb->cdev, &etherbone_fops);
+	wb->cdev.owner = THIS_MODULE;
+	
+	/* Connect the major/minor number to the cdev */
+	if (cdev_add(&wb->cdev, dev, 1)) goto fail_out;
+	
+	/* Create the sysfs entry */
+	device = device_create(wishbone_class, wb->parent, dev, NULL, wb->name, MINOR(dev));
+	if (IS_ERR(device)) goto fail_del;
+	
+	/* Insert the device into the gap */
+	wb->dev = dev;
+	wb->device = device;
+	list_add_tail(&wb->list, list_pos);
+	
+	mutex_unlock(&wishbone_mutex);
+	return 0;
+
+fail_del:
+	cdev_del(&wb->cdev);
+fail_out:
+	mutex_unlock(&wishbone_mutex);
+	return -ENOMEM;
+}
+
+int wishbone_unregister(struct wishbone* wb)
+{
+	if (WARN_ON(list_empty(&wb->list)))
+		return -EINVAL;
+	
+	mutex_lock(&wishbone_mutex);
+	list_del(&wb->list);
+	device_destroy(wishbone_class, wb->dev);
+	cdev_del(&wb->cdev);
+	mutex_unlock(&wishbone_mutex);
+	
+	return 0;
+}
+
+static int __init wishbone_init(void)
+{
+	int err;
+	dev_t overflow;
+	
+	/* Confirm max_devices fits inside the minor number space */
+	overflow = MKDEV(0, max_devices-1);
+	if (MINOR(overflow) != max_devices-1) {
+		err = -EINVAL;
+		goto fail_last;
+	}
+	
+	wishbone_class = class_create(THIS_MODULE, "wb");
+	if (IS_ERR(wishbone_class)) {
+		err = PTR_ERR(wishbone_class);
+		goto fail_last;
+	}
+	
+	err = alloc_chrdev_region(&wishbone_dev_first, 0, max_devices, "wb");
+	if (err < 0)
+		goto fail_class;
+	
+	return 0;
+
+fail_class:
+	class_destroy(wishbone_class);
+fail_last:
+	return err;
+}
+
+static void __exit wishbone_exit(void)
+{
+	unregister_chrdev_region(wishbone_dev_first, max_devices);
+	class_destroy(wishbone_class);
+}
+
+MODULE_AUTHOR("Wesley W. Terpstra <w.terpstra@gsi.de>");
+MODULE_DESCRIPTION("Wishbone character device class");
+module_param(max_devices, uint, 0644);
+MODULE_PARM_DESC(max_devices, "Maximum number of attached wishbone devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(WISHBONE_VERSION);
+
+EXPORT_SYMBOL(wishbone_register);
+EXPORT_SYMBOL(wishbone_unregister);
+
+module_init(wishbone_init);
+module_exit(wishbone_exit);
diff --git a/pcie-wb/wishbone.h b/pcie-wb/wishbone.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ed3da74c9b994941e9bac1ee3d4d7a060489755
--- /dev/null
+++ b/pcie-wb/wishbone.h
@@ -0,0 +1,72 @@
+#ifndef WISHBONE_H
+#define WISHBONE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+
+#define WISHBONE_VERSION "0.1"
+#define WISHBONE_MAX_DEVICES 32	/* default only */
+
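+/* Etherbone record-header flag bits (byte 0 of a record); per the Etherbone
+ * spec: BCA = base return address is in config space, RCA = reads target
+ * config space, RFF = reads don't increment (FIFO), CYC = drop the cycle
+ * line after this record, WCA = writes target config space, WFF = writes
+ * don't increment (FIFO). */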
+#define ETHERBONE_BCA	0x80
+#define ETHERBONE_RCA	0x40
+#define ETHERBONE_RFF	0x20
+#define ETHERBONE_CYC	0x08
+#define ETHERBONE_WCA	0x04
+#define ETHERBONE_WFF	0x02
+
+/* Implementation assumes these have the same size: */
+typedef unsigned int wb_addr_t;
+typedef unsigned int wb_data_t;
+
+struct wishbone;
+struct wishbone_operations 
+{
+	void (*cycle)(struct wishbone* wb, int on);
+	void (*byteenable)(struct wishbone* wb, unsigned char mask);
+	void (*write)(struct wishbone* wb, wb_addr_t addr, wb_data_t data);
+	wb_data_t (*read)(struct wishbone* wb, wb_addr_t addr);
+	wb_data_t (*read_cfg)(struct wishbone* wb, wb_addr_t addr);
+};
+
+/* One per wishbone backend hardware */
+struct wishbone 
+{
+	char name[32];
+	const struct wishbone_operations* wops;
+	struct device *parent;
+	
+	/* internal: */
+	dev_t dev;
+	struct cdev cdev;
+	struct list_head list;
+	struct device *device;
+};
+
+#define RING_SIZE	8192
+#define RING_INDEX(x)	((x) & (RING_SIZE-1))
+#define RING_POS(x)	((x) & (RING_SIZE*2-1))
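+
+/* Positions advance modulo 2*RING_SIZE while indexes wrap at RING_SIZE, so
+ * a distance of RING_SIZE (full) is distinguishable from 0 (empty). */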
+
+/* One per open of character device */
+struct etherbone_context
+{
+	struct wishbone* wishbone;
+	struct fasync_struct *fasync;
+	struct mutex mutex;
+	wait_queue_head_t waitq;
+	
+	enum { header, idle, cycle } state;
+	unsigned int sent, processed, received; /* sent <= processed <= received */
+	
+	unsigned char buf[RING_SIZE]; /* Ring buffer */
+};
+
+#define RING_READ_LEN(ctx)   RING_POS((ctx)->processed - (ctx)->sent)
+#define RING_PROC_LEN(ctx)   RING_POS((ctx)->received  - (ctx)->processed)
+#define RING_WRITE_LEN(ctx)  RING_POS((ctx)->sent + RING_SIZE - (ctx)->received)
+#define RING_POINTER(ctx, idx) (&(ctx)->buf[RING_INDEX((ctx)->idx)])
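+
+/* With sent <= processed <= received (mod 2*RING_SIZE):
+ * READ_LEN  = processed but not yet returned to userspace,
+ * PROC_LEN  = received but not yet run through etherbone_process(),
+ * WRITE_LEN = space left for write() before clobbering unread data. */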
+
+int wishbone_register(struct wishbone* wb);
+int wishbone_unregister(struct wishbone* wb);
+
+#endif