Skip to content
Snippets Groups Projects
Commit ce4f5155 authored by Wesley W. Terpstra's avatar Wesley W. Terpstra
Browse files

wishbone: remove dev/wbs0

parent 0d2fa706
No related merge requests found
...@@ -25,9 +25,7 @@ static unsigned int max_devices = WISHONE_MAX_DEVICES; ...@@ -25,9 +25,7 @@ static unsigned int max_devices = WISHONE_MAX_DEVICES;
static LIST_HEAD(wishbone_list); /* Sorted by ascending minor number */ static LIST_HEAD(wishbone_list); /* Sorted by ascending minor number */
static DEFINE_MUTEX(wishbone_mutex); static DEFINE_MUTEX(wishbone_mutex);
static struct class *wishbone_master_class; static struct class *wishbone_master_class;
static struct class *wishbone_slave_class;
static dev_t wishbone_master_dev_first; static dev_t wishbone_master_dev_first;
static dev_t wishbone_slave_dev_first;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30) #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
...@@ -229,115 +227,6 @@ static void etherbone_master_process(struct etherbone_master_context* context) ...@@ -229,115 +227,6 @@ static void etherbone_master_process(struct etherbone_master_context* context)
context->processed = RING_POS(context->processed + size - left); context->processed = RING_POS(context->processed + size - left);
} }
/*
 * Stage the device's next pending wishbone request into rbuf as an
 * Etherbone record, followed by a read-back of the error status register
 * that terminates the cycle.  char_slave_aio_read() drains rbuf.
 * Does nothing unless rbuf has been fully consumed, negotiation is
 * complete, and the device actually has a request queued.
 */
static void etherbone_slave_out_process(struct etherbone_slave_context *context)
{
	struct wishbone_request request;
	uint8_t *wptr;
	if (context->rbuf_done != context->rbuf_end || /* unread data? */
	    !context->negotiated ||                    /* negotiation incomplete? */
	    !context->wishbone->wops->request(context->wishbone, &request)) {
		return;
	}
	/* The trailing record writes WBA_ERR; expect one answer back. */
	++context->pending_err;
	context->rbuf_done = 0;
	wptr = &context->rbuf[0];
	/* Record header: flags, byte-enable mask, then wcount/rcount below */
	wptr[0] = ETHERBONE_BCA;
	wptr[1] = request.mask;
	if (request.write) {
		/* wcount=1, rcount=0: base write address, then the data word */
		wptr[2] = 1;
		wptr[3] = 0;
		wptr += sizeof(wb_data_t);
		eb_from_cpu(wptr, request.addr);
		wptr += sizeof(wb_data_t);
		eb_from_cpu(wptr, request.data);
		wptr += sizeof(wb_data_t);
	} else {
		/* wcount=0, rcount=1: reply lands in WBA_DATA, then read addr */
		wptr[2] = 0;
		wptr[3] = 1;
		wptr += sizeof(wb_data_t);
		eb_from_cpu(wptr, WBA_DATA);
		wptr += sizeof(wb_data_t);
		eb_from_cpu(wptr, request.addr);
		wptr += sizeof(wb_data_t);
	}
	/* Second record: read the error status to close the cycle */
	wptr[0] = ETHERBONE_CYC /* !!! */ | ETHERBONE_BCA | ETHERBONE_RCA;
	wptr[1] = 0xf;
	wptr[2] = 0;
	wptr[3] = 1;
	wptr += sizeof(wb_data_t);
	eb_from_cpu(wptr, WBA_ERR);
	wptr += sizeof(wb_data_t);
	eb_from_cpu(wptr, 4); /* low bits of error status register */
	wptr += sizeof(wb_data_t);
	context->rbuf_end = wptr - &context->rbuf[0];
}
/*
 * Parse one complete Etherbone record accumulated in wbuf and apply it
 * to the config space.  Only full 32-bit writes to WBA_DATA / WBA_ERR
 * are accepted; a write to WBA_ERR answers one outstanding device
 * request via wops->reply().
 * Returns 0 on success, -EIO on a malformed or unexpected record.
 */
static int etherbone_slave_in_process(struct etherbone_slave_context *context)
{
	struct wishbone *wb;
	const struct wishbone_operations *wops;
	unsigned char flags, be, wcount, rcount;
	uint8_t *rptr;
	wb_addr_t base_address;
	wb_data_t data;
	unsigned char j;
	int wff;
	wb = context->wishbone;
	wops = wb->wops;
	/* Process record header */
	rptr = &context->wbuf[0];
	flags = rptr[0];
	be = rptr[1];
	wcount = rptr[2];
	rcount = rptr[3];
	rptr += sizeof(wb_data_t);
	/* Must be a full write to the config space! */
	if (rcount != 0) return -EIO; /* reads are not allowed here */
	if (wcount == 0) return 0;    /* empty record: nothing to do */
	if (be != 0xf) return -EIO;   /* all four byte lanes must be enabled */
	if ((flags & ETHERBONE_WCA) == 0) return -EIO; /* must target config space */
	/* Process the writes */
	base_address = eb_to_cpu(rptr);
	rptr += sizeof(wb_data_t);
	wff = flags & ETHERBONE_WFF; /* FIFO mode: write address does not advance */
	for (j = 0; j < wcount; ++j) {
		data = eb_to_cpu(rptr);
		rptr += sizeof(wb_data_t);
		switch (base_address) {
		case WBA_DATA:
			/* Latch the word consumed by a later WBA_ERR write */
			context->data = data;
			break;
		case WBA_ERR:
			/* Answer one pending request; bit 0 is the error flag */
			if (context->pending_err == 0) return -EIO;
			--context->pending_err;
			wops->reply(wb, data&1, context->data);
			break;
		default:
			return -EIO;
		}
		if (!wff) base_address += sizeof(wb_data_t);
	}
	return 0;
}
static int char_master_open(struct inode *inode, struct file *filep) static int char_master_open(struct inode *inode, struct file *filep)
{ {
struct etherbone_master_context *context; struct etherbone_master_context *context;
...@@ -489,222 +378,6 @@ static const struct file_operations etherbone_master_fops = { ...@@ -489,222 +378,6 @@ static const struct file_operations etherbone_master_fops = {
.fasync = char_master_fasync, .fasync = char_master_fasync,
}; };
/*
 * Called by a wishbone driver when the device has queued a new
 * slave-bound request: wake blocked readers/pollers and deliver SIGIO
 * to fasync subscribers.
 */
void wishbone_slave_ready(struct wishbone* wb)
{
	wake_up_interruptible(&wb->waitq);
	kill_fasync(&wb->fasync, SIGIO, POLL_IN);
}
/*
 * Open the slave character device.  Only one concurrent opener is
 * allowed; the context is registered as wb->slave and preloaded with an
 * Etherbone V1 probe so the first read returns the negotiation packet.
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if the
 * device is already open.
 */
static int char_slave_open(struct inode *inode, struct file *filep)
{
	struct etherbone_slave_context *context;

	/* kzalloc (was kmalloc): context->data was previously left
	 * uninitialized, yet etherbone_slave_in_process() passes it to
	 * wops->reply() if userspace writes WBA_ERR before WBA_DATA. */
	context = kzalloc(sizeof(struct etherbone_slave_context), GFP_KERNEL);
	if (!context) return -ENOMEM;

	context->wishbone = container_of(inode->i_cdev, struct wishbone, slave_cdev);
	mutex_init(&context->mutex);
	filep->private_data = context;

	/* Only a single open of the slave device is allowed */
	mutex_lock(&context->wishbone->mutex);
	if (context->wishbone->slave) {
		mutex_unlock(&context->wishbone->mutex);
		kfree(context);
		return -EBUSY;
	}
	context->wishbone->slave = context;

	/* Setup Etherbone state machine (kept explicit for clarity even
	 * though kzalloc already zeroed the allocation) */
	context->pending_err = 0;
	context->negotiated = 0;
	context->rbuf_done = 0;
	context->wbuf_fill = 0;

	/* Fill in the EB probe request served by the first read */
	context->rbuf[0] = 0x4E; /* 'N'  -- EB magic */
	context->rbuf[1] = 0x6F; /* 'o'  -- EB magic */
	context->rbuf[2] = 0x11; /* V1 probe */
	context->rbuf[3] = 0x44; /* 32-bit only */
	memset(&context->rbuf[4], 0, 4);
	context->rbuf_end = 8;

	mutex_unlock(&context->wishbone->mutex);
	return 0;
}
/*
 * Release the slave character device.  Any device requests the user
 * failed to answer are completed as errors, then the context is
 * detached from the wishbone device and freed.
 * Returns -EIO if replies had to be faked, 0 otherwise.
 */
static int char_slave_release(struct inode *inode, struct file *filep)
{
	struct etherbone_slave_context *context = filep->private_data;
	const struct wishbone_operations *wops = context->wishbone->wops;
	int pending, errors;

	mutex_lock(&context->mutex);

	/* We need to answer any requests pending, even if the bad user did not */
	errors = context->pending_err;
	pending = errors;
	while (pending > 0) {
		wops->reply(context->wishbone, 1, 0);
		--pending;
	}

	mutex_lock(&context->wishbone->mutex);
	context->wishbone->slave = 0;
	mutex_unlock(&context->wishbone->mutex);

	mutex_unlock(&context->mutex);
	kfree(context);

	return errors ? -EIO : 0;
}
/*
 * read() on the slave device: stage the next device request (if any)
 * and copy as much of the response buffer as fits into the caller's
 * iovec.
 * Returns bytes copied, -EAGAIN for O_NONBLOCK with nothing staged,
 * -EINTR if interrupted waiting for the lock, or -EFAULT on a bad
 * user buffer.
 */
static ssize_t char_slave_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filep = iocb->ki_filp;
	struct etherbone_slave_context *context = filep->private_data;
	unsigned int iov_len, buf_len, len;

	iov_len = iov_length(iov, nr_segs);
	if (unlikely(iov_len == 0)) return 0;

	if (mutex_lock_interruptible(&context->mutex))
		return -EINTR;

	/* Stage the next pending device request into rbuf, if any */
	etherbone_slave_out_process(context);

	buf_len = context->rbuf_end - context->rbuf_done;
	len = (buf_len > iov_len) ? iov_len : buf_len;

	/* Fix: the original ignored this return value, so a faulting user
	 * buffer silently discarded len bytes of the staged record. */
	if (memcpy_toiovecend(iov, context->rbuf + context->rbuf_done, 0, len)) {
		mutex_unlock(&context->mutex);
		return -EFAULT;
	}
	context->rbuf_done += len;

	mutex_unlock(&context->mutex);

	if (len == 0 && (filep->f_flags & O_NONBLOCK) != 0)
		return -EAGAIN;

	return len;
}
/*
 * write() on the slave device: consume Etherbone data from the caller's
 * iovec.  The first 4-byte header must be the probe response
 * (negotiation); afterwards each 4-byte record header announces the
 * payload length and a complete record is handed to
 * etherbone_slave_in_process().
 * Returns bytes consumed, -EINTR, -EFAULT on a bad user buffer, or
 * -EIO if the stream is malformed (partial consumption).
 */
static ssize_t char_slave_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filep = iocb->ki_filp;
	struct etherbone_slave_context *context = filep->private_data;
	unsigned int iov_len, iov_off, buf_len, len, outlen;

	outlen = iov_length(iov, nr_segs);

	if (mutex_lock_interruptible(&context->mutex))
		return -EINTR;

	for (iov_off = 0, iov_len = outlen; iov_len > 0; iov_off += len, iov_len -= len) {
		/* Optimization to avoid quadratic complexity */
		while (iov_off >= iov->iov_len) {
			iov_off -= iov->iov_len;
			++iov;
		}

		if (context->wbuf_fill < 4) {
			/* Accumulate the 4-byte record header first */
			buf_len = 4 - context->wbuf_fill;
			if (buf_len > iov_len) {
				len = iov_len;
			} else {
				len = buf_len;
			}
			/* Fix: the original ignored copy faults at all three
			 * memcpy_fromiovecend call sites in this function. */
			if (memcpy_fromiovecend(context->wbuf + context->wbuf_fill, iov, iov_off, len))
				goto fault;
			context->wbuf_fill += len;
		} else {
			if (!context->negotiated) {
				/* First header must be the EB probe response */
				if (context->wbuf[0] != 0x4E ||
				    context->wbuf[1] != 0x6F ||
				    (context->wbuf[2] & 0x7) != 0x6 ||
				    (context->wbuf[3] & 0x44) != 0x44) {
					break;
				} else {
					context->wbuf_fill = 0;
					context->negotiated = 1;
					wake_up_interruptible(&context->wishbone->waitq);
					kill_fasync(&context->wishbone->fasync, SIGIO, POLL_IN);
				}
				len = 0;
			} else {
				/* Payload size from header: optional write/read
				 * base addresses + wcount + rcount words */
				buf_len =
					((context->wbuf[2] > 0) +
					 (context->wbuf[3] > 0) +
					 context->wbuf[2] +
					 context->wbuf[3] +
					 1) * 4;
				buf_len -= context->wbuf_fill;
				if (buf_len > iov_len) {
					/* Partial record: buffer it and wait for more */
					len = iov_len;
					if (memcpy_fromiovecend(context->wbuf + context->wbuf_fill, iov, iov_off, len))
						goto fault;
					context->wbuf_fill += len;
				} else {
					/* Record complete: process it */
					len = buf_len;
					if (memcpy_fromiovecend(context->wbuf + context->wbuf_fill, iov, iov_off, len))
						goto fault;
					context->wbuf_fill = 0;
					if (etherbone_slave_in_process(context) != 0) break;
				}
			}
		}
	}

	mutex_unlock(&context->mutex);

	if (iov_len > 0) return -EIO;
	return outlen;

fault:
	mutex_unlock(&context->mutex);
	return -EFAULT;
}
/*
 * poll() on the slave device.  Writing is always possible; reading
 * becomes ready once a response record has been staged in rbuf.
 */
static unsigned int char_slave_poll(struct file *filep, poll_table *wait)
{
	struct etherbone_slave_context *context = filep->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filep, &context->wishbone->waitq, wait);

	mutex_lock(&context->mutex);
	etherbone_slave_out_process(context);
	if (context->rbuf_done != context->rbuf_end)
		mask |= POLLIN | POLLRDNORM;
	mutex_unlock(&context->mutex);

	return mask;
}
/* fasync() on the slave device: (un)subscribe to SIGIO on new requests. */
static int char_slave_fasync(int fd, struct file *file, int on)
{
	struct etherbone_slave_context* context = file->private_data;
	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &context->wishbone->fasync);
}
/* File operations for the wishbone slave character device (wbs%d). */
static const struct file_operations etherbone_slave_fops = {
	.owner     = THIS_MODULE,
	.llseek    = no_llseek,
	.open      = char_slave_open,
	.release   = char_slave_release,
	.read      = do_sync_read,     /* sync wrapper over aio_read */
	.aio_read  = char_slave_aio_read,
	.write     = do_sync_write,    /* sync wrapper over aio_write */
	.aio_write = char_slave_aio_write,
	.poll      = char_slave_poll,
	.fasync    = char_slave_fasync,
};
int wishbone_register(struct wishbone* wb) int wishbone_register(struct wishbone* wb)
{ {
struct list_head *list_pos; struct list_head *list_pos;
...@@ -739,36 +412,21 @@ int wishbone_register(struct wishbone* wb) ...@@ -739,36 +412,21 @@ int wishbone_register(struct wishbone* wb)
} }
} }
init_waitqueue_head(&wb->waitq);
wb->fasync = 0;
mutex_init(&wb->mutex);
wb->slave = 0;
/* Connect the file operations with the cdevs */ /* Connect the file operations with the cdevs */
cdev_init(&wb->master_cdev, &etherbone_master_fops); cdev_init(&wb->master_cdev, &etherbone_master_fops);
wb->master_cdev.owner = wb->wops->owner; wb->master_cdev.owner = wb->wops->owner;
cdev_init(&wb->slave_cdev, &etherbone_slave_fops);
wb->slave_cdev.owner = wb->wops->owner;
wb->master_dev = wb->master_dev =
MKDEV( MKDEV(
MAJOR(wishbone_master_dev_first), MAJOR(wishbone_master_dev_first),
MINOR(wishbone_master_dev_first) + devoff); MINOR(wishbone_master_dev_first) + devoff);
wb->slave_dev =
MKDEV(
MAJOR(wishbone_slave_dev_first),
MINOR(wishbone_slave_dev_first) + devoff);
/* Connect the major/minor number to the cdev */ /* Connect the major/minor number to the cdev */
if (cdev_add(&wb->master_cdev, wb->master_dev, 1)) goto fail_out; if (cdev_add(&wb->master_cdev, wb->master_dev, 1)) goto fail_out;
if (cdev_add(&wb->slave_cdev, wb->slave_dev, 1)) goto fail_master_cdev;
/* Create the sysfs entry */ /* Create the sysfs entry */
wb->master_device = device_create(wishbone_master_class, wb->parent, wb->master_dev, NULL, "wbm%d", devoff); wb->master_device = device_create(wishbone_master_class, wb->parent, wb->master_dev, NULL, "wbm%d", devoff);
if (IS_ERR(wb->master_device)) goto fail_slave_cdev; if (IS_ERR(wb->master_device)) goto fail_master_cdev;
wb->slave_device = device_create(wishbone_slave_class, wb->parent, wb->slave_dev, NULL, "wbs%d", devoff);
if (IS_ERR(wb->slave_device)) goto fail_master_sys;
/* Insert the device into the gap */ /* Insert the device into the gap */
list_add_tail(&wb->list, list_pos); list_add_tail(&wb->list, list_pos);
...@@ -776,10 +434,6 @@ int wishbone_register(struct wishbone* wb) ...@@ -776,10 +434,6 @@ int wishbone_register(struct wishbone* wb)
mutex_unlock(&wishbone_mutex); mutex_unlock(&wishbone_mutex);
return 0; return 0;
fail_master_sys:
device_destroy(wishbone_master_class, wb->master_dev);
fail_slave_cdev:
cdev_del(&wb->slave_cdev);
fail_master_cdev: fail_master_cdev:
cdev_del(&wb->master_cdev); cdev_del(&wb->master_cdev);
fail_out: fail_out:
...@@ -794,15 +448,19 @@ int wishbone_unregister(struct wishbone* wb) ...@@ -794,15 +448,19 @@ int wishbone_unregister(struct wishbone* wb)
mutex_lock(&wishbone_mutex); mutex_lock(&wishbone_mutex);
list_del(&wb->list); list_del(&wb->list);
device_destroy(wishbone_slave_class, wb->slave_dev);
device_destroy(wishbone_master_class, wb->master_dev); device_destroy(wishbone_master_class, wb->master_dev);
cdev_del(&wb->slave_cdev);
cdev_del(&wb->master_cdev); cdev_del(&wb->master_cdev);
mutex_unlock(&wishbone_mutex); mutex_unlock(&wishbone_mutex);
return 0; return 0;
} }
/*
 * The slave interface (dev/wbsN) has been removed, so there is no
 * longer a reader to wake.  Kept as an intentional no-op so existing
 * drivers calling it still link and run unchanged.
 * (Dead commented-out wake_up/kill_fasync code deleted.)
 */
void wishbone_slave_ready(struct wishbone* wb)
{
}
static int __init wishbone_init(void) static int __init wishbone_init(void)
{ {
int err; int err;
...@@ -822,28 +480,13 @@ static int __init wishbone_init(void) ...@@ -822,28 +480,13 @@ static int __init wishbone_init(void)
goto fail_last; goto fail_last;
} }
wishbone_slave_class = class_create(THIS_MODULE, "wbs");
if (IS_ERR(wishbone_slave_class)) {
err = PTR_ERR(wishbone_slave_class);
goto fail_master_class;
}
if (alloc_chrdev_region(&wishbone_master_dev_first, 0, max_devices, "wbm") < 0) { if (alloc_chrdev_region(&wishbone_master_dev_first, 0, max_devices, "wbm") < 0) {
err = -EIO; err = -EIO;
goto fail_slave_class; goto fail_master_class;
}
if (alloc_chrdev_region(&wishbone_slave_dev_first, 0, max_devices, "wbs") < 0) {
err = -EIO;
goto fail_master_dev;
} }
return 0; return 0;
fail_master_dev:
unregister_chrdev_region(wishbone_master_dev_first, max_devices);
fail_slave_class:
class_destroy(wishbone_slave_class);
fail_master_class: fail_master_class:
class_destroy(wishbone_master_class); class_destroy(wishbone_master_class);
fail_last: fail_last:
...@@ -852,9 +495,7 @@ fail_last: ...@@ -852,9 +495,7 @@ fail_last:
static void __exit wishbone_exit(void) static void __exit wishbone_exit(void)
{ {
unregister_chrdev_region(wishbone_slave_dev_first, max_devices);
unregister_chrdev_region(wishbone_master_dev_first, max_devices); unregister_chrdev_region(wishbone_master_dev_first, max_devices);
class_destroy(wishbone_slave_class);
class_destroy(wishbone_master_class); class_destroy(wishbone_master_class);
} }
......
...@@ -24,7 +24,6 @@ typedef unsigned int wb_data_t; ...@@ -24,7 +24,6 @@ typedef unsigned int wb_data_t;
struct wishbone; struct wishbone;
struct etherbone_master_context; struct etherbone_master_context;
struct etherbone_slave_context;
struct wishbone_request struct wishbone_request
{ {
...@@ -59,15 +58,9 @@ struct wishbone ...@@ -59,15 +58,9 @@ struct wishbone
/* internal (guarded by global mutex--register/unregister): */ /* internal (guarded by global mutex--register/unregister): */
struct list_head list; struct list_head list;
dev_t master_dev, slave_dev; dev_t master_dev;
struct cdev master_cdev, slave_cdev; struct cdev master_cdev;
struct device *master_device, *slave_device; struct device *master_device;
/* wake-q for blocking slave io */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
struct mutex mutex; /* guards slave below */
struct etherbone_slave_context *slave;
}; };
#define RING_SIZE 8192 #define RING_SIZE 8192
...@@ -88,24 +81,6 @@ struct etherbone_master_context ...@@ -88,24 +81,6 @@ struct etherbone_master_context
unsigned char buf[RING_SIZE]; /* Ring buffer */ unsigned char buf[RING_SIZE]; /* Ring buffer */
}; };
/* Per-open state of the slave character device (dev/wbsN). */
struct etherbone_slave_context
{
	struct wishbone* wishbone;  /* device this open file is bound to */
	struct mutex mutex;         /* serializes file operations on this context */
	unsigned int pending_err;   /* unanswered operations */
	int negotiated;             /* nonzero once the EB probe response was received */
	wb_data_t data;             /* last value written to WBA_DATA */
	unsigned int rbuf_done; /* data remaining to be read: [rbuf_done, rbuf_end) */
	unsigned int rbuf_end;
	/* staged response: up to 6 words (probe or request + error readback) */
	unsigned char rbuf[sizeof(wb_data_t)*6];
	unsigned int wbuf_fill; /* data remaining to be processed: [0, wbuf_full) */
	/* largest record: header + 2 base addresses + 255 writes + 255 reads */
	unsigned char wbuf[sizeof(wb_data_t)*(255*2+3)];
};
#define RING_READ_LEN(ctx) RING_POS((ctx)->processed - (ctx)->sent) #define RING_READ_LEN(ctx) RING_POS((ctx)->processed - (ctx)->sent)
#define RING_PROC_LEN(ctx) RING_POS((ctx)->received - (ctx)->processed) #define RING_PROC_LEN(ctx) RING_POS((ctx)->received - (ctx)->processed)
#define RING_WRITE_LEN(ctx) RING_POS((ctx)->sent + RING_SIZE - (ctx)->received) #define RING_WRITE_LEN(ctx) RING_POS((ctx)->sent + RING_SIZE - (ctx)->received)
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment