/* Federico Vaga for CERN, 2011, GNU GPLv2 or later */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#define __ZIO_INTERNAL__
#include <linux/zio.h>
#include <linux/zio-sysfs.h>
#include <linux/zio-buffer.h>
#include <linux/zio-trigger.h>
static struct zio_status *zstat = &zio_global_status; /* Always use ptr */
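/*
 * Names of the standard attributes, indexed by their ZATTR_* enums: these
 * are the file names that show up in sysfs for devices, triggers and buffers.
 */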
const char zio_zdev_attr_names[ZATTR_STD_NUM_ZDEV][ZIO_NAME_LEN] = {
[ZATTR_GAIN] = "gain_factor",
[ZATTR_OFFSET] = "offset",
[ZATTR_NBIT] = "resolution-bits",
[ZATTR_MAXRATE] = "max-sample-rate",
[ZATTR_VREFTYPE] = "vref-src",
};
EXPORT_SYMBOL(zio_zdev_attr_names);
const char zio_trig_attr_names[ZATTR_STD_NUM_TRIG][ZIO_NAME_LEN] = {
[ZATTR_TRIG_REENABLE] = "re-enable",
[ZATTR_TRIG_NSAMPLES] = "nsamples",
};
EXPORT_SYMBOL(zio_trig_attr_names);
const char zio_zbuf_attr_names[ZATTR_STD_NUM_ZBUF][ZIO_NAME_LEN] = {
[ZATTR_ZBUF_MAXLEN] = "max-buffer-len",
};
EXPORT_SYMBOL(zio_zbuf_attr_names);
/*
 * Each top-level ZIO object has a unique name.
 * You can find a particular object by searching for its name.
*/
static inline struct zio_object_list_item *__find_by_name(
struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *cur;
if (!name)
return NULL;
list_for_each_entry(cur, &zobj_list->list, list) {
pr_debug("%s:%d %s=%s\n", __func__, __LINE__, cur->name, name);
if (strcmp(cur->name, name) == 0)
			return cur; /* object found */
	}
	return NULL;
}

static inline struct zio_object_list_item *__zio_object_get(
		struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *list_item;
/* search for default trigger */
list_item = __find_by_name(zobj_list, name);
	if (!list_item)
		return NULL;
/* increment trigger usage to prevent rmmod */
if (!try_module_get(list_item->owner))
return NULL;
return list_item;
}
static struct zio_buffer_type *zio_buffer_get(char *name)
{
struct zio_object_list_item *list_item;
	list_item = __zio_object_get(&zstat->all_buffer_types, name);
	if (!list_item)
		return ERR_PTR(-ENODEV);
	return container_of(list_item->obj_head, struct zio_buffer_type, head);
}

static inline void zio_buffer_put(struct zio_buffer_type *zbuf)
{
pr_debug("%s:%d %p\n", __func__, __LINE__, zbuf->owner);
module_put(zbuf->owner);
}
static struct zio_trigger_type *zio_trigger_get(char *name)
{
	struct zio_object_list_item *list_item;

	list_item = __zio_object_get(&zstat->all_trigger_types, name);
	if (!list_item)
		return ERR_PTR(-ENODEV);
	return container_of(list_item->obj_head, struct zio_trigger_type, head);
}

static inline void zio_trigger_put(struct zio_trigger_type *trig)
{
pr_debug("%s:%d %p\n", __func__, __LINE__, trig->owner);
module_put(trig->owner);
}
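/*
 * The get/put helpers above pin the module providing a buffer or trigger
 * type while an instance of it is in use, so the type cannot be removed
 * with rmmod from under a registered device.
 */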
/* data_done is called by the driver, after {in,out}put_cset */
void zio_generic_data_done(struct zio_cset *cset)
{
struct zio_buffer_type *zbuf;
struct zio_device *zdev;
struct zio_channel *chan;
struct zio_block *block;
struct zio_ti *ti;
struct zio_bi *bi;
pr_debug("%s:%d\n", __func__, __LINE__);
ti = cset->ti;
zdev = cset->zdev;
zbuf = cset->zbuf;
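	/*
	 * Output: free the blocks just consumed by the device and pick the
	 * next block, if any, from each buffer instance.
	 * Input: store each filled block into its buffer instance.
	 */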
if (unlikely((ti->flags & ZIO_DIR) == ZIO_DIR_OUTPUT)) {
cset_for_each(cset, chan) {
bi = chan->bi;
block = chan->active_block;
if (block)
zbuf->b_op->free_block(chan->bi, block);
/* We may have a new block ready, or not */
chan->active_block = zbuf->b_op->retr_block(chan->bi);
		}
		goto out;
	}
/* DIR_INPUT */
cset_for_each(cset, chan) {
bi = chan->bi;
block = chan->active_block;
if (!block)
continue;
if (zbuf->b_op->store_block(bi, block)) /* may fail, no prob */
zbuf->b_op->free_block(bi, block);
}
out:
ti->flags &= (~ZTI_STATUS);
}
EXPORT_SYMBOL(zio_generic_data_done);
static void __zio_fire_input_trigger(struct zio_ti *ti)
{
struct zio_buffer_type *zbuf;
struct zio_block *block;
struct zio_device *zdev;
struct zio_cset *cset;
struct zio_channel *chan;
	struct zio_control *ctrl;
	int errdone = 0;

cset = ti->cset;
zdev = cset->zdev;
zbuf = cset->zbuf;
pr_debug("%s:%d\n", __func__, __LINE__);
/* Allocate the buffer for the incoming sample, in active channels */
cset_for_each(cset, chan) {
		ctrl = zio_alloc_control(GFP_ATOMIC);
		if (!ctrl) {
			if (!errdone++)
pr_err("%s: can't alloc control\n", __func__);
continue;
}
memcpy(ctrl, ti->current_ctrl, ZIO_CONTROL_SIZE);
ctrl->chan_i = chan->index;
block = zbuf->b_op->alloc_block(chan->bi, ctrl,
ctrl->ssize * ctrl->nsamples,
GFP_ATOMIC);
if (IS_ERR(block)) {
if (!errdone++)
pr_err("%s: can't alloc block\n", __func__);
			zio_free_control(ctrl);
			continue;
		}
		chan->active_block = block;
	}
if (!zdev->d_op->input_cset(cset)) {
/* It succeeded immediately */
		ti->t_op->data_done(cset);
	}
}

static void __zio_fire_output_trigger(struct zio_ti *ti)
{
struct zio_cset *cset = ti->cset;
struct zio_device *zdev = cset->zdev;
pr_debug("%s:%d\n", __func__, __LINE__);
/* We are expected to already have a block in active channels */
if (!zdev->d_op->output_cset(cset)) {
/* It succeeded immediately */
ti->t_op->data_done(cset);
}
}
/*
 * When a software trigger fires, it should call this function. Hardware
 * triggers don't need to.
*/
void zio_fire_trigger(struct zio_ti *ti)
{
/* If the trigger runs too early, ti->cset is still NULL */
	if (!ti->cset)
		return;
	/* Check if the previous fire is still running */
if ((ti->flags & ZTI_STATUS) == ZTI_STATUS_ON)
return;
ti->flags |= ZTI_STATUS_ON;
/* Copy the stamp (we are software driven anyways) */
ti->current_ctrl->tstamp.secs = ti->tstamp.tv_sec;
ti->current_ctrl->tstamp.ticks = ti->tstamp.tv_nsec;
ti->current_ctrl->tstamp.bins = ti->tstamp_extra;
/*
* And the sequence number too (first returned seq is 1).
* Sequence number is always increased to identify un-stored
* blocks or other errors in trigger activation.
*/
ti->current_ctrl->seq_num++;
if (likely((ti->flags & ZIO_DIR) == ZIO_DIR_INPUT))
__zio_fire_input_trigger(ti);
else
__zio_fire_output_trigger(ti);
}
EXPORT_SYMBOL(zio_fire_trigger);
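/*
 * Minimal sketch (illustrative only, not part of ZIO): a timer-based
 * software trigger could fire from its timer handler like this:
 *
 *	static void my_timer_fn(unsigned long arg)
 *	{
 *		struct zio_ti *ti = (void *)arg;
 *
 *		getnstimeofday(&ti->tstamp);
 *		zio_fire_trigger(ti);
 *	}
 */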
static int __has_auto_index(char *s)
{
int i = 0;
for (i = 0; i < ZIO_NAME_LEN-1; i++) {
if (s[i] != '%')
continue;
i++;
if (s[i] == 'd')
return 1;
}
return 0;
}
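/*
 * Return the length the name will have after the trailing decimal index
 * is incremented once: it grows by one character only when that number
 * gains a digit (e.g. "chan9" -> "chan10").
 */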
static int __next_strlen(char *str)
{
int increment = 0, i;
for (i = strlen(str)-1; i > 0; i--) {
		/* if it is an ASCII digit */
if (str[i] >= '0' && str[i] <= '9') {
if (str[i] == '9')
continue;
else
break;
} else {
increment++;
break;
}
}
return strlen(str) + increment;
}
/*
* The zio device name must be unique. If it is not unique, a busy error is
* returned.
*/
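/*
 * Example: registering three devices with the template name "mydev-%d"
 * yields "mydev-0", "mydev-1" and "mydev-2"; registering a second device
 * with the same fixed name (no "%d") is refused with -EBUSY.
 */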
static int zobj_unique_name(struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *cur;
struct zio_obj_head *tmp;
	unsigned int counter = 0, again, len;
	char name_to_check[ZIO_NAME_LEN];
	int auto_index;

	pr_debug("%s\n", __func__);
	if (!name)
		return -EINVAL;
	len = strlen(name);
	if (!len)
		return -EINVAL;
	auto_index = __has_auto_index(name);
strncpy(name_to_check, name, ZIO_NAME_LEN);
do {
again = 0;
		if (auto_index) { /* TODO: useless once ZIO becomes a bus */
sprintf(name_to_check, name, counter++);
len = strlen(name_to_check);
}
list_for_each_entry(cur, &zobj_list->list, list) {
tmp = cur->obj_head;
if (strcmp(tmp->name, name_to_check))
continue; /* no conflict */
/* conflict found */
/* if not auto-assigned, then error */
if (!auto_index) {
pr_err("ZIO: name \"%s\" is already taken\n",
name);
return -EBUSY;
}
if (__next_strlen(name_to_check) > ZIO_NAME_LEN) {
pr_err("ZIO: invalid name \"%s\"\n", name);
return -EINVAL;
}
again = 1;
break;
}
} while (again);
strncpy(name, name_to_check, ZIO_NAME_LEN);
return 0;
}
static struct zio_attribute *__zattr_clone(const struct zio_attribute *src,
unsigned int n)
{
struct zio_attribute *dest = NULL;
unsigned int size;
if (!src)
return NULL;
size = n * sizeof(struct zio_attribute);
dest = kmalloc(size, GFP_KERNEL);
	if (!dest)
		return NULL;
	memcpy(dest, src, size);
return dest;
}
static void __zattr_unclone(struct zio_attribute *zattr)
{
kfree(zattr);
}
static int __zattr_set_copy(struct zio_attribute_set *dest,
struct zio_attribute_set *src)
{
if (!dest || !src)
return -EINVAL;
dest->n_std_attr = src->n_std_attr;
dest->n_ext_attr = src->n_ext_attr;
dest->std_zattr = __zattr_clone(src->std_zattr, dest->n_std_attr);
dest->ext_zattr = __zattr_clone(src->ext_zattr, dest->n_ext_attr);
return 0;
}
static void __zattr_set_free(struct zio_attribute_set *zattr_set)
{
if (!zattr_set)
return;
__zattr_unclone(zattr_set->ext_zattr);
__zattr_unclone(zattr_set->std_zattr);
}
/* When touching attributes, we always use the spinlock for the hosting dev */
static spinlock_t *__get_spinlock(struct zio_obj_head *head)
{
spinlock_t *lock;
switch (head->zobj_type) {
case ZDEV:
lock = &to_zio_dev(&head->kobj)->lock;
break;
case ZCSET:
lock = &to_zio_cset(&head->kobj)->zdev->lock;
break;
case ZCHAN:
lock = &to_zio_chan(&head->kobj)->cset->zdev->lock;
break;
case ZTI: /* we might not want to take a lock but... */
		lock = &to_zio_ti(&head->kobj)->lock;
		break;
	case ZBI:
		lock = &to_zio_bi(&head->kobj)->lock;
break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
return NULL;
}
return lock;
}
/* Retrieve an attribute set from an object head */
static struct zio_attribute_set *__get_zattr_set(struct zio_obj_head *head)
{
struct zio_attribute_set *zattr_set;
switch (head->zobj_type) {
case ZDEV:
zattr_set = &to_zio_dev(&head->kobj)->zattr_set;
break;
case ZCSET:
zattr_set = &to_zio_cset(&head->kobj)->zattr_set;
break;
case ZCHAN:
zattr_set = &to_zio_chan(&head->kobj)->zattr_set;
		break;
	case ZTRIG:
		zattr_set = &to_zio_trig(&head->kobj)->zattr_set;
		break;
	case ZBUF:
		zattr_set = &to_zio_buf(&head->kobj)->zattr_set;
		break;
	case ZTI:
zattr_set = &to_zio_ti(&head->kobj)->zattr_set;
break;
case ZBI:
zattr_set = &to_zio_bi(&head->kobj)->zattr_set;
break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
return NULL;
}
return zattr_set;
}
static inline void __zattr_copy_value(struct zio_ctrl_attr *ctrl,
enum zattr_flags flags,
uint32_t index, uint32_t value)
{
int i;
if (flags & ZATTR_TYPE)
ctrl->ext_val[index] = value;
else
ctrl->std_val[index] = value;
pr_info("Standard\n");
for (i = 0; i < 16; ++i)
pr_info("%d\n", ctrl->std_val[i]);
pr_info("Extended\n");
for (i = 0; i < 32; ++i)
pr_info("%d\n", ctrl->ext_val[i]);
}
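/*
 * Propagate a newly written attribute value down the hierarchy, so that
 * every affected channel (or trigger instance) reports it in the control
 * structures of the blocks it produces.
 */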
static void __zattr_propagate_value(struct zio_obj_head *head,
struct zio_attribute *zattr)
{
int i, j, index, value, flags;
struct zio_ti *ti;
struct zio_device *zdev;
struct zio_cset *cset;
struct zio_channel *chan;
index = zattr->index;
value = zattr->value;
flags = zattr->flags;
switch (head->zobj_type) {
case ZDEV:
zdev = to_zio_dev(&head->kobj);
for (i = 0; i < zdev->n_cset; ++i) {
cset = &zdev->cset[i];
for (j = 0; j < cset->n_chan; ++j)
__zattr_copy_value(&cset->chan[j].zattr_val,
flags, index, value);
}
break;
case ZCSET:
cset = to_zio_cset(&head->kobj);
for (i = 0; i < cset->n_chan; ++i)
__zattr_copy_value(&cset->chan[i].zattr_val,
flags, index, value);
break;
case ZCHAN:
chan = to_zio_chan(&head->kobj);
__zattr_copy_value(&chan->zattr_val, flags, index, value);
break;
case ZTI:
ti = to_zio_ti(&head->kobj);
__zattr_copy_value(&ti->zattr_val, flags, index, value);
break;
default:
return;
}
}
/*
 * ZIO objects all handle uint32_t values. So the show and store methods
 * are centralized here, and each device provides its own info_get and
 * conf_set hooks, which handle binary 32-bit numbers. Both functions take
 * the object's spinlock to prevent concurrent accesses to device registers.
 */
static ssize_t zattr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
int err = 0;
ssize_t len = 0;
spinlock_t *lock;
struct zio_attribute *zattr = to_zio_zattr(attr);
pr_debug("%s\n", __func__);
if (unlikely(strcmp(attr->name, "name") == 0)) {
/* print device name*/
return sprintf(buf, "%s\n", to_zio_head(kobj)->name);
}
if (zattr->s_op->info_get) {
		lock = __get_spinlock(to_zio_head(kobj));
		spin_lock(lock);
err = zattr->s_op->info_get(kobj, zattr, &zattr->value);
spin_unlock(lock);
if (err)
return err;
}
len = sprintf(buf, "%i\n", zattr->value);
return len;
}
static ssize_t zattr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t size)
{
long val;
int err = 0;
struct zio_attribute *zattr = to_zio_zattr(attr);
spinlock_t *lock;
pr_debug("%s\n", __func__);
	err = strict_strtol(buf, 0, &val);
	if (err)
		return -EINVAL;
if (zattr->s_op->conf_set) {
		lock = __get_spinlock(to_zio_head(kobj));
		spin_lock(lock);
err = zattr->s_op->conf_set(kobj, zattr, (uint32_t)val);
if (err) {
spin_unlock(lock);
return err;
}
zattr->value = (uint32_t)val;
		__zattr_propagate_value(to_zio_head(kobj), zattr);
		spin_unlock(lock);
	}
	return size;
}
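/*
 * Minimal sketch of the driver-side hooks these helpers call (names are
 * illustrative, not part of ZIO):
 *
 *	static int mydev_info_get(struct kobject *kobj,
 *				  struct zio_attribute *zattr,
 *				  uint32_t *usr_val)
 *	{
 *		*usr_val = zattr->value; // or read the hardware register
 *		return 0;
 *	}
 *
 *	static const struct zio_sysfs_operations mydev_s_op = {
 *		.info_get = mydev_info_get,
 *		.conf_set = mydev_conf_set,
 *	};
 */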
static const struct sysfs_ops zio_attribute_ktype_ops = {
.show = zattr_show,
	.store = zattr_store,
};

static struct attribute default_attrs[] = {
	{
		.name = "name",		/* show the name */
.mode = 0444, /* read only */
},
};
static struct attribute *def_attr_ptr[] = {
	&default_attrs[0],
	NULL,
};

static struct kobj_type zdktype = { /* For standard and extended attribute */
.release = NULL,
.sysfs_ops = &zio_attribute_ktype_ops,
.default_attrs = def_attr_ptr,
};
static int __check_dev_zattr(struct zio_attribute_set *parent,
struct zio_attribute_set *this)
{
int i, j;
/* verify standard attribute */
for (i = 0; i < this->n_std_attr; ++i) {
if (this->std_zattr[i].index == ZATTR_INDEX_NONE)
continue; /* next attribute */
for (j = 0; j < parent->n_std_attr; ++j) {
/*
* a standard attribute must be unique from a child
			 * up to the root; this allows us to build a consistent
			 * vector of values in the control structure.
*/
if (this->std_zattr[i].index ==
parent->std_zattr[j].index) {
pr_err("ZIO: attribute conflict for %s\n",
this->std_zattr[i].attr.name);
return -EINVAL;
}
}
}
return 0;
}
static int __check_attr(struct attribute *attr,
const struct zio_sysfs_operations *s_op)
{
	/* check name */
if (!attr->name)
return -EINVAL;
/* check mode */
if ((attr->mode & S_IWUGO) == S_IWUGO && !s_op->conf_set) {
pr_err("ZIO: %s: attribute %s has write permission but "
"no write function\n", __func__, attr->name);
return -ENOSYS;
}
return 0;
}
/* create a set of zio attributes: the standard one and the extended one */
static int zattr_set_create(struct zio_obj_head *head,
const struct zio_sysfs_operations *s_op)
{
	int i, j, attr_count = 0, n_attr, err = 0;
	struct zio_attribute_set *zattr_set;
struct attribute_group *group;
pr_debug("%s\n", __func__);
zattr_set = __get_zattr_set(head);
if (!zattr_set)
return -EINVAL; /* message already printed */
group = &zattr_set->group;
n_attr = (zattr_set->n_std_attr + zattr_set->n_ext_attr);
if (!n_attr || (!zattr_set->std_zattr && !zattr_set->ext_zattr)) {
zattr_set->n_std_attr = 0;
zattr_set->n_ext_attr = 0;
return 0;
}
group->attrs = kzalloc(sizeof(struct attribute) * n_attr, GFP_KERNEL);
if (!group->attrs)
return -ENOMEM;
	if (!zattr_set->std_zattr)
		goto ext;
	for (i = 0; i < zattr_set->n_std_attr; ++i) {
err = __check_attr(&zattr_set->std_zattr[i].attr, s_op);
switch (err) {
case 0:
/* valid attribute */
group->attrs[attr_count++] =
&zattr_set->std_zattr[i].attr;
zattr_set->std_zattr[i].s_op = s_op;
zattr_set->std_zattr[i].index = i;
break;
case -EINVAL: /* unused std attribute */
zattr_set->std_zattr[i].index = ZATTR_INDEX_NONE;
break;
default:
			return err;
		}
	}
ext:
if (!zattr_set->ext_zattr)
goto out;
for (j = 0; j < zattr_set->n_ext_attr; ++j) {
err = __check_attr(&zattr_set->ext_zattr[j].attr, s_op);
if (err)
return err;
/* valid attribute */
group->attrs[attr_count++] = &zattr_set->ext_zattr[j].attr;
zattr_set->ext_zattr[j].s_op = s_op;
zattr_set->ext_zattr[j].index = j;
		zattr_set->ext_zattr[j].flags |= ZATTR_TYPE_EXT;
	}
out:
	return sysfs_create_group(&head->kobj, group);
}

/* Remove an existent set of attributes */
static void zattr_set_remove(struct zio_obj_head *head)
{
struct zio_attribute_set *zattr_set;
zattr_set = __get_zattr_set(head);
if (!zattr_set)
return;
if (!zattr_set->group.attrs)
return;
/* remove all standard and extended attributes */
sysfs_remove_group(&head->kobj, &zattr_set->group);
	kfree(zattr_set->group.attrs);
}

/* Create a buffer instance according to the buffer type defined in cset */
static int __buffer_create_instance(struct zio_channel *chan)
{
struct zio_buffer_type *zbuf = chan->cset->zbuf;
struct zio_bi *bi;
int err;
/* create buffer */
bi = zbuf->b_op->create(zbuf, chan, FMODE_READ);
if (IS_ERR(bi))
return PTR_ERR(bi);
	/* Now fill the buffer instance, ops, head, then the rest */
bi->b_op = zbuf->b_op;
bi->f_op = zbuf->f_op;
bi->flags |= (chan->flags & ZIO_DIR);
bi->head.zobj_type = ZBI;
err = kobject_init_and_add(&bi->head.kobj, &zdktype,
&chan->head.kobj, "buffer");
if (err)
goto out_kobj;
snprintf(bi->head.name, ZIO_NAME_LEN, "%s-%s-%d-%d",
zbuf->head.name,
chan->cset->zdev->head.name,
chan->cset->index,
chan->index);
err = __zattr_set_copy(&bi->zattr_set, &zbuf->zattr_set);
if (err)
goto out_clone;
err = zattr_set_create(&bi->head, zbuf->s_op);
if (err)
goto out_sysfs;
init_waitqueue_head(&bi->q);
spin_lock(&zbuf->lock);
list_add(&bi->list, &zbuf->list);
spin_unlock(&zbuf->lock);
bi->cset = chan->cset;
chan->bi = bi;
	/* Done. This chan->bi marks everything is running (FIXME?) */
mb();
bi->chan = chan;
return 0;
out_sysfs:
__zattr_set_free(&bi->zattr_set);
out_clone:
kobject_del(&bi->head.kobj);
out_kobj:
kobject_put(&bi->head.kobj);
zbuf->b_op->destroy(bi);
return err;
}
/* Destroy a buffer instance */
static void __buffer_destroy_instance(struct zio_channel *chan)
{
struct zio_buffer_type *zbuf = chan->cset->zbuf;
struct zio_bi *bi = chan->bi;
chan->bi = NULL;
/* Remove from buffer instance list */
spin_lock(&zbuf->lock);
list_del(&bi->list);
spin_unlock(&zbuf->lock);
zattr_set_remove(&bi->head);
__zattr_set_free(&bi->zattr_set);
kobject_del(&bi->head.kobj);
kobject_put(&bi->head.kobj);
/* Finally destroy the instance */
zbuf->b_op->destroy(bi);
}
/* Create a trigger instance according to the trigger type defined in cset */
static int __trigger_create_instance(struct zio_cset *cset)
{
int err;
struct zio_control *ctrl;
struct zio_ti *ti;
pr_debug("%s:%d\n", __func__, __LINE__);
/* Allocate and fill current control as much as possible*/
ctrl = zio_alloc_control(GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;
	ctrl->cset_i = cset->index;
strncpy(ctrl->devname, cset->zdev->head.name, ZIO_NAME_LEN);
strncpy(ctrl->triggername, cset->trig->head.name, ZIO_NAME_LEN);
ctrl->sbits = 8; /* FIXME: retrieve from attribute */
ctrl->ssize = cset->ssize;
ti = cset->trig->t_op->create(cset->trig, cset, ctrl, 0/*FIXME*/);
if (IS_ERR(ti)) {
err = PTR_ERR(ti);
pr_err("%s: can't create trigger error %i\n", __func__, err);
goto out;
}
/* Now fill the trigger instance, ops, head, then the rest */
ti->t_op = cset->trig->t_op;
ti->f_op = cset->trig->f_op;
ti->flags |= cset->flags & ZIO_DIR;
ti->head.zobj_type = ZTI;
err = kobject_init_and_add(&ti->head.kobj, &zdktype,
&cset->head.kobj, "trigger");
if (err)
goto out_kobj;
snprintf(ti->head.name, ZIO_NAME_LEN, "%s-%s-%d",
cset->trig->head.name,
cset->zdev->head.name,
cset->index);
err = __zattr_set_copy(&ti->zattr_set, &cset->trig->zattr_set);
	if (err)
		goto out_clone;
err = zattr_set_create(&ti->head, cset->trig->s_op);
if (err)
goto out_sysfs;
/* Add to trigger instance list */
spin_lock(&cset->trig->lock);
list_add(&ti->list, &cset->trig->list);
spin_unlock(&cset->trig->lock);
cset->ti = ti;
/* Done. This cset->ti marks everything is running (FIXME?) */
mb();
ti->cset = cset;
return 0;
out_sysfs:
__zattr_set_free(&ti->zattr_set);
out_clone:
kobject_del(&ti->head.kobj);
out_kobj:
kobject_put(&ti->head.kobj);
ti->t_op->destroy(ti);
out:
zio_free_control(ctrl);
return err;
}
/* Destroy a trigger instance */
static void __trigger_destroy_instance(struct zio_cset *cset)
{
struct zio_ti *ti = cset->ti;
struct zio_control *ctrl = ti->current_ctrl;
cset->ti = NULL;
/* Remove from trigger instance list */
spin_lock(&cset->trig->lock);
list_del(&ti->list);
spin_unlock(&cset->trig->lock);
zattr_set_remove(&ti->head);
__zattr_set_free(&ti->zattr_set);
kobject_del(&ti->head.kobj);
kobject_put(&ti->head.kobj);
/* Finally destroy the instance and free the default control*/
cset->trig->t_op->destroy(ti);
zio_free_control(ctrl);
}
/*
* chan_register registers one channel. It is important to register
 * or unregister all the channels of a cset at the same time to keep the
 * cset in a consistent state.
*/
static int chan_register(struct zio_channel *chan)
{
int err;
pr_debug("%s:%d\n", __func__, __LINE__);
if (!chan)
return -EINVAL;
chan->head.zobj_type = ZCHAN;
err = kobject_init_and_add(&chan->head.kobj, &zdktype,
&chan->cset->head.kobj, "chan%i", chan->index);
if (err)
goto out_add;
/* Create sysfs channel attributes */
chan->zattr_set.n_std_attr = ZATTR_STD_NUM_ZDEV;
	err = zattr_set_create(&chan->head, chan->cset->zdev->s_op);
	if (err)
		goto out_sysfs;
/* Check attribute hierarchy */
err = __check_dev_zattr(&chan->cset->zattr_set, &chan->zattr_set);
if (err)
goto out_remove_sys;
err = __check_dev_zattr(&chan->cset->zdev->zattr_set, &chan->zattr_set);
if (err)
goto out_remove_sys;
err = __buffer_create_instance(chan);
	if (err)
		goto out_remove_sys;
	err = zio_create_chan_devices(chan);
if (err)
goto out_create;
/*
	 * If no name was assigned, ZIO assigns one: the channel name is
	 * set to the kobject name. The kobject name has no length limit,
	 * so the channel name keeps only the first ZIO_NAME_LEN characters
	 * of it. A duplicate channel name is not a problem anyway.
*/
if (!strlen(chan->head.name))
strncpy(chan->head.name, chan->head.kobj.name, ZIO_NAME_LEN);
return 0;
out_create:
__buffer_destroy_instance(chan);
out_remove_sys:
	zattr_set_remove(&chan->head);
out_sysfs:
kobject_del(&chan->head.kobj);
out_add:
/* we must _put even if it returned error */
kobject_put(&chan->head.kobj);
return err;
}
static void chan_unregister(struct zio_channel *chan)
{
pr_debug("%s:%d\n", __func__, __LINE__);
if (!chan)
return;
zio_destroy_chan_devices(chan);
__buffer_destroy_instance(chan);
	/* remove sysfs channel attributes */
zattr_set_remove(&chan->head);
kobject_del(&chan->head.kobj);
kobject_put(&chan->head.kobj);
}
/*
 * @cset_alloc_chan: low-level drivers can avoid allocating their channels:
 * they just say how many there are and ZIO allocates them.
* @cset_free_chan: if ZIO allocated channels, then it frees them; otherwise
* it does nothing.
*/
static struct zio_channel *cset_alloc_chan(struct zio_cset *cset)
{
	int i;

	pr_debug("%s:%d\n", __func__, __LINE__);
	/* If there are no static channels, ZIO must allocate them */
if (cset->chan)
return cset->chan;
/* initialize memory to zero to have correct flags and attrs */
cset->chan = kzalloc(sizeof(struct zio_channel) * cset->n_chan,
GFP_KERNEL);
if (!cset->chan)
return ERR_PTR(-ENOMEM);
cset->flags |= ZCSET_CHAN_ALLOC;
if (!cset->chan_template)
return cset->chan;
cset->chan_template->zattr_set.n_std_attr = ZATTR_STD_NUM_ZDEV;
/* apply template on channels */
for (i = 0; i < cset->n_chan; ++i) {
memcpy(cset->chan + i, cset->chan_template,
sizeof(struct zio_channel));
		__zattr_set_copy(&cset->chan[i].zattr_set,
&cset->chan_template->zattr_set);
}
return cset->chan;
}
static inline void cset_free_chan(struct zio_cset *cset)
{
pr_debug("%s:%d\n", __func__, __LINE__);
/* Only allocated channels need to be freed */
if (cset->flags & ZCSET_CHAN_ALLOC)
kfree(cset->chan);
}
static int cset_register(struct zio_cset *cset)
{
int i, j, err = 0;
struct zio_buffer_type *zbuf;
struct zio_trigger_type *trig;
char *name;
pr_debug("%s:%d\n", __func__, __LINE__);
if (!cset)
return -EINVAL;
if (!cset->n_chan) {
pr_err("ZIO: no channels in cset%i\n", cset->index);
return -EINVAL;
}
if (!cset->ssize) {
pr_err("ZIO: ssize can not be 0 in cset%i\n", cset->index);
return -EINVAL;
}
err = __zio_minorbase_get(cset);
if (err) {
pr_err("ZIO: no minors available\n");
return -EBUSY;
}
cset->head.zobj_type = ZCSET;
err = kobject_init_and_add(&cset->head.kobj, &zdktype,
&cset->zdev->head.kobj, "cset%i", cset->index);
if (err)
goto out_add;
/* Create sysfs cset attributes */
cset->zattr_set.n_std_attr = ZATTR_STD_NUM_ZDEV;