/* Federico Vaga for CERN, 2011, GNU GPLv2 or later */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#define __ZIO_INTERNAL__
#include <linux/zio.h>
#include <linux/zio-sysfs.h>
#include <linux/zio-buffer.h>
#include <linux/zio-trigger.h>
#define ZOBJ_SYSFS_NAME "name"
#define ZOBJ_SYSFS_ENABLE "enable"
#define CSET_SYSFS_BUFFER "current_buffer"
#define CSET_SYSFS_TRIGGER "current_trigger"
static struct zio_status *zstat = &zio_global_status; /* Always use ptr */
const char zio_zdev_attr_names[ZATTR_STD_NUM_ZDEV][ZIO_NAME_LEN] = {
[ZATTR_GAIN] = "gain_factor",
[ZATTR_OFFSET] = "offset",
[ZATTR_NBIT] = "resolution-bits",
[ZATTR_MAXRATE] = "max-sample-rate",
[ZATTR_VREFTYPE] = "vref-src",
};
EXPORT_SYMBOL(zio_zdev_attr_names);
const char zio_trig_attr_names[ZATTR_STD_NUM_TRIG][ZIO_NAME_LEN] = {
[ZATTR_TRIG_REENABLE] = "re-enable",
[ZATTR_TRIG_NSAMPLES] = "nsamples",
};
EXPORT_SYMBOL(zio_trig_attr_names);
const char zio_zbuf_attr_names[ZATTR_STD_NUM_ZBUF][ZIO_NAME_LEN] = {
[ZATTR_ZBUF_MAXLEN] = "max-buffer-len",
[ZATTR_ZBUF_MAXKB] = "max-buffer-kb",
};
EXPORT_SYMBOL(zio_zbuf_attr_names);
/* buffer instance prototype */
static struct zio_bi *__bi_create_and_init(struct zio_buffer_type *zbuf,
struct zio_channel *chan);
static void __bi_destroy(struct zio_buffer_type *zbuf, struct zio_bi *bi);
static int __bi_register(struct zio_buffer_type *zbuf, struct zio_channel *chan,
struct zio_bi *bi, const char *name);
static void __bi_unregister(struct zio_buffer_type *zbuf, struct zio_bi *bi);
/* trigger instance prototype */
static struct zio_ti *__ti_create_and_init(struct zio_trigger_type *trig,
struct zio_cset *cset);
static void __ti_destroy(struct zio_trigger_type *trig, struct zio_ti *ti);
static int __ti_register(struct zio_trigger_type *trig, struct zio_cset *cset,
struct zio_ti *ti, const char *name);
static void __ti_unregister(struct zio_trigger_type *trig, struct zio_ti *ti);
/*
Top-level ZIO objects have unique names.
You can find a particular object by searching for its name.
*/
static inline struct zio_object_list_item *__find_by_name(
struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *cur;
if (!name)
return NULL;
list_for_each_entry(cur, &zobj_list->list, list) {
pr_debug("%s:%d %s=%s\n", __func__, __LINE__, cur->name, name);
if (strcmp(cur->name, name) == 0)
			return cur; /* object found */
	}
	return NULL;
}
static inline struct zio_object_list_item *__zio_object_get(
		struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *list_item;
/* search for default trigger */
list_item = __find_by_name(zobj_list, name);
	if (!list_item)
		return NULL;
/* increment trigger usage to prevent rmmod */
if (!try_module_get(list_item->owner))
return NULL;
return list_item;
}
static struct zio_buffer_type *zio_buffer_get(char *name)
{
struct zio_object_list_item *list_item;
list_item = __zio_object_get(&zstat->all_buffer_types, name);
	if (!list_item)
		return ERR_PTR(-ENODEV);
	return container_of(list_item->obj_head, struct zio_buffer_type, head);
}
static inline void zio_buffer_put(struct zio_buffer_type *zbuf)
{
pr_debug("%s:%d %p\n", __func__, __LINE__, zbuf->owner);
module_put(zbuf->owner);
}
static struct zio_trigger_type *zio_trigger_get(char *name)
{
	struct zio_object_list_item *list_item;

	list_item = __zio_object_get(&zstat->all_trigger_types, name);
	if (!list_item)
		return ERR_PTR(-ENODEV);
	return container_of(list_item->obj_head, struct zio_trigger_type, head);
}
static inline void zio_trigger_put(struct zio_trigger_type *trig)
{
pr_debug("%s:%d %p\n", __func__, __LINE__, trig->owner);
module_put(trig->owner);
}
/* data_done is called by the driver, after {in,out}put_cset */
void zio_generic_data_done(struct zio_cset *cset)
{
struct zio_buffer_type *zbuf;
struct zio_device *zdev;
struct zio_channel *chan;
struct zio_block *block;
struct zio_ti *ti;
struct zio_bi *bi;
pr_debug("%s:%d\n", __func__, __LINE__);
ti = cset->ti;
zdev = cset->zdev;
zbuf = cset->zbuf;
if (unlikely((ti->flags & ZIO_DIR) == ZIO_DIR_OUTPUT)) {
cset_for_each(cset, chan) {
bi = chan->bi;
block = chan->active_block;
if (block)
zbuf->b_op->free_block(chan->bi, block);
/* We may have a new block ready, or not */
chan->active_block = zbuf->b_op->retr_block(chan->bi);
}
	} else {
		/* DIR_INPUT */
		cset_for_each(cset, chan) {
			bi = chan->bi;
			block = chan->active_block;
			if (!block)
				continue;
			if (zbuf->b_op->store_block(bi, block)) /* may fail, no prob */
				zbuf->b_op->free_block(bi, block);
		}
	}
spin_lock(&cset->lock);
ti->flags &= (~ZTI_BUSY); /* Reset busy, now is idle */
spin_unlock(&cset->lock);
}
EXPORT_SYMBOL(zio_generic_data_done);
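/*
 * Illustrative sketch (not part of the original file): a trigger module can
 * plug the generic implementation above into its operations, assuming the
 * usual zio_trigger_operations layout; "my_trig_ops" and "my_trig_abort"
 * are hypothetical names.
 *
 *	static const struct zio_trigger_operations my_trig_ops = {
 *		.data_done = zio_generic_data_done,
 *		.abort = my_trig_abort,
 *	};
 */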
static void __zio_fire_input_trigger(struct zio_ti *ti)
{
struct zio_buffer_type *zbuf;
struct zio_block *block;
struct zio_device *zdev;
struct zio_cset *cset;
struct zio_channel *chan;
	struct zio_control *ctrl;
	int errdone = 0;	/* report each allocation error only once */
cset = ti->cset;
zdev = cset->zdev;
zbuf = cset->zbuf;
pr_debug("%s:%d\n", __func__, __LINE__);
/* Allocate the buffer for the incoming sample, in active channels */
cset_for_each(cset, chan) {
		ctrl = zio_alloc_control(GFP_ATOMIC);
		if (!ctrl) {
			if (!errdone++)
				pr_err("%s: can't alloc control\n", __func__);
			continue;
		}
memcpy(ctrl, ti->current_ctrl, ZIO_CONTROL_SIZE);
ctrl->chan_i = chan->index;
block = zbuf->b_op->alloc_block(chan->bi, ctrl,
ctrl->ssize * ctrl->nsamples,
GFP_ATOMIC);
if (IS_ERR(block)) {
/* Remove the following print, it's common */
if (0 && !errdone++)
pr_err("%s: can't alloc block\n", __func__);
			zio_free_control(ctrl);
			continue;
		}
		chan->active_block = block;
	}
if (!cset->raw_io(cset)) {
/* It succeeded immediately */
		ti->t_op->data_done(cset);
	}
}
static void __zio_fire_output_trigger(struct zio_ti *ti)
{
	struct zio_cset *cset = ti->cset;
pr_debug("%s:%d\n", __func__, __LINE__);
/* We are expected to already have a block in active channels */
if (!cset->raw_io(cset)) {
/* It succeeded immediately */
ti->t_op->data_done(cset);
}
}
/*
* When a software trigger fires, it should call this function. Hw ones don't
*/
void zio_fire_trigger(struct zio_ti *ti)
{
/* If the trigger runs too early, ti->cset is still NULL */
	if (!ti->cset)
		return;
/* check if trigger is disabled or previous fire is still running */
	if (unlikely((ti->flags & ZIO_STATUS) == ZIO_DISABLED ||
		     (ti->flags & ZTI_BUSY)))
		return;
spin_lock(&ti->cset->lock);
ti->flags |= ZTI_BUSY;
spin_unlock(&ti->cset->lock);
/* Copy the stamp (we are software driven anyways) */
ti->current_ctrl->tstamp.secs = ti->tstamp.tv_sec;
ti->current_ctrl->tstamp.ticks = ti->tstamp.tv_nsec;
ti->current_ctrl->tstamp.bins = ti->tstamp_extra;
/*
* And the sequence number too (first returned seq is 1).
* Sequence number is always increased to identify un-stored
* blocks or other errors in trigger activation.
*/
ti->current_ctrl->seq_num++;
if (likely((ti->flags & ZIO_DIR) == ZIO_DIR_INPUT))
__zio_fire_input_trigger(ti);
else
__zio_fire_output_trigger(ti);
}
EXPORT_SYMBOL(zio_fire_trigger);
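/*
 * Illustrative sketch (not part of the original file): a timer-driven
 * software trigger would typically take its timestamp and then call
 * zio_fire_trigger() from the timer handler. The names "my_ti" and
 * "my_timer_handler" are hypothetical; tstamp is the struct timespec
 * copied into current_ctrl above.
 *
 *	static void my_timer_handler(unsigned long arg)
 *	{
 *		struct zio_ti *my_ti = (void *)arg;
 *
 *		getnstimeofday(&my_ti->tstamp);
 *		zio_fire_trigger(my_ti);
 *	}
 */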
static int __has_auto_index(char *s)
{
int i = 0;
for (i = 0; i < ZIO_NAME_LEN-1; i++) {
if (s[i] != '%')
continue;
i++;
if (s[i] == 'd')
return 1;
}
return 0;
}
static int __next_strlen(char *str)
{
int increment = 0, i;
for (i = strlen(str)-1; i > 0; i--) {
		/* if it is an ASCII digit */
if (str[i] >= '0' && str[i] <= '9') {
if (str[i] == '9')
continue;
else
break;
} else {
increment++;
break;
}
}
return strlen(str) + increment;
}
/*
* The zio device name must be unique. If it is not unique, a busy error is
* returned.
*/
static int zobj_unique_name(struct zio_object_list *zobj_list, char *name)
{
struct zio_object_list_item *cur;
struct zio_obj_head *tmp;
unsigned int counter = 0, again, len;
char name_to_check[ZIO_NAME_LEN];
int auto_index = __has_auto_index(name);
pr_debug("%s\n", __func__);
if (!name)
return -EINVAL;
len = strlen(name);
if (!len)
return -EINVAL;
strncpy(name_to_check, name, ZIO_NAME_LEN);
do {
again = 0;
		if (auto_index) { /* TODO: useless once zio becomes a bus */
sprintf(name_to_check, name, counter++);
len = strlen(name_to_check);
}
list_for_each_entry(cur, &zobj_list->list, list) {
tmp = cur->obj_head;
if (strcmp(tmp->name, name_to_check))
continue; /* no conflict */
/* conflict found */
/* if not auto-assigned, then error */
if (!auto_index) {
pr_err("ZIO: name \"%s\" is already taken\n",
name);
return -EBUSY;
}
if (__next_strlen(name_to_check) > ZIO_NAME_LEN) {
pr_err("ZIO: invalid name \"%s\"\n", name);
return -EINVAL;
}
again = 1;
break;
}
} while (again);
strncpy(name, name_to_check, ZIO_NAME_LEN);
return 0;
}
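/*
 * Example (illustrative, not from the original file): with auto-indexing,
 * registering three objects that all ask for the name "trig-%d" yields
 * "trig-0", "trig-1" and "trig-2". Registering a plain name such as
 * "my-trigger" twice makes the second call fail with -EBUSY.
 */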
static struct zio_attribute *__zattr_clone(const struct zio_attribute *src,
unsigned int n)
{
struct zio_attribute *dest = NULL;
unsigned int size;
if (!src)
return NULL;
size = n * sizeof(struct zio_attribute);
dest = kmalloc(size, GFP_KERNEL);
	if (!dest)
		return NULL;
	dest = memcpy(dest, src, size);
return dest;
}
static void __zattr_unclone(struct zio_attribute *zattr)
{
kfree(zattr);
}
static int __zattr_set_copy(struct zio_attribute_set *dest,
struct zio_attribute_set *src)
{
if (!dest || !src)
return -EINVAL;
dest->n_std_attr = src->n_std_attr;
dest->n_ext_attr = src->n_ext_attr;
dest->std_zattr = __zattr_clone(src->std_zattr, dest->n_std_attr);
dest->ext_zattr = __zattr_clone(src->ext_zattr, dest->n_ext_attr);
return 0;
}
static void __zattr_set_free(struct zio_attribute_set *zattr_set)
{
if (!zattr_set)
return;
__zattr_unclone(zattr_set->ext_zattr);
__zattr_unclone(zattr_set->std_zattr);
}
/* When touching attributes, we always use the spinlock for the hosting dev */
static spinlock_t *__get_spinlock(struct zio_obj_head *head)
{
spinlock_t *lock;
switch (head->zobj_type) {
case ZDEV:
lock = &to_zio_dev(&head->kobj)->lock;
break;
case ZCSET:
lock = &to_zio_cset(&head->kobj)->zdev->lock;
break;
case ZCHAN:
lock = &to_zio_chan(&head->kobj)->cset->zdev->lock;
break;
	case ZTI: /* we might not want to take a lock, but... */
		lock = &to_zio_ti(&head->kobj)->cset->zdev->lock;
		break;
	case ZBI:
		lock = &to_zio_bi(&head->kobj)->cset->zdev->lock;
		break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
return NULL;
}
return lock;
}
/* Retrieve an attribute set from an object head */
static struct zio_attribute_set *__get_zattr_set(struct zio_obj_head *head)
{
struct zio_attribute_set *zattr_set;
switch (head->zobj_type) {
case ZDEV:
zattr_set = &to_zio_dev(&head->kobj)->zattr_set;
break;
case ZCSET:
zattr_set = &to_zio_cset(&head->kobj)->zattr_set;
break;
case ZCHAN:
zattr_set = &to_zio_chan(&head->kobj)->zattr_set;
break;
	case ZTRIG:
		zattr_set = &to_zio_trig(&head->kobj)->zattr_set;
		break;
	case ZBUF:
		zattr_set = &to_zio_buf(&head->kobj)->zattr_set;
		break;
	case ZTI:
zattr_set = &to_zio_ti(&head->kobj)->zattr_set;
break;
case ZBI:
zattr_set = &to_zio_bi(&head->kobj)->zattr_set;
break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
return NULL;
}
return zattr_set;
}
/* Retrieve flag from an object head */
static unsigned long *__get_flag(struct zio_obj_head *head)
{
unsigned long *flags;
switch (head->zobj_type) {
case ZDEV:
flags = &to_zio_dev(&head->kobj)->flags;
break;
case ZCSET:
flags = &to_zio_cset(&head->kobj)->flags;
break;
case ZCHAN:
flags = &to_zio_chan(&head->kobj)->flags;
break;
case ZTRIG:
flags = &to_zio_chan(&head->kobj)->flags;
break;
case ZBUF:
flags = &to_zio_chan(&head->kobj)->flags;
break;
case ZTI:
flags = &to_zio_ti(&head->kobj)->flags;
break;
case ZBI:
flags = &to_zio_bi(&head->kobj)->flags;
break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
return NULL;
}
return flags;
}
static int zio_change_current_trigger(struct zio_cset *cset, char *name)
{
struct zio_trigger_type *trig, *trig_old = cset->trig;
struct zio_ti *ti, *ti_old = cset->ti;
int err;
pr_debug("%s\n", __func__);
spin_lock(&cset->lock);
if (ti_old->flags & ZTI_BUSY) {
spin_unlock(&cset->lock);
return -EBUSY;
}
/* Set ti BUSY, so it cannot fire */
ti_old->flags |= ZTI_BUSY;
spin_unlock(&cset->lock);
if (strlen(name) > ZIO_OBJ_NAME_LEN)
return -EINVAL; /* name too long */
if (unlikely(strcmp(name, trig_old->head.name) == 0))
return 0; /* is the current trigger */
/* get the new trigger */
trig = zio_trigger_get(name);
if (IS_ERR(trig))
return PTR_ERR(trig);
/* Create and register the new trigger instance */
ti = __ti_create_and_init(trig, cset);
if (IS_ERR(ti)) {
err = PTR_ERR(ti);
goto out;
}
err = __ti_register(trig, cset, ti, "trigger-tmp");
if (err)
goto out_reg;
	/* New ti successfully created; remove the old ti */
__ti_unregister(trig_old, ti_old);
__ti_destroy(trig_old, ti_old);
zio_trigger_put(trig_old);
	/* Set the new trigger */
mb();
cset->trig = trig;
/* Rename trigger-tmp to trigger */
err = kobject_rename(&ti->head.kobj, "trigger");
if (err)
WARN(1, "%s: cannot rename trigger folder for"
" cset%d\n", __func__, cset->index);
return 0;
out_reg:
__ti_destroy(trig, ti);
out:
zio_trigger_put(trig);
return err;
}
static int zio_change_current_buffer(struct zio_cset *cset, char *name)
{
struct zio_buffer_type *zbuf, *zbuf_old = cset->zbuf;
struct zio_bi **bi_vector;
int i, j, err;
pr_debug("%s\n", __func__);
if (strlen(name) > ZIO_OBJ_NAME_LEN)
return -EINVAL; /* name too long */
if (unlikely(strcmp(name, cset->zbuf->head.name) == 0))
return 0; /* is the current buffer */
zbuf = zio_buffer_get(name);
if (IS_ERR(zbuf))
return PTR_ERR(zbuf);
bi_vector = kzalloc(sizeof(struct zio_bi *) * cset->n_chan,
GFP_KERNEL);
if (!bi_vector) {
err = -ENOMEM;
goto out;
}
/* Create a new buffer instance for each channel of the cset */
for (i = 0; i < cset->n_chan; ++i) {
bi_vector[i] = __bi_create_and_init(zbuf, &cset->chan[i]);
if (IS_ERR(bi_vector[i])) {
pr_err("%s can't create buffer instance\n", __func__);
err = PTR_ERR(bi_vector[i]);
goto out_create;
}
err = __bi_register(zbuf, &cset->chan[i], bi_vector[i],
"buffer-tmp");
if (err) {
pr_err("%s can't register buffer instance\n", __func__);
__bi_destroy(zbuf, bi_vector[i]);
goto out_create;
}
}
for (i = 0; i < cset->n_chan; ++i) {
/* Delete old buffer instance */
__bi_unregister(zbuf_old, cset->chan[i].bi);
__bi_destroy(zbuf_old, cset->chan[i].bi);
/* Assign new buffer instance */
cset->chan[i].bi = bi_vector[i];
		/* Rename buffer-tmp to buffer */
		err = kobject_rename(&cset->chan[i].bi->head.kobj, "buffer");
		if (err)
			WARN(1, "%s: cannot rename buffer folder for"
				" cset%d:chan%d\n", __func__, cset->index, i);
	}
	kfree(bi_vector);
cset->zbuf = zbuf;
zio_buffer_put(zbuf_old);
return 0;
out_create:
	for (j = i-1; j >= 0; --j) {
		__bi_unregister(zbuf, bi_vector[j]);
		__bi_destroy(zbuf, bi_vector[j]);
	}
	kfree(bi_vector);
out:
zio_buffer_put(zbuf);
return err;
}
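/*
 * The two functions above back the cset attributes named by
 * CSET_SYSFS_TRIGGER and CSET_SYSFS_BUFFER (see zattr_store() below).
 * Illustrative use from user space, with hypothetical type names and a
 * hypothetical sysfs path:
 *
 *	echo my-trig > /sys/.../cset0/current_trigger
 *	echo my-buf > /sys/.../cset0/current_buffer
 */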
static inline void __zattr_valcpy(struct zio_ctrl_attr *ctrl,
enum zattr_flags flags,
				  int index, uint32_t value)
{
pr_debug("%s\n", __func__);
if ((flags & ZATTR_TYPE) == ZATTR_TYPE_EXT)
ctrl->ext_val[index] = value;
else
ctrl->std_val[index] = value;
}
static void __zattr_propagate_value(struct zio_obj_head *head,
struct zio_attribute *zattr)
{
int i, j, index, value, flags;
struct zio_ti *ti;
struct zio_device *zdev;
struct zio_cset *cset;
struct zio_channel *chan;
pr_debug("%s\n", __func__);
index = zattr->index;
value = zattr->value;
flags = zattr->flags;
switch (head->zobj_type) {
case ZDEV:
zdev = to_zio_dev(&head->kobj);
for (i = 0; i < zdev->n_cset; ++i) {
cset = &zdev->cset[i];
for (j = 0; j < cset->n_chan; ++j)
__zattr_valcpy(&cset->chan[j].zattr_val,
flags, index, value);
}
break;
case ZCSET:
cset = to_zio_cset(&head->kobj);
for (i = 0; i < cset->n_chan; ++i)
__zattr_valcpy(&cset->chan[i].zattr_val,
flags, index, value);
break;
case ZCHAN:
chan = to_zio_chan(&head->kobj);
__zattr_valcpy(&chan->zattr_val, flags, index, value);
break;
case ZTI:
ti = to_zio_ti(&head->kobj);
__zattr_valcpy(&ti->zattr_val, flags, index, value);
break;
default:
return;
}
}
static void __zattr_init_ctrl(struct zio_obj_head *head,
struct zio_attribute_set *zattr_set)
{
int i;
pr_debug("%s\n", __func__);
/* copy standard attribute default value */
for (i = 0; i < zattr_set->n_std_attr; ++i)
__zattr_propagate_value(head, &zattr_set->std_zattr[i]);
/* copy extended attribute default value */
for (i = 0; i < zattr_set->n_ext_attr; ++i)
__zattr_propagate_value(head, &zattr_set->ext_zattr[i]);
}
/*
Zio objects all handle uint32_t values. So the show and store methods
are centralized here, and each device has its own info_get and conf_set
to handle the raw 32-bit numbers. Both paths take the hosting device's
lock to prevent concurrent access to the device registers.
 */
static ssize_t zattr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
int err = 0;
ssize_t len = 0;
spinlock_t *lock;
struct zio_attribute *zattr = to_zio_zattr(attr);
pr_debug("%s\n", __func__);
/* print device name */
if (unlikely(strcmp(attr->name, ZOBJ_SYSFS_NAME) == 0))
return sprintf(buf, "%s\n", to_zio_head(kobj)->name);
/* print current trigger name */
if (unlikely(strcmp(attr->name, CSET_SYSFS_TRIGGER) == 0))
return sprintf(buf, "%s\n", to_zio_cset(kobj)->trig->head.name);
/* print current buffer name */
if (unlikely(strcmp(attr->name, CSET_SYSFS_BUFFER) == 0))
return sprintf(buf, "%s\n", to_zio_cset(kobj)->zbuf->head.name);
/* print current enable status */
if (unlikely(strcmp(attr->name, ZOBJ_SYSFS_ENABLE) == 0))
return sprintf(buf, "%d\n",
!((*__get_flag(to_zio_head(kobj))) & ZIO_DISABLED));
if (zattr->s_op->info_get) {
		lock = __get_spinlock(to_zio_head(kobj));
		spin_lock(lock);
		err = zattr->s_op->info_get(kobj, zattr, &zattr->value);
spin_unlock(lock);
if (err)
return err;
}
len = sprintf(buf, "%i\n", zattr->value);
return len;
}
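/*
 * Illustrative driver-side sketch (not part of the original file): a device
 * supplies a zio_sysfs_operations whose info_get/conf_set handlers match the
 * calls above. All "my_*" names are hypothetical helpers.
 *
 *	static int my_info_get(struct kobject *kobj,
 *			       struct zio_attribute *zattr, uint32_t *usr_val)
 *	{
 *		*usr_val = my_read_reg(zattr->index);
 *		return 0;
 *	}
 *
 *	static int my_conf_set(struct kobject *kobj,
 *			       struct zio_attribute *zattr, uint32_t usr_val)
 *	{
 *		return my_write_reg(zattr->index, usr_val);
 *	}
 *
 *	static const struct zio_sysfs_operations my_s_op = {
 *		.info_get = my_info_get,
 *		.conf_set = my_conf_set,
 *	};
 */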
/* enable/disable a zio object*/
static void __zobj_enable(struct kobject *kobj, unsigned int enable,
unsigned int need_lock)
{
unsigned long *flags;
int i, status;
struct zio_obj_head *head;
struct zio_device *zdev;
struct zio_cset *cset;
struct zio_ti *ti;
spinlock_t *lock;
pr_debug("%s\n", __func__);
head = to_zio_head(kobj);
/* lock object if needed */
lock = __get_spinlock(head);
if (need_lock)
spin_lock(lock);
flags = __get_flag(to_zio_head(kobj));
status = !((*flags) & ZIO_STATUS);
/* if the status is not changing */
if (!(enable ^ status)) {
goto out;
}
	/* change status, preserving the other flag bits */
	if (enable)
		*flags &= ~ZIO_DISABLED;
	else
		*flags |= ZIO_DISABLED;
switch (head->zobj_type) {
case ZDEV:
pr_debug("%s: zdev\n", __func__);
zdev = to_zio_dev(kobj);
/* enable/disable all cset */
for (i = 0; i < zdev->n_cset; ++i) {
__zobj_enable(&zdev->cset[i].head.kobj, enable, 0);
}
/* device callback */
break;
case ZCSET:
pr_debug("%s: zcset\n", __func__);
cset = to_zio_cset(kobj);
/* enable/disable trigger instance */
__zobj_enable(&cset->ti->head.kobj, enable, 1);
/* enable/disable all channel*/
for (i = 0; i < cset->n_chan; ++i) {
__zobj_enable(&cset->chan[i].head.kobj, enable, 0);
}
/* cset callback */
break;
case ZCHAN:
pr_debug("%s: zchan\n", __func__);
/* channel callback */
break;
case ZTI:
pr_debug("%s: zti\n", __func__);
ti = to_zio_ti(kobj);
/* if trigger is running, abort it*/
if (*flags & ZTI_BUSY)
			if (ti->t_op->abort)
ti->t_op->abort(ti->cset);
/* trigger instance callback */
if (ti->t_op->change_status) {
pr_debug("%s:%d\n", __func__, __LINE__);
ti->t_op->change_status(ti, status);
}
break;
/* following objects can't be enabled/disabled */
case ZBUF:
case ZTRIG:
case ZBI:
pr_debug("%s: others\n", __func__);
/* buffer instance callback */
break;
default:
WARN(1, "ZIO: unknown zio object %i\n", head->zobj_type);
}
out:
if (need_lock)
spin_unlock(lock);
}
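/*
 * Illustration (not from the original file): the function above implements
 * writes to the ZOBJ_SYSFS_ENABLE attribute. Assuming that attribute is the
 * file "enable" and using a hypothetical sysfs path:
 *
 *	echo 0 > /sys/.../cset0/enable   (disable the cset, its ti and channels)
 *	echo 1 > /sys/.../cset0/enable   (re-enable them)
 */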
static ssize_t zattr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t size)
{
long val;
int err = 0;
char buf_tmp[ZIO_OBJ_NAME_LEN];
struct zio_attribute *zattr = to_zio_zattr(attr);
spinlock_t *lock;
pr_debug("%s\n", __func__);
	/* change current trigger */
	if (unlikely(strcmp(attr->name, CSET_SYSFS_TRIGGER) == 0)) {
if (strlen(buf) > ZIO_OBJ_NAME_LEN + 1)
return -EINVAL; /* name too long */
sscanf(buf, "%s\n", buf_tmp);
err = zio_change_current_trigger(to_zio_cset(kobj), buf_tmp);
return err == 0 ? size : err;
}
/* change current buffer */
if (unlikely(strcmp(attr->name, CSET_SYSFS_BUFFER) == 0)) {
if (strlen(buf) > ZIO_OBJ_NAME_LEN + 1)
return -EINVAL; /* name too long */
sscanf(buf, "%s\n", buf_tmp);
err = zio_change_current_buffer(to_zio_cset(kobj), buf_tmp);
return err == 0 ? size : err;
}
	err = strict_strtol(buf, 0, &val);
	if (err)
		return -EINVAL;
/* change enable status */
if (unlikely(strcmp(attr->name, ZOBJ_SYSFS_ENABLE) == 0 &&
(val == 0 || val == 1))) {
__zobj_enable(kobj, val, 1);
return size;
}
/* device attributes */
	if (zattr->s_op->conf_set) {
		lock = __get_spinlock(to_zio_head(kobj));
		spin_lock(lock);
		err = zattr->s_op->conf_set(kobj, zattr, (uint32_t)val);
		if (err) {
			spin_unlock(lock);
			return err;
		}
		zattr->value = (uint32_t)val;
		__zattr_propagate_value(to_zio_head(kobj), zattr);
		spin_unlock(lock);
	}
	return size;
}
static const struct sysfs_ops zio_attribute_ktype_ops = {
.show = zattr_show,
	.store = zattr_store,
};
static struct attribute default_cset_attrs[] = {
{ /* show the name */
.name = ZOBJ_SYSFS_NAME,
.mode = 0444, /* read only */
},
{
/* enable/disable object */
.name = ZOBJ_SYSFS_ENABLE,
.mode = 0666, /* read write */
},
{ /* get/set trigger */
.name = CSET_SYSFS_TRIGGER,
.mode = 0666, /* read write */
},
{ /* get/set buffer */
.name = CSET_SYSFS_BUFFER,
.mode = 0666, /* read write */
},
};
static struct attribute *def_cset_attr_ptr[] = {
&default_cset_attrs[0],
&default_cset_attrs[1],
	&default_cset_attrs[2],
	&default_cset_attrs[3],
	NULL,
};
static struct kobj_type zdkctype = { /* only for cset */
.release = NULL,
.sysfs_ops = &zio_attribute_ktype_ops,
.default_attrs = def_cset_attr_ptr,
};
static struct attribute default_attrs[] = {
{ /* show the name */
.name = ZOBJ_SYSFS_NAME,
.mode = 0444, /* read only */
},
{ /* enable/disable object */
.name = ZOBJ_SYSFS_ENABLE,
.mode = 0666, /* read write */
	},
};
static struct attribute *def_attr_ptr[] = {
	&default_attrs[0],
	&default_attrs[1],
	NULL,
};
static struct kobj_type zdktype = { /* for all the other object */
.release = NULL,
.sysfs_ops = &zio_attribute_ktype_ops,
.default_attrs = def_attr_ptr,
};
static int __check_dev_zattr(struct zio_attribute_set *parent,
struct zio_attribute_set *this)
{
int i, j;
pr_debug("%s\n", __func__);
/* verify standard attribute */
for (i = 0; i < this->n_std_attr; ++i) {
if (this->std_zattr[i].index == ZATTR_INDEX_NONE)
continue; /* next attribute */
for (j = 0; j < parent->n_std_attr; ++j) {
/*
* a standard attribute must be unique from a child
to the root. This allows building a consistent
vector of values in the control structure
*/
if (this->std_zattr[i].index ==
parent->std_zattr[j].index) {
pr_err("ZIO: attribute conflict for %s\n",
this->std_zattr[i].attr.name);
return -EINVAL;
}
}
}
return 0;
}
static int __check_attr(struct attribute *attr,
const struct zio_sysfs_operations *s_op)
{
/* check name*/
if (!attr->name)
return -EINVAL;
/* check mode */
if ((attr->mode & S_IWUGO) == S_IWUGO && !s_op->conf_set) {
pr_err("ZIO: %s: attribute %s has write permission but "
"no write function\n", __func__, attr->name);
return -ENOSYS;
}
return 0;
}
/* create a set of zio attributes: the standard one and the extended one */
static int zattr_set_create(struct zio_obj_head *head,
const struct zio_sysfs_operations *s_op)
{
	int n_attr, i, j, attr_count = 0, err = 0;
struct zio_attribute_set *zattr_set;
struct attribute_group *group;
pr_debug("%s\n", __func__);
zattr_set = __get_zattr_set(head);
if (!zattr_set)
return -EINVAL; /* message already printed */
group = &zattr_set->group;
n_attr = (zattr_set->n_std_attr + zattr_set->n_ext_attr);
if (!n_attr || (!zattr_set->std_zattr && !zattr_set->ext_zattr)) {
zattr_set->n_std_attr = 0;
zattr_set->n_ext_attr = 0;
return 0;
}
	group->attrs = kzalloc(sizeof(struct attribute *) * n_attr, GFP_KERNEL);
if (!group->attrs)
return -ENOMEM;
	if (!zattr_set->std_zattr)
		goto ext;
	for (i = 0; i < zattr_set->n_std_attr; ++i) {
err = __check_attr(&zattr_set->std_zattr[i].attr, s_op);
switch (err) {
case 0:
/* valid attribute */
group->attrs[attr_count++] =
&zattr_set->std_zattr[i].attr;
zattr_set->std_zattr[i].s_op = s_op;
zattr_set->std_zattr[i].index = i;
break;
case -EINVAL: /* unused std attribute */
zattr_set->std_zattr[i].index = ZATTR_INDEX_NONE;
break;
default:
			return err;
		}
	}
ext:
if (!zattr_set->ext_zattr)
goto out;
for (j = 0; j < zattr_set->n_ext_attr; ++j) {
err = __check_attr(&zattr_set->ext_zattr[j].attr, s_op);
if (err)
return err;