Commit 76f3fcdc authored by Federico Vaga's avatar Federico Vaga

minor changes on buffer management

parents 14923d2d df8df06a
......@@ -75,14 +75,17 @@ static int zbk_conf_set(struct device *dev, struct zio_attribute *zattr,
uint32_t usr_val)
{
struct zio_bi *bi = to_zio_bi(dev);
struct zio_ti *ti = NULL;
struct zbk_instance *zbki = to_zbki(bi);
struct zio_block *block;
unsigned long flags, bflags;
unsigned long flags, bflags, tflags;
void *data;
int ret = 0;
switch (zattr->id) {
case ZIO_ATTR_ZBUF_MAXKB:
if (usr_val == zattr->value)
return 0; /* nothing to do */
/* Lock and disable */
spin_lock_irqsave(&bi->lock, flags);
if (atomic_read(&zbki->map_count)) {
......@@ -93,6 +96,14 @@ static int zbk_conf_set(struct device *dev, struct zio_attribute *zattr,
bi->flags |= ZIO_DISABLED;
spin_unlock_irqrestore(&bi->lock, flags);
/*
* Disable trigger while resizing buffer to avoid
* problems with blocks that point to a different
* vmalloc() area
*/
ti = bi->cset->ti;
tflags = zio_trigger_abort_disable(ti->cset, 1);
/* Flush the buffer */
while ((block = bi->b_op->retr_block(bi)))
bi->b_op->free_block(bi, block);
......@@ -109,10 +120,18 @@ static int zbk_conf_set(struct device *dev, struct zio_attribute *zattr,
} else {
ret = -ENOMEM;
}
/* Lock and restore flags */
spin_lock_irqsave(&bi->lock, flags);
bi->flags = bflags;
spin_unlock_irqrestore(&bi->lock, flags);
/* Restore trigger */
if (ti && ((tflags & ZIO_STATUS) == ZIO_ENABLED))
ti->flags = (ti->flags & ~ZIO_STATUS) | ZIO_ENABLED;
if (ti && (tflags & ZIO_TI_ARMED))
zio_arm_trigger(ti);
return ret;
case ZBK_ATTR_MERGE_DATA:
......
......@@ -303,7 +303,7 @@ static int zio_can_r_ctrl(struct zio_f_priv *priv)
/* Control: if not yet done, we can read */
if (chan->user_block) {
if (zio_is_cdone(chan->user_block)) {
bi->b_op->free_block(bi, chan->user_block);
zio_buffer_free_block(bi, chan->user_block);
chan->user_block = NULL;
} else{
mutex_unlock(&chan->user_lock);
......@@ -380,7 +380,7 @@ static int zio_can_w_ctrl(struct zio_f_priv *priv)
if (ctrl->nsamples)
zio_buffer_store_block(bi, block);
else
chan->bi->b_op->free_block(chan->bi, block);
zio_buffer_free_block(chan->bi, block);
block = NULL;
}
/* if no block is there, get a new one */
......@@ -482,7 +482,7 @@ static ssize_t zio_generic_read(struct file *f, char __user *ubuf,
block->uoff += count;
if (block->uoff == block->datalen) {
chan->user_block = NULL;
bi->b_op->free_block(bi, block);
zio_buffer_free_block(bi, block);
}
}
mutex_unlock(&chan->user_lock);
......@@ -628,7 +628,7 @@ static int zio_generic_release(struct inode *inode, struct file *f)
mutex_lock(&chan->user_lock);
if (atomic_read(&chan->bi->use_count) == 1 && chan->user_block) {
chan->bi->b_op->free_block(chan->bi, block);
zio_buffer_free_block(chan->bi, block);
chan->user_block = NULL;
}
mutex_unlock(&chan->user_lock);
......
......@@ -24,8 +24,7 @@ static void __zio_internal_abort_free(struct zio_cset *cset)
chan_for_each(chan, cset) {
block = chan->active_block;
if (block)
cset->zbuf->b_op->free_block(chan->bi, block);
zio_buffer_free_block(chan->bi, block);
chan->active_block = NULL;
}
}
......
......@@ -117,7 +117,7 @@ struct zio_buffer_operations {
struct zio_block * (*retr_block) (struct zio_bi *bi);
int (*store_block)(struct zio_bi *bi,
struct zio_block *block);
/* Create returns ERR_PTR on error */
struct zio_bi * (*create)(struct zio_buffer_type *zbuf,
struct zio_channel *chan);
......@@ -172,6 +172,8 @@ static inline void zio_buffer_store_block(struct zio_bi *bi, struct zio_block *b
static inline int zio_buffer_free_block(struct zio_bi *bi,
struct zio_block *block)
{
if (unlikely(!block))
return -1;
bi->b_op->free_block(bi, block);
return 0;
......
......@@ -170,7 +170,7 @@ static inline int zio_generic_data_done(struct zio_cset *cset)
}
if (unlikely((ti->flags & ZIO_DIR) == ZIO_DIR_OUTPUT)) {
zbuf->b_op->free_block(chan->bi, block);
zio_buffer_free_block(chan->bi, block);
} else { /* DIR_INPUT */
memcpy(zio_get_ctrl(block), ctrl,
zio_control_size(chan));
......
......@@ -757,7 +757,8 @@ static ssize_t zio_buf_flush(struct device *dev,
bi->flags |= ZIO_DISABLED;
spin_unlock_irqrestore(&bi->lock, flags);
/* Flushing blocks. It does not use helpers to prevent deadlock */
/* Flushing blocks. Buffer helpers may use locks, so we do not use
   helpers here to prevent deadlock */
while ((block = bi->b_op->retr_block(bi))) {
dev_dbg(dev, "Flushing ... (%d)\n", n++);
bi->b_op->free_block(bi, block);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment