Commit 59467962 authored by Federico Vaga

sw:drv: locking simplification

Reduce the number of spinlocks in use and play it safe
Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent c0c88b6e
......@@ -145,7 +145,7 @@ struct trtl_hmq_slot {
* @base_out: base address for outgoing HMQ
* @head list_msg_input: list of messages to input slot
* @n_input: number of messages in the list
* @lock: to protect list read/write
* @lock: to protect the users list
* @mtx: to protect operations on the HMQ
* @q_msg: wait queue for messages
* @list_usr: list of consumer of the output slot
......@@ -181,12 +181,15 @@ struct trtl_hmq {
* It describes the consumer of the output slot
* @list: to keep it in our local queue
* @hmq: reference to opened HMQ
* @lock: to protect list read/write
* @lock: to protect filters
* @list_filters: list of filters to apply
* @n_filters: number of filters
* @lock_filter: to protect filter list read/write
* @ptr_r: read pointer for the message circular buffer
* @idx_r:
* @idx_r: index read pointer for the message circular buffer. This is
* protected by the input buffer lock. Accessing this field means that
* you are accessing the buffer input, and in order to do that you need
* to get the input buffer spinlock. So, we are protecting this
* variable as well.
*/
struct trtl_hmq_user {
struct list_head list;
......@@ -194,7 +197,6 @@ struct trtl_hmq_user {
spinlock_t lock;
struct list_head list_filters;
unsigned int n_filters;
spinlock_t lock_filter;
unsigned int ptr_r;
unsigned int idx_r;
};
......
......@@ -282,6 +282,8 @@ static void trtl_hmq_irq_disable(struct trtl_hmq *hmq, unsigned int is_output)
static void trtl_hmq_flush(struct trtl_hmq *hmq)
{
struct trtl_hmq_user *usr, *tmp;
struct mturtle_hmq_buffer *buf;
unsigned long flags;
mutex_lock(&hmq->mtx); /* Block user activity while flushing */
......@@ -289,20 +291,20 @@ static void trtl_hmq_flush(struct trtl_hmq *hmq)
trtl_hmq_purge(hmq);
/* Resets all the indexes */
spin_lock(&hmq->buf_in.lock);
hmq->buf_in.idx_w = 0;
hmq->buf_in.idx_r = 0;
buf = &hmq->buf_in;
spin_lock_irqsave(&buf->lock, flags);
buf->idx_w = 0;
buf->idx_r = 0;
list_for_each_entry_safe(usr, tmp, &hmq->list_usr, list) {
spin_lock(&usr->lock);
usr->idx_r = 0;
spin_unlock(&usr->lock);
}
spin_unlock(&hmq->buf_in.lock);
spin_unlock_irqrestore(&buf->lock, flags);
spin_lock(&hmq->buf_out.lock);
hmq->buf_out.idx_w = 0;
hmq->buf_out.idx_r = 0;
spin_unlock(&hmq->buf_out.lock);
buf = &hmq->buf_out;
spin_lock_irqsave(&buf->lock, flags);
buf->idx_w = 0;
buf->idx_r = 0;
spin_unlock_irqrestore(&buf->lock, flags);
mutex_unlock(&hmq->mtx);
}
......@@ -310,6 +312,10 @@ static void trtl_hmq_flush(struct trtl_hmq *hmq)
/**
* It applies filters on a given message.
* @user HMQ user instance from which we take filters
* @msg the message to filter
*
* Return: 1 if the message passes all filters, otherwise 0
*/
static int trtl_hmq_filter_check(struct trtl_hmq_user *user,
struct trtl_msg *msg)
......@@ -317,8 +323,9 @@ static int trtl_hmq_filter_check(struct trtl_hmq_user *user,
struct trtl_msg_filter_element *fltel, *tmp;
unsigned int passed = 1;
uint32_t word, *data = msg->data, *head = (uint32_t *)&msg->hdr;
unsigned long flags;
spin_lock(&user->lock_filter);
spin_lock_irqsave(&user->lock, flags);
list_for_each_entry_safe(fltel, tmp, &user->list_filters, list) {
/* If one of the previous filter failed, then stop */
if (!passed)
......@@ -350,7 +357,7 @@ static int trtl_hmq_filter_check(struct trtl_hmq_user *user,
break;
}
}
spin_unlock(&user->lock_filter);
spin_unlock_irqrestore(&user->lock, flags);
return passed;
}
......@@ -514,6 +521,7 @@ static int trtl_hmq_open(struct inode *inode, struct file *file)
struct trtl_hmq_user *user;
struct trtl_hmq *hmq;
int m = iminor(inode);
unsigned long flags;
hmq = to_trtl_hmq(minors[m]);
......@@ -523,20 +531,18 @@ static int trtl_hmq_open(struct inode *inode, struct file *file)
user->hmq = hmq;
spin_lock_init(&user->lock);
spin_lock_init(&user->lock_filter);
INIT_LIST_HEAD(&user->list_filters);
/* Add new user to the list */
spin_lock(&hmq->lock);
spin_lock_irqsave(&hmq->lock, flags);
list_add(&user->list, &hmq->list_usr);
hmq->n_user++;
spin_unlock_irqrestore(&hmq->lock, flags);
spin_lock(&user->lock);
spin_lock_irqsave(&hmq->buf_in.lock, flags);
/* Point to the current position in buffer */
user->idx_r = hmq->buf_in.idx_w;
spin_unlock(&user->lock);
spin_unlock(&hmq->lock);
spin_unlock_irqrestore(&hmq->buf_in.lock, flags);
file->private_data = user;
......@@ -553,10 +559,8 @@ static int trtl_hmq_release(struct inode *inode, struct file *f)
/* Remove user from the list */
spin_lock_irqsave(&hmq->lock, flags);
hmq->n_user--;
list_del(&user->list);
kfree(user);
spin_unlock_irqrestore(&hmq->lock, flags);
return 0;
......@@ -576,6 +580,7 @@ static int trtl_hmq_write_one(struct trtl_hmq *hmq,
int err = 0, copy_size;
size_t size;
struct mturtle_hmq_buffer *buf = &hmq->buf_out;
unsigned long flags;
/* Here we can safely sleep */
size = TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes);
......@@ -593,7 +598,7 @@ static int trtl_hmq_write_one(struct trtl_hmq *hmq,
}
/* don't sleep here */
spin_lock(&buf->lock);
spin_lock_irqsave(&buf->lock, flags);
if ((buf->idx_w - buf->idx_r) >= hmq_buf_max_msg) {
err = -EAGAIN;
} else {
......@@ -603,7 +608,7 @@ static int trtl_hmq_write_one(struct trtl_hmq *hmq,
msg = &buf->msg[buf->idx_w++ & (hmq_buf_max_msg - 1)];
memcpy(msg, &buf->msg_tmp, copy_size);
}
spin_unlock(&buf->lock);
spin_unlock_irqrestore(&buf->lock, flags);
return err;
}
......@@ -671,6 +676,7 @@ static int trtl_ioctl_msg_filter_add(struct trtl_hmq_user *user,
struct trtl_msg_filter_element *fltel;
uint32_t size;
int err = 0;
unsigned long flags;
fltel = kmalloc(sizeof(struct trtl_msg_filter_element), GFP_KERNEL);
if (!fltel)
......@@ -707,10 +713,10 @@ static int trtl_ioctl_msg_filter_add(struct trtl_hmq_user *user,
}
/* Store filter */
spin_lock(&user->lock_filter);
spin_lock_irqsave(&user->lock, flags);
list_add_tail(&fltel->list, &user->list_filters);
user->n_filters++;
spin_unlock(&user->lock_filter);
spin_unlock_irqrestore(&user->lock, flags);
return 0;
......@@ -728,14 +734,15 @@ static void trtl_ioctl_msg_filter_clean(struct trtl_hmq_user *user,
void __user *uarg)
{
struct trtl_msg_filter_element *fltel, *tmp;
unsigned long flags;
spin_lock(&user->lock_filter);
spin_lock_irqsave(&user->lock, flags);
list_for_each_entry_safe(fltel, tmp, &user->list_filters, list) {
list_del(&fltel->list);
kfree(fltel);
user->n_filters--;
}
spin_unlock(&user->lock_filter);
spin_unlock_irqrestore(&user->lock, flags);
}
......@@ -789,6 +796,7 @@ static ssize_t trtl_hmq_read(struct file *f, char __user *ubuf,
struct trtl_msg *msg;
unsigned int i = 0, copy_size, n_msg;
int err = 0;
unsigned long flags;
/* Calculate the number of messages to read */
if (count % sizeof(struct trtl_msg)) {
......@@ -804,16 +812,16 @@ static ssize_t trtl_hmq_read(struct file *f, char __user *ubuf,
mutex_lock(&hmq->mtx);
while (!err && i < n_msg && buf->idx_w != usr->idx_r) {
spin_lock(&buf->lock);
spin_lock_irqsave(&buf->lock, flags);
msg = &buf->msg[usr->idx_r++ & (hmq_buf_max_msg - 1)];
if (!trtl_hmq_filter_check(usr, msg)) {
spin_unlock(&buf->lock);
spin_unlock_irqrestore(&buf->lock, flags);
continue;
}
copy_size = sizeof(struct trtl_hmq_header);
copy_size += (msg->hdr.len * 4);
memcpy(&buf->msg_tmp, msg, copy_size);
spin_unlock(&buf->lock);
spin_unlock_irqrestore(&buf->lock, flags);
/* now we copy to user space and we can safely sleep */
if (copy_to_user(ubuf + count, &buf->msg_tmp, copy_size)) {
......@@ -836,20 +844,19 @@ static ssize_t trtl_hmq_read(struct file *f, char __user *ubuf,
static void trtl_hmq_user_filter(struct trtl_hmq_user *usr)
{
struct trtl_hmq *hmq = usr->hmq;
struct mturtle_hmq_buffer *buf = &hmq->buf_in;
struct trtl_msg *msg;
unsigned long flags1, flags2;
unsigned long flags;
spin_lock_irqsave(&hmq->buf_in.lock, flags1);
spin_lock_irqsave(&usr->lock, flags2);
spin_lock_irqsave(&buf->lock, flags);
/* Loop until we find a valid message for the user */
while (hmq->buf_in.idx_w != usr->idx_r) {
msg = &hmq->buf_in.msg[usr->idx_r & (hmq_buf_max_msg - 1)];
while (buf->idx_w != usr->idx_r) {
msg = &buf->msg[usr->idx_r & (hmq_buf_max_msg - 1)];
if (trtl_hmq_filter_check(usr, msg))
break;
usr->idx_r++;
}
spin_unlock_irqrestore(&usr->lock, flags1);
spin_unlock_irqrestore(&hmq->buf_in.lock, flags2);
spin_unlock_irqrestore(&buf->lock, flags);
}
/**
......@@ -944,8 +951,9 @@ static void trtl_message_pop_to_buf(struct trtl_hmq *hmq)
*/
static void trtl_irq_handler_input(struct trtl_hmq *hmq)
{
struct mturtle_hmq_buffer *buf = &hmq->buf_in;
struct trtl_hmq_user *usr, *tmp;
unsigned long flags, flags2;
unsigned long flags;
/*
* Update user pointer when the write pointer is overwriting data
......@@ -953,17 +961,13 @@ static void trtl_irq_handler_input(struct trtl_hmq *hmq)
* read-pointer
*/
list_for_each_entry_safe(usr, tmp, &hmq->list_usr, list) {
spin_lock_irqsave(&usr->lock, flags);
spin_lock_irqsave(&hmq->buf_in.lock, flags2);
if (usr->idx_r + hmq_buf_max_msg > hmq->buf_in.idx_w) {
spin_unlock_irqrestore(&hmq->buf_in.lock, flags2);
spin_unlock_irqrestore(&usr->lock, flags);
spin_lock_irqsave(&buf->lock, flags);
if (usr->idx_r + hmq_buf_max_msg > buf->idx_w) {
spin_unlock_irqrestore(&buf->lock, flags);
continue;
}
spin_unlock_irqrestore(&hmq->buf_in.lock, flags2);
/* The one we are going to */
usr->idx_r++;
spin_unlock_irqrestore(&usr->lock, flags);
spin_unlock_irqrestore(&buf->lock, flags);
}
trtl_message_pop_to_buf(hmq);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment