Commit 22af3460 authored by Federico Vaga

sw:drv: checkpatch

Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent c31f3377
......@@ -115,9 +115,9 @@ void trtl_minor_put(struct device *dev)
/**
* It returns the application ID
*/
static ssize_t trtl_show_app_id(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t application_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
......@@ -127,9 +127,9 @@ static ssize_t trtl_show_app_id(struct device *dev,
/**
* It returns the number of CPUs in the FPGA
*/
static ssize_t trtl_show_n_cpu(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t n_cpu_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
......@@ -140,9 +140,9 @@ static ssize_t trtl_show_n_cpu(struct device *dev,
/**
* It returns the reset status of all CPUs as bitmask
*/
static ssize_t trtl_show_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t reset_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
uint32_t reg_val;
......@@ -155,9 +155,9 @@ static ssize_t trtl_show_reset_mask(struct device *dev,
/**
* It sets the reset status of all CPUs as bitmask
*/
static ssize_t trtl_store_reset_mask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t reset_mask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
long val;
......@@ -171,18 +171,18 @@ static ssize_t trtl_store_reset_mask(struct device *dev,
}
static ssize_t trtl_show_smem_op(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t smem_operation_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
return sprintf(buf, "%d", trtl->mod);
}
static ssize_t trtl_store_smem_op(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t smem_operation_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct trtl_dev *trtl = to_trtl_dev(dev);
long val;
......@@ -200,12 +200,10 @@ static ssize_t trtl_store_smem_op(struct device *dev,
return count;
}
DEVICE_ATTR(application_id, S_IRUGO, trtl_show_app_id, NULL);
DEVICE_ATTR(n_cpu, S_IRUGO, trtl_show_n_cpu, NULL);
DEVICE_ATTR(reset_mask, (S_IRUGO | S_IWUSR),
trtl_show_reset_mask, trtl_store_reset_mask);
DEVICE_ATTR(smem_operation, (S_IRUGO | S_IWUSR),
trtl_show_smem_op, trtl_store_smem_op);
DEVICE_ATTR(application_id, 0444, application_id_show, NULL);
DEVICE_ATTR(n_cpu, 0444, n_cpu_show, NULL);
DEVICE_ATTR(reset_mask, 0644, reset_mask_show, reset_mask_store);
DEVICE_ATTR(smem_operation, 0644, smem_operation_show, smem_operation_store);
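(Aside, not part of this commit: now that the callbacks follow the <attr>_show/<attr>_store naming convention, kernels that provide the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers could declare the same attributes more compactly, with the 0444/0644 permissions implied; a minimal sketch:)

static DEVICE_ATTR_RO(application_id);	/* 0444, binds application_id_show */
static DEVICE_ATTR_RO(n_cpu);
static DEVICE_ATTR_RW(reset_mask);	/* 0644, binds reset_mask_show/_store */
static DEVICE_ATTR_RW(smem_operation);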
static struct attribute *trtl_dev_attr[] = {
&dev_attr_application_id.attr,
......@@ -262,7 +260,8 @@ static long trtl_ioctl_io(struct trtl_dev *trtl, void __user *uarg)
/* read */
addr = trtl->base_smem + io.addr;
} else {
trtl_iowrite(trtl, io.mod, trtl->base_csr + MT_CPU_CSR_REG_SMEM_OP);
trtl_iowrite(trtl, io.mod,
trtl->base_csr + MT_CPU_CSR_REG_SMEM_OP);
/* write */
addr = trtl->base_smem + io.addr;
trtl_iowrite(trtl, io.value, addr);
......@@ -479,7 +478,8 @@ static ssize_t trtl_config_rom_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct trtl_dev *trtl = to_trtl_dev(container_of(kobj, struct device, kobj));
struct device *dev = container_of(kobj, struct device, kobj);
struct trtl_dev *trtl = to_trtl_dev(dev);
memcpy(buf, &trtl->cfgrom, count);
......@@ -489,7 +489,7 @@ static ssize_t trtl_config_rom_read(struct file *file, struct kobject *kobj,
struct bin_attribute trtl_config_rom_sysfs = {
.attr = {
.name = "config-rom",
.mode = S_IRUGO,
.mode = 0444,
},
.size = sizeof(struct trtl_config_rom),
.read = trtl_config_rom_read,
......
......@@ -65,9 +65,9 @@ static void trtl_cpu_on(struct trtl_cpu *cpu)
}
static ssize_t trtl_show_notify_history(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t last_notifications_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_cpu *cpu = to_trtl_cpu(dev);
ssize_t len;
......@@ -83,15 +83,16 @@ static ssize_t trtl_show_notify_history(struct device *dev,
return len;
}
DEVICE_ATTR(last_notifications, S_IRUGO, trtl_show_notify_history, NULL);
DEVICE_ATTR(last_notifications, 0444,
last_notifications_show, NULL);
/**
* It returns the CPU reset status
*/
static ssize_t trtl_show_reset(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t reset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_cpu *cpu = to_trtl_cpu(dev);
struct trtl_dev *trtl = to_trtl_dev(dev->parent);
......@@ -105,9 +106,9 @@ static ssize_t trtl_show_reset(struct device *dev,
/**
* It asserts or de-asserts the CPU reset line
*/
static ssize_t trtl_store_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct trtl_cpu *cpu = to_trtl_cpu(dev);
long val;
......@@ -122,7 +123,7 @@ static ssize_t trtl_store_reset(struct device *dev,
return count;
}
DEVICE_ATTR(reset, (S_IRUGO | S_IWUSR), trtl_show_reset, trtl_store_reset);
DEVICE_ATTR(reset, 0644, reset_show, reset_store);
static struct attribute *trtl_cpu_attr[] = {
&dev_attr_reset.attr,
......@@ -151,17 +152,20 @@ static int trtl_cpu_firmware_load(struct trtl_cpu *cpu, void *fw_buf,
int size, offset, i, err = 0;
/*
* The Debug console uses the CORE select. In order to avoid an IRQ burst
* due to the loading of a new firmware, we temporary disable the IRQs.
* The Debug IRQ handler, can be called anyway right before we disable it
* but the spinlock will prevent any conflict on the core selection
* The Debug console uses the CORE select. In order to avoid an IRQ
* burst due to the loading of a new firmware, we temporarily disable
* the IRQs.
* The Debug IRQ handler can be called anyway right before we disable
* it, but the spinlock will prevent any conflict on the core selection.
*/
irq_dbg_old = trtl_ioread(trtl, trtl->base_csr + MT_CPU_CSR_REG_UART_IMSK);
irq_dbg_old = trtl_ioread(trtl,
trtl->base_csr + MT_CPU_CSR_REG_UART_IMSK);
trtl_iowrite(trtl, 0, trtl->base_csr + MT_CPU_CSR_REG_UART_IMSK);
/* Select the CPU memory to write */
spin_lock(&trtl->lock_cpu_sel);
trtl_iowrite(trtl, cpu->index, trtl->base_csr + MT_CPU_CSR_REG_CORE_SEL);
trtl_iowrite(trtl, cpu->index,
trtl->base_csr + MT_CPU_CSR_REG_CORE_SEL);
if (off + count > trtl->cfgrom.mem_size[cpu->index]) {
dev_err(&cpu->dev,
......@@ -228,7 +232,8 @@ static int trtl_cpu_firmware_dump(struct trtl_cpu *cpu, void *fw_buf,
/* Select the CPU memory to write */
spin_lock(&trtl->lock_cpu_sel);
trtl_iowrite(trtl, cpu->index, trtl->base_csr + MT_CPU_CSR_REG_CORE_SEL);
trtl_iowrite(trtl, cpu->index,
trtl->base_csr + MT_CPU_CSR_REG_CORE_SEL);
if (off + count > trtl->cfgrom.mem_size[cpu->index]) {
dev_err(&cpu->dev, "Cannot dump firmware: size limit %d byte\n",
......@@ -354,7 +359,7 @@ static void trtl_cpu_irq_clear(struct trtl_dev *trtl, uint32_t mask)
irqreturn_t trtl_cpu_irq_handler_not(int irq, void *arg)
{
struct trtl_dev *trtl =arg;
struct trtl_dev *trtl = arg;
struct trtl_cpu *cpu;
uint32_t status;
unsigned long flags;
......
......@@ -17,32 +17,50 @@ static int trtl_dbg_info_seq_read(struct seq_file *s, void *data)
struct trtl_dev *trtl = s->private;
struct trtl_cpu *cpu;
struct trtl_hmq *hmq;
uint32_t reg;
int i, k;
seq_printf(s, "%s\n", dev_name(&trtl->dev));
seq_printf(s, "cpu-core:\n");
seq_printf(s, " HMQ input status LO: 0x%x:\n",
trtl_ioread(trtl, trtl->base_csr + MT_CPU_CSR_REG_HMQI_STATUS_LO));
seq_printf(s, " HMQ input status HI: 0x%x:\n",
trtl_ioread(trtl, trtl->base_csr + MT_CPU_CSR_REG_HMQI_STATUS_HI));
seq_printf(s, " HMQ output status LO: 0x%x:\n",
trtl_ioread(trtl, trtl->base_csr + MT_CPU_CSR_REG_HMQO_STATUS_LO));
seq_printf(s, " HMQ output status HI: 0x%x:\n",
trtl_ioread(trtl, trtl->base_csr + MT_CPU_CSR_REG_HMQO_STATUS_HI));
seq_printf(s, " cpu-core:\n");
seq_puts(s, "cpu-core:\n");
reg = trtl_ioread(trtl,
trtl->base_csr + MT_CPU_CSR_REG_HMQI_STATUS_LO);
seq_printf(s, " HMQ input status LO: 0x%x:\n", reg);
reg = trtl_ioread(trtl,
trtl->base_csr + MT_CPU_CSR_REG_HMQI_STATUS_HI);
seq_printf(s, " HMQ input status HI: 0x%x:\n", reg);
reg = trtl_ioread(trtl,
trtl->base_csr + MT_CPU_CSR_REG_HMQO_STATUS_LO);
seq_printf(s, " HMQ output status LO: 0x%x:\n", reg);
reg = trtl_ioread(trtl,
trtl->base_csr + MT_CPU_CSR_REG_HMQO_STATUS_HI);
seq_printf(s, " HMQ output status HI: 0x%x:\n", reg);
seq_puts(s, " cpu-core:\n");
for (i = 0; i < trtl->cfgrom.n_cpu; ++i) {
cpu = &trtl->cpu[i];
seq_printf(s, " - index: %d\n", cpu->index);
seq_printf(s, " name: %s\n", dev_name(&cpu->dev));
seq_printf(s, " hmq:\n");
seq_printf(s, " - index: %d\n",
cpu->index);
seq_printf(s, " name: %s\n",
dev_name(&cpu->dev));
seq_puts(s, " hmq:\n");
for (k = 0; k < trtl->cfgrom.n_hmq[i]; ++k) {
hmq = &cpu->hmq[k];
seq_printf(s, " - index: %d\n", hmq->index);
seq_printf(s, " name: %s\n", dev_name(&hmq->dev));
seq_printf(s, " buf-in-r: %d (look user)\n", hmq->buf_in.idx_r);
seq_printf(s, " buf-in-w: %d\n", hmq->buf_in.idx_w);
seq_printf(s, " buf-out-r: %d\n", hmq->buf_out.idx_r);
seq_printf(s, " buf-out-w: %d\n", hmq->buf_out.idx_w);
seq_printf(s, " - index: %d\n",
hmq->index);
seq_printf(s, " name: %s\n",
dev_name(&hmq->dev));
seq_printf(s, " buf-in-r: %d (look user)\n",
hmq->buf_in.idx_r);
seq_printf(s, " buf-in-w: %d\n",
hmq->buf_in.idx_w);
seq_printf(s, " buf-out-r: %d\n",
hmq->buf_out.idx_r);
seq_printf(s, " buf-out-w: %d\n",
hmq->buf_out.idx_w);
}
}
return 0;
......@@ -94,6 +112,5 @@ void trtl_debugfs_init(struct trtl_dev *trtl)
*/
void trtl_debugfs_exit(struct trtl_dev *trtl)
{
if (trtl->dbg_dir)
debugfs_remove_recursive(trtl->dbg_dir);
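/* debugfs_remove_recursive() is NULL-safe; the guard above is redundant */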
debugfs_remove_recursive(trtl->dbg_dir);
}
......@@ -96,7 +96,7 @@ struct trtl_msg_element {
* Circular buffer implementation for MockTurtle
*/
struct mturtle_hmq_buffer {
struct spinlock lock;
spinlock_t lock;
unsigned int idx_w;
unsigned int idx_r;
struct trtl_msg *msg;
......@@ -158,11 +158,11 @@ struct trtl_hmq {
struct trtl_hmq_user {
struct list_head list; /**< to keep it in our local queue */
struct trtl_hmq *hmq; /**< reference to opened HMQ */
struct spinlock lock; /**< to protect list read/write */
spinlock_t lock; /**< to protect list read/write */
struct list_head list_filters; /**< list of filters to apply */
unsigned int n_filters; /**< number of filters */
struct spinlock lock_filter; /**< to protect filter list read/write */
spinlock_t lock_filter; /**< to protect filter list read/write */
unsigned int ptr_r; /**< read pointer for the message circular buffer */
unsigned int idx_r;
......@@ -177,7 +177,7 @@ struct trtl_cpu {
struct device dev; /**< device representing a single CPU */
struct circ_buf cbuf; /**< debug circular buffer */
struct spinlock lock;
spinlock_t lock;
struct trtl_hmq hmq[TRTL_MAX_MQ_CHAN]; /**< list of HMQ slots used by
this CPU */
struct device *tty_dev;
......@@ -208,8 +208,8 @@ struct trtl_dev {
enum trtl_smem_modifier mod; /**< smem operation modifier */
uint32_t message_sequence; /**< message sequence number */
struct spinlock lock_cpu_sel;
struct spinlock lock_hmq_sel;
spinlock_t lock_cpu_sel;
spinlock_t lock_hmq_sel;
const struct trtl_config_rom cfgrom; /**< synthesis configuration ROM */
......
......@@ -176,8 +176,10 @@ static inline void trtl_cpu_hmq_select(struct trtl_dev *trtl,
uint32_t sel;
sel = (cpu << MT_CPU_CSR_HMQ_SEL_CORE_SHIFT) & MT_CPU_CSR_HMQ_SEL_CORE_MASK;
sel |= ((hmq << MT_CPU_CSR_HMQ_SEL_QUEUE_SHIFT) & MT_CPU_CSR_HMQ_SEL_QUEUE_MASK);
sel = (cpu << MT_CPU_CSR_HMQ_SEL_CORE_SHIFT) &
      MT_CPU_CSR_HMQ_SEL_CORE_MASK;
sel |= (hmq << MT_CPU_CSR_HMQ_SEL_QUEUE_SHIFT) &
       MT_CPU_CSR_HMQ_SEL_QUEUE_MASK;
trtl_iowrite(trtl, sel, trtl->base_csr + MT_CPU_CSR_REG_HMQ_SEL);
}
......@@ -318,14 +320,15 @@ static void trtl_hmq_flush(struct trtl_hmq *hmq)
/**
* It applies filters on a given message.
*/
static int trtl_hmq_filter_check(struct trtl_hmq_user *user, struct trtl_msg *msg)
static int trtl_hmq_filter_check(struct trtl_hmq_user *user,
struct trtl_msg *msg)
{
struct trtl_msg_filter_element *fltel, *tmp;
unsigned int passed = 1;
uint32_t word, *data = msg->data, *head = (uint32_t *)&msg->hdr;
spin_lock(&user->lock_filter);
list_for_each_entry_safe (fltel, tmp, &user->list_filters, list) {
list_for_each_entry_safe(fltel, tmp, &user->list_filters, list) {
/* If one of the previous filter failed, then stop */
if (!passed)
break;
......@@ -335,7 +338,7 @@ static int trtl_hmq_filter_check(struct trtl_hmq_user *user, struct trtl_msg *ms
else
word = head[fltel->filter.word_offset];
switch(fltel->filter.operation) {
switch (fltel->filter.operation) {
case TRTL_MSG_FILTER_AND:
word &= fltel->filter.mask;
if (word != fltel->filter.value)
......@@ -365,9 +368,9 @@ static int trtl_hmq_filter_check(struct trtl_hmq_user *user, struct trtl_msg *ms
/**
* It clears the content of the HMQ
*/
static ssize_t trtl_hmq_flush_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t discard_all_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct trtl_hmq *hmq = to_trtl_hmq(dev);
......@@ -375,15 +378,15 @@ static ssize_t trtl_hmq_flush_store(struct device *dev,
return count;
}
DEVICE_ATTR(flush, S_IWUSR|S_IWGRP, NULL, trtl_hmq_flush_store);
DEVICE_ATTR(discard_all, 0200, NULL, discard_all_store);
/**
* It returns 1 if the message queue slot is full
*/
static ssize_t trtl_show_full(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t full_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_hmq *hmq = to_trtl_hmq(dev);
struct trtl_dev *trtl = to_trtl_dev(dev->parent);
......@@ -394,15 +397,15 @@ static ssize_t trtl_show_full(struct device *dev,
return sprintf(buf, "%d\n", !!(status & 0x1));
}
DEVICE_ATTR(full, S_IRUGO, trtl_show_full, NULL);
DEVICE_ATTR(full, 0444, full_show, NULL);
/**
* It returns 1 if the message queue slot is empty
*/
static ssize_t trtl_show_empty(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t empty_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trtl_hmq *hmq = to_trtl_hmq(dev);
struct trtl_dev *trtl = to_trtl_dev(dev->parent);
......@@ -412,7 +415,7 @@ static ssize_t trtl_show_empty(struct device *dev,
return sprintf(buf, "%d\n", !!(status & 0x2));
}
DEVICE_ATTR(empty, S_IRUGO, trtl_show_empty, NULL);
DEVICE_ATTR(empty, 0444, empty_show, NULL);
static struct attribute *trtl_hmq_attr[] = {
&dev_attr_flush.attr,
......@@ -426,7 +429,7 @@ static const struct attribute_group trtl_hmq_group = {
};
static ssize_t trtl_message_sent(struct device *dev,
static ssize_t message_sent_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -434,9 +437,9 @@ static ssize_t trtl_message_sent(struct device *dev,
return sprintf(buf, "%d\n", hmq->buf_out.count);
}
DEVICE_ATTR(message_sent, S_IRUGO, trtl_message_sent, NULL);
DEVICE_ATTR(message_sent, 0444, message_sent_show, NULL);
static ssize_t trtl_message_received(struct device *dev,
static ssize_t message_received_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
......@@ -444,7 +447,7 @@ static ssize_t trtl_message_received(struct device *dev,
return sprintf(buf, "%d\n", hmq->buf_in.count);
}
DEVICE_ATTR(message_received, S_IRUGO, trtl_message_received, NULL);
DEVICE_ATTR(message_received, 0444, message_received_show, NULL);
static struct attribute *trtl_hmq_attr_stat[] = {
&dev_attr_message_received.attr,
......@@ -533,25 +536,27 @@ static int trtl_hmq_write_one(struct trtl_hmq *hmq,
{
struct trtl_msg *msg;
int err = 0, copy_size;
size_t size;
struct mturtle_hmq_buffer *buf = &hmq->buf_out;
/* Here we can safely sleep */
size = TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes);
copy_size = sizeof(struct trtl_hmq_header);
copy_size += (TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes) * 4);
copy_size += size * 4;
if (copy_from_user(&buf->msg_tmp, ubuf, copy_size))
return -EFAULT;
if (buf->msg_tmp.hdr.len > TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes)) {
if (buf->msg_tmp.hdr.len > size) {
dev_err(&hmq->dev,
"write: cannot send %u bytes, the maximum size is %u bytes\n",
buf->msg_tmp.hdr.len * 4,
TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes) * 4);
buf->msg_tmp.hdr.len * 4, size * 4);
return -EINVAL;
}
/* don't sleep here */
spin_lock(&buf->lock);
if((buf->idx_w - buf->idx_r) >= hmq_buf_max_msg) {
if ((buf->idx_w - buf->idx_r) >= hmq_buf_max_msg) {
err = -EAGAIN;
} else {
/* copy only the message */
......@@ -606,13 +611,14 @@ static ssize_t trtl_hmq_write(struct file *f, const char __user *buf,
*offp += count;
/*
* If `count` is not 0, it means that we saved at least one message, even
* if we got an error on the second message. So, in this case notifiy the
* user space about how many messages where stored and ignore errors.
* Return the error only when we did not save any message.
* If `count` is not 0, it means that we saved at least one message,
* even if we got an error on the second message. So, in this case
* notify the user space about how many messages were stored and
* ignore errors. Return the error only when we did not save any
* message.
*
* I choosed this solution because we do not have any dangerous error here,
* and it simplify the code.
* I chose this solution because we do not have any dangerous error
* here, and it simplifies the code.
*/
return count ? count : err;
}
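(Aside, not part of this commit: a hypothetical user-space caller honouring the contract described above would treat a short write() as "that many bytes worth of messages were queued" and only a negative return as a failure; the device path and message formatting are assumptions, not taken from the driver:)

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Send a batch of pre-formatted HMQ messages; report partial writes. */
static int hmq_send(const char *path, const void *msgs, size_t len)
{
	int fd = open(path, O_WRONLY);	/* e.g. the HMQ character device node */
	ssize_t n;

	if (fd < 0)
		return -errno;
	n = write(fd, msgs, len);
	close(fd);
	if (n < 0)
		return -errno;		/* nothing was queued */
	if ((size_t)n < len)		/* short write: only part of the batch was queued */
		fprintf(stderr, "queued only %zd of %zu bytes\n", n, len);
	return 0;
}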
......@@ -686,7 +692,7 @@ static void trtl_ioctl_msg_filter_clean(struct trtl_hmq_user *user,
struct trtl_msg_filter_element *fltel, *tmp;
spin_lock(&user->lock_filter);
list_for_each_entry_safe (fltel, tmp, &user->list_filters, list) {
list_for_each_entry_safe(fltel, tmp, &user->list_filters, list) {
list_del(&fltel->list);
kfree(fltel);
user->n_filters--;
......@@ -828,7 +834,7 @@ const struct file_operations trtl_hmq_fops = {
static void trtl_message_pop(struct trtl_hmq *hmq, struct trtl_msg *msg)
{
struct trtl_dev *trtl = to_trtl_dev(hmq->dev.parent);
unsigned int copy_size = TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes);
size_t copy_size = TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes);
unsigned long flags;
spin_lock_irqsave(&trtl->lock_hmq_sel, flags);
......@@ -922,7 +928,7 @@ static uint64_t trtl_hmq_irq_status_in(struct trtl_dev *trtl)
*/
irqreturn_t trtl_irq_handler_in(int irq_core_base, void *arg)
{
struct trtl_dev *trtl =arg;
struct trtl_dev *trtl = arg;
struct trtl_hmq *hmq;
uint64_t status;
int i, n_disp = 0;
......@@ -967,8 +973,10 @@ static void trtl_message_push(struct trtl_hmq *hmq, struct trtl_msg *msg)
{
struct trtl_dev *trtl = to_trtl_dev(hmq->dev.parent);
unsigned long flags;
size_t size;
if (unlikely(msg->hdr.len > TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes))) {
size = TRTL_CONFIG_ROM_MQ_SIZE_PAYLOAD(hmq->cfg->sizes);
if (unlikely(msg->hdr.len > size)) {
dev_err(&hmq->dev,
"The message (%d bytes) does not fit in the maximum message size (%d bytes)\n",
msg->hdr.len,
......@@ -1037,7 +1045,7 @@ static uint64_t trtl_hmq_irq_status_out(struct trtl_dev *trtl)
*/
irqreturn_t trtl_irq_handler_out(int irq_core_base, void *arg)
{
struct trtl_dev *trtl =arg;
struct trtl_dev *trtl = arg;
uint64_t status;
int i, freeslots;
struct trtl_hmq *hmq;
......@@ -1049,7 +1057,7 @@ irqreturn_t trtl_irq_handler_out(int irq_core_base, void *arg)
i = -1;
while (status && i < TRTL_MAX_MQ_CHAN * TRTL_MAX_CPU) {
++i;
if (!(status & 0x1)){
if (!(status & 0x1)) {
status >>= 1;
continue;
}
......@@ -1102,6 +1110,7 @@ int trtl_probe_hmq(struct trtl_cpu *cpu, unsigned int hmq_idx)
struct trtl_dev *trtl = to_trtl_dev(cpu->dev.parent);
struct trtl_hmq *hmq = &cpu->hmq[hmq_idx];
int err;
size_t size;
hmq->index = hmq_idx;
......@@ -1142,15 +1151,12 @@ int trtl_probe_hmq(struct trtl_cpu *cpu, unsigned int hmq_idx)
goto out_dev;
/* Allocate buffers */
hmq->buf_in.msg = devm_kzalloc(&hmq->dev,
sizeof(struct trtl_msg) * hmq_buf_max_msg,
GFP_KERNEL);
size = sizeof(struct trtl_msg) * hmq_buf_max_msg;
hmq->buf_in.msg = devm_kzalloc(&hmq->dev, size, GFP_KERNEL);
if (!hmq->buf_in.msg)
goto out_msg_in;
hmq->buf_out.msg= devm_kzalloc(&hmq->dev,
sizeof(struct trtl_msg) * hmq_buf_max_msg,
GFP_KERNEL);
hmq->buf_out.msg = devm_kzalloc(&hmq->dev, size, GFP_KERNEL);
if (!hmq->buf_out.msg)
goto out_msg_out;
......
......@@ -26,7 +26,7 @@ static DEFINE_IDA(trtl_tty_ida);
static inline void trtl_tty_insert_flip_char(struct trtl_cpu *cpu,
unsigned char ch, char flag)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#if KERNEL_VERSION(3, 9, 0) > LINUX_VERSION_CODE
tty_insert_flip_char(cpu->tty_port.tty, ch, flag);
#else
tty_insert_flip_char(&cpu->tty_port, ch, flag);
......@@ -35,7 +35,7 @@ static inline void trtl_tty_insert_flip_char(struct trtl_cpu *cpu,
static inline void trtl_tty_flip_buffer_push(struct trtl_cpu *cpu)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#if KERNEL_VERSION(3, 9, 0) > LINUX_VERSION_CODE
tty_schedule_flip(cpu->tty_port.tty);
#else
tty_schedule_flip(&cpu->tty_port);
......@@ -46,7 +46,7 @@ static inline void trtl_tty_flip_buffer_push(struct trtl_cpu *cpu)
* trtl_tty_handler_getchar - Get Character From soft-CPU
* It retrieves a character from the soft-CPU serial interface
* and it adds the character to the TTY buffer
* @cpu: the soft-CPU to use
* @cpu: the soft-CPU to use
*/
static void trtl_tty_handler_getchar(struct trtl_cpu *cpu)
{
......@@ -65,7 +65,7 @@ static void trtl_tty_handler_getchar(struct trtl_cpu *cpu)
if (!cpu->tty_port.tty) /* Not open */
return;
trtl_tty_insert_flip_char(cpu, c , TTY_NORMAL);
trtl_tty_insert_flip_char(cpu, c, TTY_NORMAL);
}
......@@ -244,9 +244,8 @@ int trtl_tty_probe(struct trtl_dev *trtl)
trtl->tty_driver->init_termios.c_lflag = 0;
err = tty_register_driver(trtl->tty_driver);
if (err < 0) {
if (err < 0)
goto err_tty;
}
for (i = 0; i < trtl->cfgrom.n_cpu; ++i) {
err = trtl_tty_port_init(&trtl->cpu[i]);
......@@ -269,7 +268,7 @@ int trtl_tty_probe(struct trtl_dev *trtl)
err_irq:
err_port:
while(--i)
while (--i)
trtl_tty_port_exit(&trtl->cpu[i]);
tty_unregister_driver(trtl->tty_driver);
err_tty:
......