Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/chip.c          |  13
-rw-r--r--  kernel/irq/generic-chip.c  | 316
-rw-r--r--  kernel/irq/irqdomain.c     |  17
-rw-r--r--  kernel/irq/manage.c        |  17
4 files changed, 298 insertions(+), 65 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index cbd97ce0b000..a3bb14fbe5c6 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -213,6 +213,19 @@ void irq_enable(struct irq_desc *desc)
irq_state_clr_masked(desc);
}
+/**
+ * irq_disable - Mark interrupt disabled
+ * @desc: irq descriptor which should be disabled
+ *
+ * If the chip does not implement the irq_disable callback, we
+ * use a lazy disable approach. That means we mark the interrupt
+ * disabled, but leave the hardware unmasked. That's an
+ * optimization because we avoid the hardware access for the
+ * common case where no interrupt happens after we marked it
+ * disabled. If an interrupt happens, then the interrupt flow
+ * handler masks the line at the hardware level and marks it
+ * pending.
+ */
void irq_disable(struct irq_desc *desc)
{
irq_state_set_disabled(desc);
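
A hedged illustration of the lazy-disable behaviour documented above (a driver-side sketch, not part of this patch; the foo_* names and register accesses are hypothetical): an irq_chip that provides no .irq_disable callback, so disable_irq() only marks the line disabled and the hardware is masked lazily by the flow handler if an interrupt actually arrives.

#include <linux/irq.h>

/* hypothetical device callbacks; a real driver would poke its mask register */
static void foo_irq_mask(struct irq_data *d)
{
	/* set the bit for d->hwirq in the device mask register */
}

static void foo_irq_unmask(struct irq_data *d)
{
	/* clear the bit for d->hwirq in the device mask register */
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	/* no .irq_disable: the core falls back to the lazy disable path */
};
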
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index c89295a8f668..e3544c19bdd2 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
@@ -16,11 +17,6 @@
static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);
-static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
-{
- return &container_of(d->chip, struct irq_chip_type, chip)->regs;
-}
-
/**
* irq_gc_noop - NOOP function
* @d: irq_data
@@ -39,16 +35,17 @@ void irq_gc_noop(struct irq_data *d)
void irq_gc_mask_disable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
- gc->mask_cache &= ~mask;
+ irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
+ *ct->mask_cache &= ~mask;
irq_gc_unlock(gc);
}
/**
- * irq_gc_mask_set_mask_bit - Mask chip via setting bit in mask register
+ * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
@@ -57,16 +54,18 @@ void irq_gc_mask_disable_reg(struct irq_data *d)
void irq_gc_mask_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- gc->mask_cache |= mask;
- irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
+ *ct->mask_cache |= mask;
+ irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
irq_gc_unlock(gc);
}
+EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
/**
- * irq_gc_mask_set_mask_bit - Mask chip via clearing bit in mask register
+ * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
@@ -75,13 +74,15 @@ void irq_gc_mask_set_bit(struct irq_data *d)
void irq_gc_mask_clr_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- gc->mask_cache &= ~mask;
- irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
irq_gc_unlock(gc);
}
+EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
/**
* irq_gc_unmask_enable_reg - Unmask chip via enable register
@@ -93,11 +94,12 @@ void irq_gc_mask_clr_bit(struct irq_data *d)
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
- gc->mask_cache |= mask;
+ irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
+ *ct->mask_cache |= mask;
irq_gc_unlock(gc);
}
@@ -108,12 +110,14 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
void irq_gc_ack_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
irq_gc_unlock(gc);
}
+EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
/**
* irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
@@ -122,25 +126,27 @@ void irq_gc_ack_set_bit(struct irq_data *d)
void irq_gc_ack_clr_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = ~(1 << (d->irq - gc->irq_base));
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = ~d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
irq_gc_unlock(gc);
}
/**
- * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt
+ * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
* @d: irq_data
*/
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
irq_gc_unlock(gc);
}
@@ -151,16 +157,18 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
void irq_gc_eoi(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
+ irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
irq_gc_unlock(gc);
}
/**
* irq_gc_set_wake - Set/clr wake bit for an interrupt
- * @d: irq_data
+ * @d: irq_data
+ * @on: Indicates whether the wake bit should be set or cleared
*
* For chips where the wake from suspend functionality is not
* configured in a separate register and the wakeup active state is
@@ -169,7 +177,7 @@ void irq_gc_eoi(struct irq_data *d)
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
if (!(mask & gc->wake_enabled))
return -EINVAL;
@@ -183,6 +191,19 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static void
+irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+ int num_ct, unsigned int irq_base,
+ void __iomem *reg_base, irq_flow_handler_t handler)
+{
+ raw_spin_lock_init(&gc->lock);
+ gc->num_ct = num_ct;
+ gc->irq_base = irq_base;
+ gc->reg_base = reg_base;
+ gc->chip_types->chip.name = name;
+ gc->chip_types->handler = handler;
+}
+
/**
* irq_alloc_generic_chip - Allocate a generic chip and initialize it
* @name: Name of the irq chip
@@ -203,23 +224,185 @@ irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
gc = kzalloc(sz, GFP_KERNEL);
if (gc) {
- raw_spin_lock_init(&gc->lock);
- gc->num_ct = num_ct;
- gc->irq_base = irq_base;
- gc->reg_base = reg_base;
- gc->chip_types->chip.name = name;
- gc->chip_types->handler = handler;
+ irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
+ handler);
}
return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
+static void
+irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
+{
+ struct irq_chip_type *ct = gc->chip_types;
+ u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
+ int i;
+
+ for (i = 0; i < gc->num_ct; i++) {
+ if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
+ mskptr = &ct[i].mask_cache_priv;
+ mskreg = ct[i].regs.mask;
+ }
+ ct[i].mask_cache = mskptr;
+ if (flags & IRQ_GC_INIT_MASK_CACHE)
+ *mskptr = irq_reg_readl(gc->reg_base + mskreg);
+ }
+}
+
+/**
+ * irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
+ * @d: irq domain for which to allocate chips
+ * @irqs_per_chip: Number of interrupts each chip handles
+ * @num_ct: Number of irq_chip_type instances associated with each chip
+ * @name: Name of the irq chip
+ * @handler: Default flow handler associated with these chips
+ * @clr: IRQ_* bits to clear in the mapping function
+ * @set: IRQ_* bits to set in the mapping function
+ * @gcflags: Generic chip specific setup flags
+ */
+int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+ int num_ct, const char *name,
+ irq_flow_handler_t handler,
+ unsigned int clr, unsigned int set,
+ enum irq_gc_flags gcflags)
+{
+ struct irq_domain_chip_generic *dgc;
+ struct irq_chip_generic *gc;
+ int numchips, sz, i;
+ unsigned long flags;
+ void *tmp;
+
+ if (d->gc)
+ return -EBUSY;
+
+ if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
+ return -EINVAL;
+
+ numchips = d->revmap_data.linear.size / irqs_per_chip;
+ if (!numchips)
+ return -EINVAL;
+
+ /* Allocate a pointer, generic chip and chiptypes for each chip */
+ sz = sizeof(*dgc) + numchips * sizeof(gc);
+ sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
+
+ tmp = dgc = kzalloc(sz, GFP_KERNEL);
+ if (!dgc)
+ return -ENOMEM;
+ dgc->irqs_per_chip = irqs_per_chip;
+ dgc->num_chips = numchips;
+ dgc->irq_flags_to_set = set;
+ dgc->irq_flags_to_clear = clr;
+ dgc->gc_flags = gcflags;
+ d->gc = dgc;
+
+ /* Calc pointer to the first generic chip */
+ tmp += sizeof(*dgc) + numchips * sizeof(gc);
+ for (i = 0; i < numchips; i++) {
+ /* Store the pointer to the generic chip */
+ dgc->gc[i] = gc = tmp;
+ irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
+ NULL, handler);
+ gc->domain = d;
+ raw_spin_lock_irqsave(&gc_lock, flags);
+ list_add_tail(&gc->list, &gc_list);
+ raw_spin_unlock_irqrestore(&gc_lock, flags);
+ /* Calc pointer to the next generic chip */
+ tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
+
+/**
+ * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
+ * @d: irq domain pointer
+ * @hw_irq: Hardware interrupt number
+ */
+struct irq_chip_generic *
+irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
+{
+ struct irq_domain_chip_generic *dgc = d->gc;
+ int idx;
+
+ if (!dgc)
+ return NULL;
+ idx = hw_irq / dgc->irqs_per_chip;
+ if (idx >= dgc->num_chips)
+ return NULL;
+ return dgc->gc[idx];
+}
+EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
+
/*
* Separate lockdep class for interrupt chip which can nest irq_desc
* lock.
*/
static struct lock_class_key irq_nested_lock_class;
+/*
+ * irq_map_generic_chip - Map a generic chip for an irq domain
+ */
+static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ struct irq_data *data = irq_get_irq_data(virq);
+ struct irq_domain_chip_generic *dgc = d->gc;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct irq_chip *chip;
+ unsigned long flags;
+ int idx;
+
+ if (!d->gc)
+ return -ENODEV;
+
+ idx = hw_irq / dgc->irqs_per_chip;
+ if (idx >= dgc->num_chips)
+ return -EINVAL;
+ gc = dgc->gc[idx];
+
+ idx = hw_irq % dgc->irqs_per_chip;
+
+ if (test_bit(idx, &gc->unused))
+ return -ENOTSUPP;
+
+ if (test_bit(idx, &gc->installed))
+ return -EBUSY;
+
+ ct = gc->chip_types;
+ chip = &ct->chip;
+
+ /* We only init the cache for the first mapping of a generic chip */
+ if (!gc->installed) {
+ raw_spin_lock_irqsave(&gc->lock, flags);
+ irq_gc_init_mask_cache(gc, dgc->gc_flags);
+ raw_spin_unlock_irqrestore(&gc->lock, flags);
+ }
+
+ /* Mark the interrupt as installed */
+ set_bit(idx, &gc->installed);
+
+ if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
+ irq_set_lockdep_class(virq, &irq_nested_lock_class);
+
+ if (chip->irq_calc_mask)
+ chip->irq_calc_mask(data);
+ else
+ data->mask = 1 << idx;
+
+ irq_set_chip_and_handler(virq, chip, ct->handler);
+ irq_set_chip_data(virq, gc);
+ irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
+ return 0;
+}
+
+struct irq_domain_ops irq_generic_chip_ops = {
+ .map = irq_map_generic_chip,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
+
/**
* irq_setup_generic_chip - Setup a range of interrupts with a generic chip
* @gc: Generic irq chip holding all data
@@ -237,15 +420,14 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int set)
{
struct irq_chip_type *ct = gc->chip_types;
+ struct irq_chip *chip = &ct->chip;
unsigned int i;
raw_spin_lock(&gc_lock);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock(&gc_lock);
- /* Init mask cache ? */
- if (flags & IRQ_GC_INIT_MASK_CACHE)
- gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
+ irq_gc_init_mask_cache(gc, flags);
for (i = gc->irq_base; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
@@ -254,7 +436,15 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
if (flags & IRQ_GC_INIT_NESTED_LOCK)
irq_set_lockdep_class(i, &irq_nested_lock_class);
- irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+ if (!(flags & IRQ_GC_NO_MASK)) {
+ struct irq_data *d = irq_get_irq_data(i);
+
+ if (chip->irq_calc_mask)
+ chip->irq_calc_mask(d);
+ else
+ d->mask = 1 << (i - gc->irq_base);
+ }
+ irq_set_chip_and_handler(i, chip, ct->handler);
irq_set_chip_data(i, gc);
irq_modify_status(i, clr, set);
}
@@ -265,7 +455,7 @@ EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
/**
* irq_setup_alt_chip - Switch to alternative chip
* @d: irq_data for this interrupt
- * @type Flow type to be initialized
+ * @type: Flow type to be initialized
*
* Only to be called from chip->irq_set_type() callbacks.
*/
@@ -317,6 +507,24 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
+{
+ unsigned int virq;
+
+ if (!gc->domain)
+ return irq_get_irq_data(gc->irq_base);
+
+ /*
+ * We don't know which of the irqs has been actually
+ * installed. Use the first one.
+ */
+ if (!gc->installed)
+ return NULL;
+
+ virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
+ return virq ? irq_get_irq_data(virq) : NULL;
+}
+
#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
@@ -325,8 +533,12 @@ static int irq_gc_suspend(void)
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
- if (ct->chip.irq_suspend)
- ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
+ if (ct->chip.irq_suspend) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_suspend(data);
+ }
}
return 0;
}
@@ -338,8 +550,12 @@ static void irq_gc_resume(void)
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
- if (ct->chip.irq_resume)
- ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
+ if (ct->chip.irq_resume) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_resume(data);
+ }
}
}
#else
@@ -354,8 +570,12 @@ static void irq_gc_shutdown(void)
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
- if (ct->chip.irq_pm_shutdown)
- ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
+ if (ct->chip.irq_pm_shutdown) {
+ struct irq_data *data = irq_gc_get_irq_data(gc);
+
+ if (data)
+ ct->chip.irq_pm_shutdown(data);
+ }
}
}
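
To show how the new domain-aware helpers fit together, here is a hedged usage sketch for a hypothetical driver with one 32-interrupt bank. The foo_* names and register offsets are assumptions; only irq_alloc_domain_generic_chips(), irq_get_domain_generic_chip() and irq_generic_chip_ops come from this patch, the rest are existing kernel APIs.

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int foo_init_irq_domain(struct device_node *node, void __iomem *base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_domain *domain;
	int ret;

	/* one bank of 32 interrupts, mapped on demand */
	domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* one generic chip with a single irq_chip_type covering the bank */
	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "foo",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	/* fill in the hardware details before any interrupt is mapped */
	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	ct = gc->chip_types;
	ct->regs.mask = 0x04;	/* hypothetical register offsets */
	ct->regs.ack  = 0x08;
	ct->chip.irq_ack    = irq_gc_ack_set_bit;
	ct->chip.irq_mask   = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;

	return 0;
}

Each later irq_create_mapping() against this domain then goes through irq_map_generic_chip() above, which computes d->mask, initializes the mask cache on the first mapping and installs the chip, flow handler and chip data.
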
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5a83dde8ca0c..1ed8dff17eb9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -16,12 +16,6 @@
#include <linux/smp.h>
#include <linux/fs.h>
-#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
- * ie. legacy 8259, gets irqs 1..15 */
-#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
-
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);
@@ -143,7 +137,10 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
- * @first_irq: first number of irq block assigned to the domain
+ * @first_irq: first number of irq block assigned to the domain,
+ * pass zero to assign irqs on-the-fly. This will result in a
+ * linear IRQ domain so it is important to use irq_create_mapping()
+ * for each used IRQ, especially when SPARSE_IRQ is enabled.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
@@ -191,6 +188,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
/* A linear domain is the default */
return irq_domain_add_linear(of_node, size, ops, host_data);
}
+EXPORT_SYMBOL_GPL(irq_domain_add_simple);
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
@@ -397,11 +395,12 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
while (count--) {
int irq = irq_base + count;
struct irq_data *irq_data = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irq_data->hwirq;
+ irq_hw_number_t hwirq;
if (WARN_ON(!irq_data || irq_data->domain != domain))
continue;
+ hwirq = irq_data->hwirq;
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
@@ -693,7 +692,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
- type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+ type != irq_get_trigger_type(virq))
irq_set_irq_type(virq, type);
return virq;
}
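
A minimal sketch of the first_irq == 0 case described in the updated irq_domain_add_simple() comment (assumed driver code; irq_domain_simple_ops is the existing generic ops used here only for brevity): with no preallocated irq block the call falls back to a linear domain, so every hardware irq that will be used has to be mapped explicitly, which matters in particular when SPARSE_IRQ is enabled.

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *foo_setup_domain(struct device_node *node)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;

	/* first_irq == 0: no fixed irq range, a linear domain is returned */
	domain = irq_domain_add_simple(node, 16, 0, &irq_domain_simple_ops, NULL);
	if (!domain)
		return NULL;

	/* with SPARSE_IRQ the descriptors only come into existence here */
	for (hwirq = 0; hwirq < 16; hwirq++)
		irq_create_mapping(domain, hwirq);

	return domain;
}
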
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa17855ca65a..514bcfd855a8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -555,9 +555,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
return 0;
if (irq_settings_can_request(desc)) {
- if (desc->action)
- if (irqflags & desc->action->flags & IRQF_SHARED)
- canrequest =1;
+ if (!desc->action ||
+ irqflags & desc->action->flags & IRQF_SHARED)
+ canrequest = 1;
}
irq_put_desc_unlock(desc, flags);
return canrequest;
@@ -840,9 +840,6 @@ static void irq_thread_dtor(struct callback_head *unused)
static int irq_thread(void *data)
{
struct callback_head on_exit_work;
- static const struct sched_param param = {
- .sched_priority = MAX_USER_RT_PRIO/2,
- };
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
irqreturn_t (*handler_fn)(struct irq_desc *desc,
@@ -854,8 +851,6 @@ static int irq_thread(void *data)
else
handler_fn = irq_thread_fn;
- sched_setscheduler(current, SCHED_FIFO, &param);
-
init_task_work(&on_exit_work, irq_thread_dtor);
task_work_add(current, &on_exit_work, false);
@@ -950,6 +945,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
if (new->thread_fn && !nested) {
struct task_struct *t;
+ static const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
new->name);
@@ -957,6 +955,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
ret = PTR_ERR(t);
goto out_mput;
}
+
+ sched_setscheduler(t, SCHED_FIFO, &param);
+
/*
* We keep the reference to the task struct even if
* the thread dies to avoid that the interrupt code
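
For completeness, a hedged driver-side sketch (hypothetical foo_* names) showing that nothing changes for request_threaded_irq() users: the core now applies the SCHED_FIFO priority to the handler thread in __setup_irq(), before the thread ever runs, instead of from inside irq_thread() itself.

#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* quiesce the device, defer the heavy lifting to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* runs in the "irq/<nr>-foo" kthread, already at SCHED_FIFO priority */
	return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				    IRQF_ONESHOT, "foo", dev);
}
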