linux arm irq (4): interrupt driver interface
2021/5/17 7:26:01
linux arm irq (4)
4 interrupt driver interface
Author: Yangkai Wang
wang_yangkai@163.com
Coding in 2021/05/16
Please credit the author and the source when reposting.
linux version 3.4.39
s5p6818 SoC
Cortex-A53 octa-core CPU
Interrupt Controller: GIC-400
GIC (Generic Interrupt Controller), reference: Arm Generic Interrupt Controller Architecture version 2.0, Architecture Specification
GPIO controller, reference: S5P6818 Application Processor Datasheet
-
#include <linux/interrupt.h>
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
        const char *name, void *dev)
{
    return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
etc.
example code:
#define CFG_KEYPAD_KEY_OK       { PAD_GPIO_B + 31 }
#define CFG_KEYPAD_KEY_OK_CODE  { KEY_OK }  /* 352 */

/* drivers/input/keyboard/nxp_io_key.c */
...
static int nxp_key_probe(struct platform_device *pdev)
{
    ...
    printk("~~~ %s() gpio:%d, irqno:%d, call request_irq()\n", \
        __func__, code->io, gpio_to_irq(code->io));
    ret = request_irq(gpio_to_irq(code->io), nxp_key_irqhnd,
              (IRQF_SHARED | IRQ_TYPE_EDGE_BOTH), pdev->name, code);
    if (ret) {
        pr_err("fail, gpio[%d] %s request irq...\n", code->io, pdev->name);
        goto err_irq;
    }

    printk("~~~ %s() gpio:%d, irqno:%d, call disable_irq()\n", \
        __func__, code->io, gpio_to_irq(code->io));
    disable_irq(gpio_to_irq(code->io));

    printk("~~~ %s() gpio:%d, irqno:%d, call enable_irq()\n", \
        __func__, code->io, gpio_to_irq(code->io));
    enable_irq(gpio_to_irq(code->io));

    printk("~~~ %s() gpio:%d, irqno:%d, call disable_irq()\n", \
        __func__, code->io, gpio_to_irq(code->io));
    disable_irq(gpio_to_irq(code->io));

    printk("~~~ %s() gpio:%d, irqno:%d, call enable_irq()\n", \
        __func__, code->io, gpio_to_irq(code->io));
    enable_irq(gpio_to_irq(code->io));
    ...
}
...
static irqreturn_t nxp_key_irqhnd(int irqno, void *dev_id)
{
    struct key_code *code = dev_id;

    printk("~~~ %s() irqno:%d\n", __func__, irqno);
    queue_delayed_work(code->kcode_wq, &code->kcode_work, DELAY_WORK_JIFFIES);

    return IRQ_HANDLED;
}
...
log
[ 1.688000] ~~~ nxp_key_probe() gpio:63, irqno:169, call request_irq()
[ 1.692000] gpio_set_type_irq: gpio irq = 169, GPIOB.31, type=0x3
[ 1.692000] reg=0xf001b00c, val=0x00000000
[ 1.692000] reg=0xf001b028, val=0x80000000
[ 1.692000] reg=0xf001b024, val=0x55550002
[ 1.692000] ~~~ __setup_irq() irq:169, desc->depth:1, call irq_startup()
[ 1.692000] ~~~ irq_startup() irq:169, call irq_enable()
[ 1.692000] gpio_irq_enable: gpio irq = 169, GPIOB.31
[ 1.692000] ~~~ gpio_irq_enable() gpio irq:169, GPIOB.31
[ 1.720000] ~~~ nxp_key_probe() gpio:63, irqno:169, call disable_irq()
[ 1.724000] gpio_irq_disable: gpio irq = 169, GPIOB.31
[ 1.724000] ~~~ gpio_irq_disable() gpio irq:169, GPIOB.31
[ 1.736000] ~~~ nxp_key_probe() gpio:63, irqno:169, call enable_irq()
[ 1.740000] ~~~ __enable_irq() do, irq:169, desc->depth:1
[ 1.740000] gpio_irq_enable: gpio irq = 169, GPIOB.31
[ 1.740000] ~~~ gpio_irq_enable() gpio irq:169, GPIOB.31
[ 1.740000] ~~~ __enable_irq() done, irq:169, desc->depth:0
[ 1.760000] ~~~ nxp_key_probe() gpio:63, irqno:169, call disable_irq()
[ 1.764000] gpio_irq_disable: gpio irq = 169, GPIOB.31
[ 1.764000] ~~~ gpio_irq_disable() gpio irq:169, GPIOB.31
[ 1.776000] ~~~ nxp_key_probe() gpio:63, irqno:169, call enable_irq()
[ 1.784000] ~~~ __enable_irq() do, irq:169, desc->depth:1
[ 1.784000] gpio_irq_enable: gpio irq = 169, GPIOB.31
[ 1.784000] ~~~ gpio_irq_enable() gpio irq:169, GPIOB.31
[ 1.784000] ~~~ __enable_irq() done, irq:169, desc->depth:0
[ 1.800000] [1] key io= 63, code= 352
- gpio_to_irq
/* arch/arm/include/asm/gpio.h */
#ifndef _ARCH_ARM_GPIO_H
#define _ARCH_ARM_GPIO_H

#if CONFIG_ARCH_NR_GPIO > 0
#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
#endif

/* not all ARM platforms necessarily support this API ... */
#include <mach/gpio.h>

#ifndef __ARM_GPIOLIB_COMPLEX
/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
#include <asm-generic/gpio.h>

/* The trivial gpiolib dispatchers */
#define gpio_get_value  __gpio_get_value
#define gpio_set_value  __gpio_set_value
#define gpio_cansleep   __gpio_cansleep
#endif

/*
 * Provide a default gpio_to_irq() which should satisfy every case.
 * However, some platforms want to do this differently, so allow them
 * to override it.
 */
#ifndef gpio_to_irq
#define gpio_to_irq __gpio_to_irq
#endif

#endif /* _ARCH_ARM_GPIO_H */
/* drivers/gpio/gpiolib.c */
/**
 * __gpio_to_irq() - return the IRQ corresponding to a GPIO
 * @gpio: gpio whose IRQ will be returned (already requested)
 * Context: any
 *
 * This is used directly or indirectly to implement gpio_to_irq().
 * It returns the number of the IRQ signaled by this (input) GPIO,
 * or a negative errno.
 */
int __gpio_to_irq(unsigned gpio)
{
    struct gpio_chip *chip;

    chip = gpio_to_chip(gpio);
    return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO;
}
EXPORT_SYMBOL_GPL(__gpio_to_irq);
/* drivers/gpio/gpio-nxp.c */
static int nxp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
    struct nxp_gpio *gpio = GET_GPIO(chip);
    unsigned int io = gpio->index * GPIO_NUM_PER_BANK + offset;

    /*printk("~~~ %s() offset:%d, io:%d, irq:%d\n", __func__, \
        offset, io, io + IRQ_GPIO_START);*/

    return (io + IRQ_GPIO_START);
}
GPIO number to IRQ number: the mapping is fixed, irq = io number + IRQ_GPIO_START.
On linux 3.4.39 for the s5p6818, IRQ numbers are statically assigned; reference: arch/arm/mach-s5p6818/include/mach/s5p6818_irq.h
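A minimal sketch of this fixed mapping. The constants are assumptions for illustration only: GPIO_NUM_PER_BANK = 32 matches the bank arithmetic in gpio-nxp.c above, and IRQ_GPIO_START is inferred from the log (irq 169 for gpio 63 implies 106 on this board); check s5p6818_irq.h for the real values.

/* Sketch only: constants are assumed, not quoted from the platform headers. */
#define GPIO_NUM_PER_BANK   32
#define IRQ_GPIO_START      106     /* inferred from the log: 169 - 63 */

/* PAD_GPIO_B + 31 corresponds to bank index 1 (GPIOB), offset 31 */
static unsigned int demo_gpio_to_irq(unsigned int bank, unsigned int offset)
{
    unsigned int io = bank * GPIO_NUM_PER_BANK + offset;    /* 1 * 32 + 31 = 63 */

    return io + IRQ_GPIO_START;                             /* 63 + 106 = 169 */
}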
- request_irq()
/* include/linux/interrupt.h */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
        const char *name, void *dev)
{
    return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
/* kernel/irq/manage.c */
/**
 *  request_threaded_irq - allocate an interrupt line
 *  @irq: Interrupt line to allocate
 *  @handler: Function to be called when the IRQ occurs.
 *        Primary handler for threaded interrupts
 *        If NULL and thread_fn != NULL the default
 *        primary handler is installed
 *  @thread_fn: Function called from the irq handler thread
 *          If NULL, no irq thread is created
 *  @irqflags: Interrupt type flags
 *  @devname: An ascii name for the claiming device
 *  @dev_id: A cookie passed back to the handler function
 *
 *  This call allocates interrupt resources and enables the
 *  interrupt line and IRQ handling. From the point this
 *  call is made your handler function may be invoked. Since
 *  your handler function must clear any interrupt the board
 *  raises, you must take care both to initialise your hardware
 *  and to set up the interrupt handler in the right order.
 *
 *  If you want to set up a threaded irq handler for your device
 *  then you need to supply @handler and @thread_fn. @handler is
 *  still called in hard interrupt context and has to check
 *  whether the interrupt originates from the device. If yes it
 *  needs to disable the interrupt on the device and return
 *  IRQ_WAKE_THREAD which will wake up the handler thread and run
 *  @thread_fn. This split handler design is necessary to support
 *  shared interrupts.
 *
 *  Dev_id must be globally unique. Normally the address of the
 *  device data structure is used as the cookie. Since the handler
 *  receives this value it makes sense to use it.
 *
 *  If your interrupt is shared you must pass a non NULL dev_id
 *  as this is required when freeing the interrupt.
 *
 *  Flags:
 *
 *  IRQF_SHARED     Interrupt is shared
 *  IRQF_TRIGGER_*  Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
             irq_handler_t thread_fn, unsigned long irqflags,
             const char *devname, void *dev_id)
{
    struct irqaction *action;
    struct irq_desc *desc;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ((irqflags & IRQF_SHARED) && !dev_id)
        return -EINVAL;

    desc = irq_to_desc(irq);
    if (!desc)
        return -EINVAL;

    if (!irq_settings_can_request(desc) ||
        WARN_ON(irq_settings_is_per_cpu_devid(desc)))
        return -EINVAL;

    if (!handler) {
        if (!thread_fn)
            return -EINVAL;
        handler = irq_default_primary_handler;
    }

    action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->thread_fn = thread_fn;
    action->flags = irqflags;
    action->name = devname;
    action->dev_id = dev_id;

    chip_bus_lock(desc);
    retval = __setup_irq(irq, desc, action);
    chip_bus_sync_unlock(desc);

    if (retval)
        kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
    if (!retval && (irqflags & IRQF_SHARED)) {
        /*
         * It's a shared IRQ -- the driver ought to be prepared for it
         * to happen immediately, so let's make sure....
         * We disable the irq to make sure that a 'real' IRQ doesn't
         * run in parallel with our fake.
         */
        unsigned long flags;

        disable_irq(irq);
        local_irq_save(flags);

        handler(irq, dev_id);

        local_irq_restore(flags);
        enable_irq(irq);
    }
#endif
    return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
alloc struct irqaction *action;
struct irq_desc *desc = irq_to_desc(irq);
retval = __setup_irq(irq, desc, action); /* core ops */
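The kernel-doc block above describes the handler/thread_fn split. A hedged sketch of how a driver might use the threaded variant; every name here (my_dev, my_quick_check, my_thread_fn, the register offsets) is hypothetical and not taken from the nxp_io_key driver.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device; the register layout is made up for the sketch. */
struct my_dev {
    void __iomem *regs;
    int irq;
};

#define MY_IRQ_STATUS   0x00    /* hypothetical status register offset */
#define MY_IRQ_MASK     0x04    /* hypothetical mask register offset */

/* Hard-irq part: runs in hard interrupt context, must not sleep. */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
    struct my_dev *dev = dev_id;

    if (!(readl(dev->regs + MY_IRQ_STATUS) & 0x1))
        return IRQ_NONE;                        /* not ours (shared line) */

    writel(0x0, dev->regs + MY_IRQ_MASK);       /* quiesce the device */
    return IRQ_WAKE_THREAD;                     /* wake the irq thread */
}

/* Threaded part: runs in process context, may sleep. */
static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
    struct my_dev *dev = dev_id;

    /* ... heavy lifting, possibly sleeping I/O ... */
    writel(0x1, dev->regs + MY_IRQ_MASK);       /* unmask at the device */
    return IRQ_HANDLED;
}

/* In probe():
 *  ret = request_threaded_irq(dev->irq, my_quick_check, my_thread_fn,
 *                 IRQF_SHARED | IRQF_TRIGGER_FALLING,
 *                 "my_dev", dev);
 */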
- static int __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
    struct irqaction *old, **old_ptr;
    const char *old_name = NULL;
    unsigned long flags, thread_mask = 0;
    int ret, nested, shared = 0;
    cpumask_var_t mask;

    if (!desc)
        return -EINVAL;

    if (desc->irq_data.chip == &no_irq_chip)
        return -ENOSYS;
    if (!try_module_get(desc->owner))
        return -ENODEV;

    /*
     * Check whether the interrupt nests into another interrupt
     * thread.
     */
    nested = irq_settings_is_nested_thread(desc);
    if (nested) {
        if (!new->thread_fn) {
            ret = -EINVAL;
            goto out_mput;
        }
        /*
         * Replace the primary handler which was provided from
         * the driver for non nested interrupt handling by the
         * dummy function which warns when called.
         */
        new->handler = irq_nested_primary_handler;
    } else {
        if (irq_settings_can_thread(desc))
            irq_setup_forced_threading(new);
    }

    /*
     * Create a handler thread when a thread function is supplied
     * and the interrupt does not nest into another interrupt
     * thread.
     */
    if (new->thread_fn && !nested) {
        struct task_struct *t;

        t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                   new->name);
        if (IS_ERR(t)) {
            ret = PTR_ERR(t);
            goto out_mput;
        }
        /*
         * We keep the reference to the task struct even if
         * the thread dies to avoid that the interrupt code
         * references an already freed task_struct.
         */
        get_task_struct(t);
        new->thread = t;
        /*
         * Tell the thread to set its affinity. This is
         * important for shared interrupt handlers as we do
         * not invoke setup_affinity() for the secondary
         * handlers as everything is already set up. Even for
         * interrupts marked with IRQF_NO_BALANCE this is
         * correct as we want the thread to move to the cpu(s)
         * on which the requesting code placed the interrupt.
         */
        set_bit(IRQTF_AFFINITY, &new->thread_flags);
    }

    if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
        ret = -ENOMEM;
        goto out_thread;
    }

    /*
     * The following block of code has to be executed atomically
     */
    raw_spin_lock_irqsave(&desc->lock, flags);
    old_ptr = &desc->action;
    old = *old_ptr;
    if (old) {
        /*
         * Can't share interrupts unless both agree to and are
         * the same type (level, edge, polarity). So both flag
         * fields must have IRQF_SHARED set and the bits which
         * set the trigger type must match. Also all must
         * agree on ONESHOT.
         */
        if (!((old->flags & new->flags) & IRQF_SHARED) ||
            ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
            ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
            old_name = old->name;
            goto mismatch;
        }

        /* All handlers must agree on per-cpuness */
        if ((old->flags & IRQF_PERCPU) !=
            (new->flags & IRQF_PERCPU))
            goto mismatch;

        /* add new interrupt at end of irq queue */
        do {
            /*
             * Or all existing action->thread_mask bits,
             * so we can find the next zero bit for this
             * new action.
             */
            thread_mask |= old->thread_mask;
            old_ptr = &old->next;
            old = *old_ptr;
        } while (old);
        shared = 1;
    }

    /*
     * Setup the thread mask for this irqaction for ONESHOT. For
     * !ONESHOT irqs the thread mask is 0 so we can avoid a
     * conditional in irq_wake_thread().
     */
    if (new->flags & IRQF_ONESHOT) {
        /*
         * Unlikely to have 32 resp 64 irqs sharing one line,
         * but who knows.
         */
        if (thread_mask == ~0UL) {
            ret = -EBUSY;
            goto out_mask;
        }
        /*
         * The thread_mask for the action is or'ed to
         * desc->thread_active to indicate that the
         * IRQF_ONESHOT thread handler has been woken, but not
         * yet finished. The bit is cleared when a thread
         * completes. When all threads of a shared interrupt
         * line have completed desc->threads_active becomes
         * zero and the interrupt line is unmasked. See
         * handle.c:irq_wake_thread() for further information.
         *
         * If no thread is woken by primary (hard irq context)
         * interrupt handlers, then desc->threads_active is
         * also checked for zero to unmask the irq line in the
         * affected hard irq flow handlers
         * (handle_[fasteoi|level]_irq).
         *
         * The new action gets the first zero bit of
         * thread_mask assigned. See the loop above which or's
         * all existing action->thread_mask bits.
         */
        new->thread_mask = 1 << ffz(thread_mask);
    }

    if (!shared) {
        init_waitqueue_head(&desc->wait_for_threads);

        /* Setup the type (level, edge polarity) if configured: */
        if (new->flags & IRQF_TRIGGER_MASK) {
            ret = __irq_set_trigger(desc, irq,
                    new->flags & IRQF_TRIGGER_MASK);

            if (ret)
                goto out_mask;
        }

        desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                  IRQS_ONESHOT | IRQS_WAITING);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

        if (new->flags & IRQF_PERCPU) {
            irqd_set(&desc->irq_data, IRQD_PER_CPU);
            irq_settings_set_per_cpu(desc);
        }

        if (new->flags & IRQF_ONESHOT)
            desc->istate |= IRQS_ONESHOT;

        if (irq_settings_can_autoenable(desc)) {
            printk("~~~ %s() irq:%u, desc->depth:%d, call irq_startup()\n", \
                __func__, irq, desc->depth);
            irq_startup(desc, true);
            /*printk("~~~ %s() irq:%u, desc->depth:%d\n", \
                __func__, irq, desc->depth);*/
        } else {
            /* Undo nested disables: */
            desc->depth = 1;
            printk("~~~ %s() irq:%d, set desc->depth = 1\n", __func__, irq);
        }

        /* Exclude IRQ from balancing if requested */
        if (new->flags & IRQF_NOBALANCING) {
            irq_settings_set_no_balancing(desc);
            irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        }

        /* Set default affinity mask once everything is setup */
        setup_affinity(irq, desc, mask);

    } else if (new->flags & IRQF_TRIGGER_MASK) {
        unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
        unsigned int omsk = irq_settings_get_trigger_mask(desc);

        if (nmsk != omsk)
            /* hope the handler works with current trigger mode */
            pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                   irq, nmsk, omsk);
    }

    new->irq = irq;
    *old_ptr = new;

    /* Reset broken irq detection when installing new handler */
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;

    /*
     * Check whether we disabled the irq via the spurious handler
     * before. Reenable it and give it another chance.
     */
    if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
        desc->istate &= ~IRQS_SPURIOUS_DISABLED;
        __enable_irq(desc, irq, false);
    }

    raw_spin_unlock_irqrestore(&desc->lock, flags);

    /*
     * Strictly no need to wake it up, but hung_task complains
     * when no hard interrupt wakes the thread up.
     */
    if (new->thread)
        wake_up_process(new->thread);

    register_irq_proc(irq, desc);
    new->dir = NULL;
    register_handler_proc(irq, new);
    free_cpumask_var(mask);

    return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
    if (!(new->flags & IRQF_PROBE_SHARED)) {
        printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
        if (old_name)
            printk(KERN_ERR "current handler: %s\n", old_name);
        dump_stack();
    }
#endif
    ret = -EBUSY;

out_mask:
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    free_cpumask_var(mask);

out_thread:
    if (new->thread) {
        struct task_struct *t = new->thread;

        new->thread = NULL;
        kthread_stop(t);
        put_task_struct(t);
    }
out_mput:
    module_put(desc->owner);
    return ret;
}
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
    /* the irq line is shared and desc->action already holds an action */
    ...
    shared = 1;
}
/* in our case shared stays 0 */

if (!shared) {
    /* Setup the type (level, edge polarity) if configured: */
    if (new->flags & IRQF_TRIGGER_MASK) {
        ret = __irq_set_trigger(desc, irq,
                new->flags & IRQF_TRIGGER_MASK);
            |
            /* caller masked out all except trigger mode flags */
            ret = chip->irq_set_type(&desc->irq_data, flags);
    }

    if (irq_settings_can_autoenable(desc)) {
        printk("~~~ %s() irq:%u, desc->depth:%d, call irq_startup()\n", \
            __func__, irq, desc->depth);
        irq_startup(desc, true);
            |
            if (desc->irq_data.chip->irq_startup) {
                /* */
            } else {
                printk("~~~ %s() irq:%u, call irq_enable()\n", \
                    __func__, desc->irq_data.irq);
                irq_enable(desc);
                /* kernel/irq/chip.c
                void irq_enable(struct irq_desc *desc)
                {
                    irq_state_clr_disabled(desc);
                    if (desc->irq_data.chip->irq_enable)
                        desc->irq_data.chip->irq_enable(&desc->irq_data);
                    else
                        desc->irq_data.chip->irq_unmask(&desc->irq_data);
                    irq_state_clr_masked(desc);
                }
                */
            }
    }
}

new->irq = irq;
*old_ptr = new;
Main operations:
__irq_set_trigger(desc, irq, new->flags & IRQF_TRIGGER_MASK), which calls chip->irq_set_type(&desc->irq_data, flags);
irq_startup(desc, true), which calls irq_enable(desc);
and
installing the action on the descriptor: *old_ptr = new, i.e. desc->action = new.
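Since __setup_irq() only accepts a second action when both requests carry IRQF_SHARED, use identical trigger bits and agree on ONESHOT, sharing one line looks roughly like the sketch below. The device structures and handlers (dev_a, dev_b) are hypothetical; each request passes its own dev_id so free_irq() and the handlers can tell the instances apart.

#include <linux/interrupt.h>

struct dev_a { int id; };   /* hypothetical per-device data, used as dev_id */
struct dev_b { int id; };

static irqreturn_t dev_a_handler(int irq, void *dev_id)
{
    /* dev_id is the struct dev_a * passed to request_irq() below.
     * A shared handler should check its own device's status register and
     * return IRQ_NONE when the interrupt is not its own, so the other
     * actions on the line get a chance to claim it. */
    return IRQ_HANDLED;
}

static irqreturn_t dev_b_handler(int irq, void *dev_id)
{
    return IRQ_HANDLED;     /* same idea for device B */
}

static int share_one_line(int irq, struct dev_a *a, struct dev_b *b)
{
    int ret;

    /* Both requests must pass IRQF_SHARED and identical trigger bits,
     * otherwise __setup_irq() takes the "mismatch" path and returns -EBUSY. */
    ret = request_irq(irq, dev_a_handler,
              IRQF_SHARED | IRQF_TRIGGER_FALLING, "dev_a", a);
    if (ret)
        return ret;

    ret = request_irq(irq, dev_b_handler,
              IRQF_SHARED | IRQF_TRIGGER_FALLING, "dev_b", b);
    if (ret)
        free_irq(irq, a);   /* dev_id selects which action to remove */

    return ret;
}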
- void disable_irq(unsigned int irq)
/* kernel/irq/manage.c */
/**
 *  disable_irq - disable an irq and wait for completion
 *  @irq: Interrupt to disable
 *
 *  Disable the selected interrupt line. Enables and Disables are
 *  nested.
 *  This function waits for any pending IRQ handlers for this interrupt
 *  to complete before returning. If you use this function while
 *  holding a resource the IRQ handler may need you will deadlock.
 *
 *  This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
    if (!__disable_irq_nosync(irq))
        synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
Enables and Disables are nested.
If you use this function while holding a resource the IRQ handler may need you will deadlock.
/* kernel/irq/manage.c */
static int __disable_irq_nosync(unsigned int irq)
{
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

    if (!desc)
        return -EINVAL;
    __disable_irq(desc, irq, false);
    irq_put_desc_busunlock(desc, flags);
    return 0;
}
/* kernel/irq/manage.c */
void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
    if (suspend) {
        if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
            return;
        desc->istate |= IRQS_SUSPENDED;
    }

    /*printk("~~~ %s() irq:%d, desc->depth:%d\n", __func__, \
        irq, desc->depth);*/

    if (!desc->depth++)
        irq_disable(desc);
}
/* kernel/irq/chip.c */
void irq_disable(struct irq_desc *desc)
{
    irq_state_set_disabled(desc);
    if (desc->irq_data.chip->irq_disable) {
        desc->irq_data.chip->irq_disable(&desc->irq_data);
        irq_state_set_masked(desc);
    }
}
/* kernel/irq/manage.c */
/**
 *  synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *  @irq: interrupt number to wait for
 *
 *  This function waits for any pending IRQ handlers for this interrupt
 *  to complete before returning. If you use this function while
 *  holding a resource the IRQ handler may need you will deadlock.
 *
 *  This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);
    bool inprogress;

    if (!desc)
        return;

    do {
        unsigned long flags;

        /*
         * Wait until we're out of the critical section. This might
         * give the wrong answer due to the lack of memory barriers.
         */
        while (irqd_irq_inprogress(&desc->irq_data))
            cpu_relax();

        /* Ok, that indicated we're done: double-check carefully. */
        raw_spin_lock_irqsave(&desc->lock, flags);
        inprogress = irqd_irq_inprogress(&desc->irq_data);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /* Oops, that failed? */
    } while (inprogress);

    /*
     * We made sure that no hardirq handler is running. Now verify
     * that no threaded handlers are active.
     */
    wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
This function waits for any pending IRQ handlers for this interrupt to complete before returning. If you use this function while holding a resource the IRQ handler may need you will deadlock.
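A hedged illustration of the deadlock this warning refers to; my_lock, my_irq and my_handler are hypothetical. Calling disable_irq() while holding a lock that the handler also takes can wait forever, because synchronize_irq() waits for a handler that is itself waiting for the lock.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);    /* hypothetical lock shared with the handler */
static int my_irq;                  /* hypothetical IRQ number */

static irqreturn_t my_handler(int irq, void *dev_id)
{
    spin_lock(&my_lock);            /* the handler needs my_lock */
    /* ... */
    spin_unlock(&my_lock);
    return IRQ_HANDLED;
}

static void broken_shutdown(void)
{
    spin_lock(&my_lock);
    /*
     * BUG: if my_handler is currently spinning on my_lock on another CPU,
     * disable_irq() -> synchronize_irq() waits for it to complete, the
     * handler waits for my_lock, and neither side can make progress.
     */
    disable_irq(my_irq);
    spin_unlock(&my_lock);
}

static void safe_shutdown(void)
{
    /* Disable (and wait for running handlers) first, then take the lock. */
    disable_irq(my_irq);
    spin_lock(&my_lock);
    /* ... */
    spin_unlock(&my_lock);
}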
- void enable_irq(unsigned int irq)
/* kernel/irq/manage.c */
/**
 *  enable_irq - enable handling of an irq
 *  @irq: Interrupt to enable
 *
 *  Undoes the effect of one call to disable_irq(). If this
 *  matches the last disable, processing of interrupts on this
 *  IRQ line is re-enabled.
 *
 *  This function may be called from IRQ context only when
 *  desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

    if (!desc)
        return;
    if (WARN(!desc->irq_data.chip,
         KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
        goto out;

    __enable_irq(desc, irq, false);
out:
    irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
/* kernel/irq/manage.c */
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
    if (resume) {
        if (!(desc->istate & IRQS_SUSPENDED)) {
            if (!desc->action)
                return;
            if (!(desc->action->flags & IRQF_FORCE_RESUME))
                return;
            /* Pretend that it got disabled ! */
            desc->depth++;
        }
        desc->istate &= ~IRQS_SUSPENDED;
    }

    printk("~~~ %s() do, irq:%d, desc->depth:%d\n", __func__, \
        irq, desc->depth);

    switch (desc->depth) {
    case 0:
 err_out:
        WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
        break;
    case 1: {
        if (desc->istate & IRQS_SUSPENDED)
            goto err_out;
        /* Prevent probing on this irq: */
        irq_settings_set_noprobe(desc);
        irq_enable(desc);
        check_irq_resend(desc, irq);
        /* fall-through */
    }
    default:
        desc->depth--;
    }

    printk("~~~ %s() done, irq:%d, desc->depth:%d\n", __func__, \
        irq, desc->depth);
}
/* kernel/irq/chip.c */
void irq_enable(struct irq_desc *desc)
{
    irq_state_clr_disabled(desc);
    if (desc->irq_data.chip->irq_enable)
        desc->irq_data.chip->irq_enable(&desc->irq_data);
    else
        desc->irq_data.chip->irq_unmask(&desc->irq_data);
    irq_state_clr_masked(desc);
}
In disable_irq(), desc->depth is incremented; only the call that takes depth from 0 to 1 actually invokes desc->irq_data.chip->irq_disable(&desc->irq_data).
In enable_irq(), desc->depth is decremented; only the call that takes depth from 1 back to 0 actually invokes
desc->irq_data.chip->irq_enable(&desc->irq_data) or desc->irq_data.chip->irq_unmask(&desc->irq_data).
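A short sketch of this nesting behaviour, matching the log above; my_irq is hypothetical and assumed to have been set up with request_irq() already.

/* my_irq is a hypothetical, already requested interrupt line. */
static void nested_disable_enable(unsigned int my_irq)
{
    disable_irq(my_irq);    /* depth 0 -> 1: chip->irq_disable() is called   */
    disable_irq(my_irq);    /* depth 1 -> 2: no chip call, just bookkeeping  */

    enable_irq(my_irq);     /* depth 2 -> 1: no chip call yet                */
    enable_irq(my_irq);     /* depth 1 -> 0: chip->irq_enable()/irq_unmask() */

    /* One extra enable_irq() here would hit the depth == 0 case and trigger
     * the "Unbalanced enable for IRQ" warning in __enable_irq(). */
}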
- void disable_irq_nosync(unsigned int irq)
/* kernel/irq/manage.c */
static int __disable_irq_nosync(unsigned int irq)
{
    unsigned long flags;
    struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

    if (!desc)
        return -EINVAL;
    __disable_irq(desc, irq, false);
    irq_put_desc_busunlock(desc, flags);
    return 0;
}

/**
 *  disable_irq_nosync - disable an irq without waiting
 *  @irq: Interrupt to disable
 *
 *  Disable the selected interrupt line. Disables and Enables are
 *  nested.
 *  Unlike disable_irq(), this function does not ensure existing
 *  instances of the IRQ handler have completed before returning.
 *
 *  This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
    __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
Unlike disable_irq(), this function does not ensure existing instances of the IRQ handler have completed before returning.
This function may be called from IRQ context.
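A minimal sketch of the usual pattern this enables; my_data, my_isr and my_work_fn are hypothetical names. The handler masks its own line with disable_irq_nosync() (a plain disable_irq() here would wait for the running handler, i.e. itself), and the deferred work re-enables the line later.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical driver data: the irq number plus a work item for deferred work.
 * INIT_WORK(&d->work, my_work_fn) is assumed to have been done in probe(). */
struct my_data {
    int irq;
    struct work_struct work;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
    struct my_data *d = dev_id;

    /*
     * disable_irq() would call synchronize_irq(), which waits for the
     * currently running handler, i.e. this one, so the _nosync variant
     * is the one that may be used from interrupt context.
     */
    disable_irq_nosync(irq);
    schedule_work(&d->work);
    return IRQ_HANDLED;
}

static void my_work_fn(struct work_struct *work)
{
    struct my_data *d = container_of(work, struct my_data, work);

    /* ... process the event in process context ... */

    enable_irq(d->irq);     /* depth goes back to 0, the line is unmasked */
}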