Lines Matching +full:on +full:- +full:chip

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 * This file contains the core interrupt handling code, for irq-chip based
8 * Documentation/core-api/genericirq.rst
29 * Chained handlers should never call action on their IRQ. This default
37 * irq_set_chip - set the irq chip for an irq
39 * @chip: pointer to irq chip description structure
41 int irq_set_chip(unsigned int irq, const struct irq_chip *chip) in irq_set_chip() argument
47 return -EINVAL; in irq_set_chip()
49 desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); in irq_set_chip()
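These fragments cover the basic driver-facing setup: attaching an irq_chip to a Linux interrupt number. A rough sketch of how that is typically done (not from this file; the foo names, register offsets and I/O base are invented):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

#define FOO_MASK_REG	0x04
#define FOO_UNMASK_REG	0x08

static void __iomem *foo_base;		/* mapped controller registers */

static void foo_irq_mask(struct irq_data *d)
{
	/* d->hwirq identifies the line within this controller */
	writel(BIT(d->hwirq), foo_base + FOO_MASK_REG);
}

static void foo_irq_unmask(struct irq_data *d)
{
	writel(BIT(d->hwirq), foo_base + FOO_UNMASK_REG);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static void foo_setup_one(unsigned int irq)
{
	irq_set_chip(irq, &foo_irq_chip);	/* falls back to no_irq_chip if NULL */
	irq_set_handler(irq, handle_level_irq);	/* flow handler, see further down */
}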
61 * irq_set_irq_type - set the irq trigger type for an irq
63 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
72 return -EINVAL; in irq_set_irq_type()
81 * irq_set_handler_data - set irq handler data for an irq
93 return -EINVAL; in irq_set_handler_data()
94 desc->irq_common_data.handler_data = data; in irq_set_handler_data()
101 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
115 return -EINVAL; in irq_set_msi_desc_off()
116 desc->irq_common_data.msi_desc = entry; in irq_set_msi_desc_off()
118 entry->irq = irq_base; in irq_set_msi_desc_off()
124 * irq_set_msi_desc - set MSI descriptor data for an irq
136 * irq_set_chip_data - set irq chip data for an irq
138 * @data: Pointer to chip specific data
140 * Set the hardware irq chip data for an irq
148 return -EINVAL; in irq_set_chip_data()
149 desc->irq_data.chip_data = data; in irq_set_chip_data()
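The chip_data pointer installed here is what the chip callbacks later read back with irq_data_get_irq_chip_data(). A hedged sketch, again using an invented foo controller:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

struct foo_priv {
	void __iomem *base;		/* per-instance register base */
};

static void foo_priv_mask(struct irq_data *d)
{
	struct foo_priv *priv = irq_data_get_irq_chip_data(d);

	writel(BIT(d->hwirq), priv->base + 0x04 /* hypothetical mask register */);
}

static struct irq_chip foo_priv_chip = {
	.name		= "foo",
	.irq_mask	= foo_priv_mask,
};

static void foo_map_one(unsigned int irq, struct foo_priv *priv)
{
	irq_set_chip_data(irq, priv);	/* ends up in desc->irq_data.chip_data */
	irq_set_chip_and_handler(irq, &foo_priv_chip, handle_level_irq);
}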
159 return desc ? &desc->irq_data : NULL; in irq_get_irq_data()
165 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_clr_disabled()
170 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_clr_masked()
175 irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_clr_started()
180 irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_set_started()
203 * Catch code which fiddles with enable_irq() on a managed in __irq_startup_managed()
205 * installment or irq auto probing should not happen on in __irq_startup_managed()
243 if (d->chip->irq_startup) { in __irq_startup()
244 ret = d->chip->irq_startup(d); in __irq_startup()
260 desc->depth = 0; in irq_startup()
267 if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) in irq_startup()
270 if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) in irq_startup()
308 if (irqd_is_started(&desc->irq_data)) { in irq_shutdown()
310 desc->depth = 1; in irq_shutdown()
311 if (desc->irq_data.chip->irq_shutdown) { in irq_shutdown()
312 desc->irq_data.chip->irq_shutdown(&desc->irq_data); in irq_shutdown()
332 irq_domain_deactivate_irq(&desc->irq_data); in irq_shutdown_and_deactivate()
337 if (!irqd_irq_disabled(&desc->irq_data)) { in irq_enable()
341 if (desc->irq_data.chip->irq_enable) { in irq_enable()
342 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_enable()
352 if (irqd_irq_disabled(&desc->irq_data)) { in __irq_disable()
357 if (desc->irq_data.chip->irq_disable) { in __irq_disable()
358 desc->irq_data.chip->irq_disable(&desc->irq_data); in __irq_disable()
367 * irq_disable - Mark interrupt disabled
370 * If the chip does not implement the irq_disable callback, we
379 * If the interrupt chip does not implement the irq_disable callback,
393 if (desc->irq_data.chip->irq_enable) in irq_percpu_enable()
394 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_percpu_enable()
396 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_percpu_enable()
397 cpumask_set_cpu(cpu, desc->percpu_enabled); in irq_percpu_enable()
402 if (desc->irq_data.chip->irq_disable) in irq_percpu_disable()
403 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_percpu_disable()
405 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_percpu_disable()
406 cpumask_clear_cpu(cpu, desc->percpu_enabled); in irq_percpu_disable()
411 if (desc->irq_data.chip->irq_mask_ack) { in mask_ack_irq()
412 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); in mask_ack_irq()
416 if (desc->irq_data.chip->irq_ack) in mask_ack_irq()
417 desc->irq_data.chip->irq_ack(&desc->irq_data); in mask_ack_irq()
423 if (irqd_irq_masked(&desc->irq_data)) in mask_irq()
426 if (desc->irq_data.chip->irq_mask) { in mask_irq()
427 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_irq()
434 if (!irqd_irq_masked(&desc->irq_data)) in unmask_irq()
437 if (desc->irq_data.chip->irq_unmask) { in unmask_irq()
438 desc->irq_data.chip->irq_unmask(&desc->irq_data); in unmask_irq()
445 struct irq_chip *chip = desc->irq_data.chip; in unmask_threaded_irq() local
447 if (chip->flags & IRQCHIP_EOI_THREADED) in unmask_threaded_irq()
448 chip->irq_eoi(&desc->irq_data); in unmask_threaded_irq()
454 * handle_nested_irq - Handle a nested irq from an irq thread
469 raw_spin_lock_irq(&desc->lock); in handle_nested_irq()
471 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_nested_irq()
473 action = desc->action; in handle_nested_irq()
474 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { in handle_nested_irq()
475 desc->istate |= IRQS_PENDING; in handle_nested_irq()
476 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
481 atomic_inc(&desc->threads_active); in handle_nested_irq()
482 raw_spin_unlock_irq(&desc->lock); in handle_nested_irq()
486 action_ret |= action->thread_fn(action->irq, action->dev_id); in handle_nested_irq()
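handle_nested_irq() is the entry point a slow-bus (I2C/SPI) interrupt controller driver calls from its own threaded handler to run the nested thread functions of its child interrupts. A rough sketch, with the foo expander, its status read and its irq_domain all assumed rather than taken from this file:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define FOO_NR_LINES	8

struct foo_expander {
	struct irq_domain *domain;	/* domain holding the child interrupts */
};

/* Stand-in for the real (sleeping) bus read of the expander status register. */
static unsigned long foo_read_status(struct foo_expander *foo)
{
	return 0;
}

/* Threaded handler of the expander's upstream interrupt line. */
static irqreturn_t foo_parent_thread_fn(int irq, void *dev_id)
{
	struct foo_expander *foo = dev_id;
	unsigned long pending = foo_read_status(foo);
	int bit;

	for_each_set_bit(bit, &pending, FOO_NR_LINES)
		handle_nested_irq(irq_find_mapping(foo->domain, bit));

	return IRQ_HANDLED;
}

/* Child interrupts are marked nested when they are mapped. */
static void foo_map_child(unsigned int virq)
{
	irq_set_nested_thread(virq, true);
	irq_set_handler(virq, handle_simple_irq);
}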
497 if (!(desc->istate & IRQS_POLL_INPROGRESS)) in irq_check_poll()
510 if (!irqd_has_set(&desc->irq_data, mask)) in irq_may_run()
522 * Handle a potential concurrent poll on a different core. in irq_may_run()
528 * handle_simple_irq - Simple and software-decoded IRQs.
540 raw_spin_lock(&desc->lock); in handle_simple_irq()
545 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_simple_irq()
547 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_simple_irq()
548 desc->istate |= IRQS_PENDING; in handle_simple_irq()
556 raw_spin_unlock(&desc->lock); in handle_simple_irq()
561 * handle_untracked_irq - Simple and software-decoded IRQs.
575 raw_spin_lock(&desc->lock); in handle_untracked_irq()
580 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_untracked_irq()
582 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_untracked_irq()
583 desc->istate |= IRQS_PENDING; in handle_untracked_irq()
587 desc->istate &= ~IRQS_PENDING; in handle_untracked_irq()
588 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
589 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
593 raw_spin_lock(&desc->lock); in handle_untracked_irq()
594 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
597 raw_spin_unlock(&desc->lock); in handle_untracked_irq()
609 * - Standard level irq (IRQF_ONESHOT is not set) in cond_unmask_irq()
610 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_irq()
614 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_irq()
615 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) in cond_unmask_irq()
620 * handle_level_irq - Level type irq handler
630 raw_spin_lock(&desc->lock); in handle_level_irq()
636 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_level_irq()
642 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_level_irq()
643 desc->istate |= IRQS_PENDING; in handle_level_irq()
653 raw_spin_unlock(&desc->lock); in handle_level_irq()
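Flow handlers such as handle_level_irq() and handle_edge_irq() are never called directly by drivers; they are installed per interrupt, usually from an irq_domain map or alloc callback. A minimal sketch (foo names invented):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip foo_irq_chip;	/* mask/unmask/ack callbacks defined elsewhere */

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hwirq)
{
	/*
	 * Pick the flow handler matching the hardware: handle_level_irq
	 * for level-triggered lines, handle_edge_irq for edge-triggered ones.
	 */
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};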
657 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) in cond_unmask_eoi_irq() argument
659 if (!(desc->istate & IRQS_ONESHOT)) { in cond_unmask_eoi_irq()
660 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
665 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_eoi_irq()
669 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_eoi_irq()
670 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { in cond_unmask_eoi_irq()
671 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
673 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { in cond_unmask_eoi_irq()
674 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
679 * handle_fasteoi_irq - irq handler for transparent controllers
682 * Only a single callback will be issued to the chip: an ->eoi()
689 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_irq() local
691 raw_spin_lock(&desc->lock); in handle_fasteoi_irq()
695 * can arrive on the new CPU before the original CPU has completed in handle_fasteoi_irq()
696 * handling the previous one - it may need to be resent. in handle_fasteoi_irq()
699 if (irqd_needs_resend_when_in_progress(&desc->irq_data)) in handle_fasteoi_irq()
700 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
704 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_irq()
710 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_irq()
711 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
717 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_irq()
722 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_irq()
727 if (unlikely(desc->istate & IRQS_PENDING)) in handle_fasteoi_irq()
730 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
733 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_irq()
734 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_irq()
735 raw_spin_unlock(&desc->lock); in handle_fasteoi_irq()
740 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
743 * A simple NMI-safe handler, considering the restrictions
746 * Only a single callback will be issued to the chip: an ->eoi()
753 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_fasteoi_nmi() local
754 struct irqaction *action = desc->action; in handle_fasteoi_nmi()
764 res = action->handler(irq, action->dev_id); in handle_fasteoi_nmi()
767 if (chip->irq_eoi) in handle_fasteoi_nmi()
768 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_nmi()
773 * handle_edge_irq - edge type IRQ handler
776 * Interrupt occurs on the falling and/or rising edge of a hardware
779 * interrupt can happen on the same source even before the first one
781 * might be necessary to disable (mask) the interrupt depending on the
789 raw_spin_lock(&desc->lock); in handle_edge_irq()
791 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_irq()
794 desc->istate |= IRQS_PENDING; in handle_edge_irq()
803 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_irq()
804 desc->istate |= IRQS_PENDING; in handle_edge_irq()
812 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_edge_irq()
815 if (unlikely(!desc->action)) { in handle_edge_irq()
825 if (unlikely(desc->istate & IRQS_PENDING)) { in handle_edge_irq()
826 if (!irqd_irq_disabled(&desc->irq_data) && in handle_edge_irq()
827 irqd_irq_masked(&desc->irq_data)) in handle_edge_irq()
833 } while ((desc->istate & IRQS_PENDING) && in handle_edge_irq()
834 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_irq()
837 raw_spin_unlock(&desc->lock); in handle_edge_irq()
843 * handle_edge_eoi_irq - edge eoi type IRQ handler
851 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_edge_eoi_irq() local
853 raw_spin_lock(&desc->lock); in handle_edge_eoi_irq()
855 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_edge_eoi_irq()
858 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
866 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { in handle_edge_eoi_irq()
867 desc->istate |= IRQS_PENDING; in handle_edge_eoi_irq()
874 if (unlikely(!desc->action)) in handle_edge_eoi_irq()
879 } while ((desc->istate & IRQS_PENDING) && in handle_edge_eoi_irq()
880 !irqd_irq_disabled(&desc->irq_data)); in handle_edge_eoi_irq()
883 chip->irq_eoi(&desc->irq_data); in handle_edge_eoi_irq()
884 raw_spin_unlock(&desc->lock); in handle_edge_eoi_irq()
889 * handle_percpu_irq - Per CPU local irq handler
892 * Per CPU interrupts on SMP machines without locking requirements
896 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_irq() local
900 * desc->tot_count. in handle_percpu_irq()
904 if (chip->irq_ack) in handle_percpu_irq()
905 chip->irq_ack(&desc->irq_data); in handle_percpu_irq()
909 if (chip->irq_eoi) in handle_percpu_irq()
910 chip->irq_eoi(&desc->irq_data); in handle_percpu_irq()
914 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
917 * Per CPU interrupts on SMP machines without locking requirements. Same as
920 * action->percpu_dev_id is a pointer to percpu variables which
921 * contain the real device id for the cpu on which this handler is
926 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_irq() local
927 struct irqaction *action = desc->action; in handle_percpu_devid_irq()
933 * desc->tot_count. in handle_percpu_devid_irq()
937 if (chip->irq_ack) in handle_percpu_devid_irq()
938 chip->irq_ack(&desc->irq_data); in handle_percpu_devid_irq()
942 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_irq()
946 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in handle_percpu_devid_irq()
951 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", in handle_percpu_devid_irq()
955 if (chip->irq_eoi) in handle_percpu_devid_irq()
956 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_irq()
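Lines wired up to this handler are requested through the per-CPU API rather than request_irq(): one registration covers all CPUs, and the dev_id is a percpu pointer, which is what the raw_cpu_ptr() above dereferences. A hedged example (the timer name and per-CPU state are made up):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct foo_timer {
	int dummy;			/* per-CPU device state */
};

static struct foo_timer __percpu *foo_timers;

static irqreturn_t foo_timer_handler(int irq, void *dev_id)
{
	/* dev_id already points at this CPU's struct foo_timer */
	return IRQ_HANDLED;
}

static int foo_timer_setup(unsigned int irq)
{
	int ret;

	foo_timers = alloc_percpu(struct foo_timer);
	if (!foo_timers)
		return -ENOMEM;

	ret = request_percpu_irq(irq, foo_timer_handler, "foo-timer", foo_timers);
	if (ret)
		return ret;

	/* Each CPU still enables its own copy of the line. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}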
960 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
969 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_fasteoi_nmi() local
970 struct irqaction *action = desc->action; in handle_percpu_devid_fasteoi_nmi()
977 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_fasteoi_nmi()
980 if (chip->irq_eoi) in handle_percpu_devid_fasteoi_nmi()
981 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_fasteoi_nmi()
991 struct irq_data *irq_data = &desc->irq_data; in __irq_do_set_handler()
995 * situation where the outermost chip is not yet set in __irq_do_set_handler()
1001 if (irq_data->chip != &no_irq_chip) in __irq_do_set_handler()
1004 * Bail out if the outer chip is not set up in __irq_do_set_handler()
1011 irq_data = irq_data->parent_data; in __irq_do_set_handler()
1014 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) in __irq_do_set_handler()
1020 if (desc->irq_data.chip != &no_irq_chip) in __irq_do_set_handler()
1024 desc->action = NULL; in __irq_do_set_handler()
1027 desc->depth = 1; in __irq_do_set_handler()
1029 desc->handle_irq = handle; in __irq_do_set_handler()
1030 desc->name = name; in __irq_do_set_handler()
1033 unsigned int type = irqd_get_trigger_type(&desc->irq_data); in __irq_do_set_handler()
1045 desc->handle_irq = handle; in __irq_do_set_handler()
1051 desc->action = &chained_action; in __irq_do_set_handler()
1082 desc->irq_common_data.handler_data = data; in irq_set_chained_handler_and_data()
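A handler installed this way runs in the context of the parent interrupt and demultiplexes into child interrupts itself, which is why the code above gives the descriptor the dummy chained_action instead of a normal one. The usual driver-side pattern looks roughly like this (the foo names, the status register and the pre-existing irq_domain are assumptions):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct foo_intc {
	void __iomem *base;		/* controller registers */
	struct irq_domain *domain;	/* domain of the child interrupts */
};

static void foo_demux_handler(struct irq_desc *desc)
{
	struct foo_intc *foo = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	int hwirq;

	chained_irq_enter(chip, desc);		/* ack/mask the parent as needed */

	status = readl(foo->base);		/* hypothetical pending register */
	for_each_set_bit(hwirq, &status, 32)
		generic_handle_domain_irq(foo->domain, hwirq);

	chained_irq_exit(chip, desc);		/* eoi/unmask the parent */
}

static void foo_intc_init(unsigned int parent_irq, struct foo_intc *foo)
{
	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, foo);
}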
1090 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, in irq_set_chip_and_handler_name() argument
1093 irq_set_chip(irq, chip); in irq_set_chip_and_handler_name()
1107 * Warn when a driver sets the no autoenable flag on an already in irq_modify_status()
1110 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); in irq_modify_status()
1114 trigger = irqd_get_trigger_type(&desc->irq_data); in irq_modify_status()
1116 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | in irq_modify_status()
1119 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in irq_modify_status()
1121 irqd_set(&desc->irq_data, IRQD_PER_CPU); in irq_modify_status()
1123 irqd_set(&desc->irq_data, IRQD_LEVEL); in irq_modify_status()
1129 irqd_set(&desc->irq_data, trigger); in irq_modify_status()
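irq_modify_status() is normally reached through the irq_set_status_flags()/irq_clear_status_flags() wrappers; the WARN_ON_ONCE above catches a driver setting the no-autoenable flag on a line that is already started. Typical use, with the IRQ number coming from wherever the driver obtained it:

#include <linux/irq.h>

static void foo_prepare_irq(unsigned int irq)
{
	/*
	 * Keep the line disabled across request_irq(); the driver calls
	 * enable_irq() itself once the device is fully initialised.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
}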
1137 * irq_cpu_online - Invoke all irq_cpu_online functions.
1139 * Iterate through all irqs and invoke the chip.irq_cpu_online()
1145 struct irq_chip *chip; in irq_cpu_online() local
1154 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_online()
1156 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_online()
1157 if (chip && chip->irq_cpu_online && in irq_cpu_online()
1158 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_online()
1159 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_online()
1160 chip->irq_cpu_online(&desc->irq_data); in irq_cpu_online()
1162 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_online()
1167 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
1169 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
1175 struct irq_chip *chip; in irq_cpu_offline() local
1184 raw_spin_lock_irqsave(&desc->lock, flags); in irq_cpu_offline()
1186 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_offline()
1187 if (chip && chip->irq_cpu_offline && in irq_cpu_offline()
1188 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_offline()
1189 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_offline()
1190 chip->irq_cpu_offline(&desc->irq_data); in irq_cpu_offline()
1192 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_cpu_offline()
1201 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
1202 * stacked on transparent controllers
1207 * the irq_chip also needs to have its ->irq_ack() function
1212 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_ack_irq() local
1214 raw_spin_lock(&desc->lock); in handle_fasteoi_ack_irq()
1219 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_ack_irq()
1225 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_ack_irq()
1226 desc->istate |= IRQS_PENDING; in handle_fasteoi_ack_irq()
1232 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_ack_irq()
1236 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_fasteoi_ack_irq()
1240 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_ack_irq()
1242 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1245 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_ack_irq()
1246 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_ack_irq()
1247 raw_spin_unlock(&desc->lock); in handle_fasteoi_ack_irq()
1252 * handle_fasteoi_mask_irq - irq handler for level hierarchy
1253 * stacked on transparent controllers
1258 * the irq_chip also needs to have its ->irq_mask_ack() function
1263 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_mask_irq() local
1265 raw_spin_lock(&desc->lock); in handle_fasteoi_mask_irq()
1271 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in handle_fasteoi_mask_irq()
1277 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in handle_fasteoi_mask_irq()
1278 desc->istate |= IRQS_PENDING; in handle_fasteoi_mask_irq()
1284 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_mask_irq()
1289 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_mask_irq()
1291 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()
1294 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in handle_fasteoi_mask_irq()
1295 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_mask_irq()
1296 raw_spin_unlock(&desc->lock); in handle_fasteoi_mask_irq()
1303 * irq_chip_set_parent_state - set the state of a parent interrupt.
1315 data = data->parent_data; in irq_chip_set_parent_state()
1317 if (!data || !data->chip->irq_set_irqchip_state) in irq_chip_set_parent_state()
1320 return data->chip->irq_set_irqchip_state(data, which, val); in irq_chip_set_parent_state()
1325 * irq_chip_get_parent_state - get the state of a parent interrupt.
1337 data = data->parent_data; in irq_chip_get_parent_state()
1339 if (!data || !data->chip->irq_get_irqchip_state) in irq_chip_get_parent_state()
1342 return data->chip->irq_get_irqchip_state(data, which, state); in irq_chip_get_parent_state()
1347 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1353 data = data->parent_data; in irq_chip_enable_parent()
1354 if (data->chip->irq_enable) in irq_chip_enable_parent()
1355 data->chip->irq_enable(data); in irq_chip_enable_parent()
1357 data->chip->irq_unmask(data); in irq_chip_enable_parent()
1362 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1368 data = data->parent_data; in irq_chip_disable_parent()
1369 if (data->chip->irq_disable) in irq_chip_disable_parent()
1370 data->chip->irq_disable(data); in irq_chip_disable_parent()
1372 data->chip->irq_mask(data); in irq_chip_disable_parent()
1377 * irq_chip_ack_parent - Acknowledge the parent interrupt
1382 data = data->parent_data; in irq_chip_ack_parent()
1383 data->chip->irq_ack(data); in irq_chip_ack_parent()
1388 * irq_chip_mask_parent - Mask the parent interrupt
1393 data = data->parent_data; in irq_chip_mask_parent()
1394 data->chip->irq_mask(data); in irq_chip_mask_parent()
1399 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1404 data = data->parent_data; in irq_chip_mask_ack_parent()
1405 data->chip->irq_mask_ack(data); in irq_chip_mask_ack_parent()
1410 * irq_chip_unmask_parent - Unmask the parent interrupt
1415 data = data->parent_data; in irq_chip_unmask_parent()
1416 data->chip->irq_unmask(data); in irq_chip_unmask_parent()
1421 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1426 data = data->parent_data; in irq_chip_eoi_parent()
1427 data->chip->irq_eoi(data); in irq_chip_eoi_parent()
1432 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1437 * Conditional, as the underlying parent chip might not implement it.
1442 data = data->parent_data; in irq_chip_set_affinity_parent()
1443 if (data->chip->irq_set_affinity) in irq_chip_set_affinity_parent()
1444 return data->chip->irq_set_affinity(data, dest, force); in irq_chip_set_affinity_parent()
1446 return -ENOSYS; in irq_chip_set_affinity_parent()
1451 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1453 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1455 * Conditional, as the underlying parent chip might not implement it.
1459 data = data->parent_data; in irq_chip_set_type_parent()
1461 if (data->chip->irq_set_type) in irq_chip_set_type_parent()
1462 return data->chip->irq_set_type(data, type); in irq_chip_set_type_parent()
1464 return -ENOSYS; in irq_chip_set_type_parent()
1469 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1477 for (data = data->parent_data; data; data = data->parent_data) in irq_chip_retrigger_hierarchy()
1478 if (data->chip && data->chip->irq_retrigger) in irq_chip_retrigger_hierarchy()
1479 return data->chip->irq_retrigger(data); in irq_chip_retrigger_hierarchy()
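These *_parent helpers let a chip that sits in a hierarchical irq domain forward most operations to the chip below it, so a stacked chip often consists of little more than a table of them. A representative (hypothetical) MSI-style chip:

#include <linux/irq.h>

static struct irq_chip foo_msi_irq_chip = {
	.name			= "foo-msi",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_wake		= irq_chip_set_wake_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
};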
1486 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1492 data = data->parent_data; in irq_chip_set_vcpu_affinity_parent()
1493 if (data->chip->irq_set_vcpu_affinity) in irq_chip_set_vcpu_affinity_parent()
1494 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); in irq_chip_set_vcpu_affinity_parent()
1496 return -ENOSYS; in irq_chip_set_vcpu_affinity_parent()
1500 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1502 * @on: Whether to set or reset the wake-up capability of this irq
1504 * Conditional, as the underlying parent chip might not implement it.
1506 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) in irq_chip_set_wake_parent() argument
1508 data = data->parent_data; in irq_chip_set_wake_parent()
1510 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) in irq_chip_set_wake_parent()
1513 if (data->chip->irq_set_wake) in irq_chip_set_wake_parent()
1514 return data->chip->irq_set_wake(data, on); in irq_chip_set_wake_parent()
1516 return -ENOSYS; in irq_chip_set_wake_parent()
1521 * irq_chip_request_resources_parent - Request resources on the parent interrupt
1526 data = data->parent_data; in irq_chip_request_resources_parent()
1528 if (data->chip->irq_request_resources) in irq_chip_request_resources_parent()
1529 return data->chip->irq_request_resources(data); in irq_chip_request_resources_parent()
1531 /* no error on missing optional irq_chip::irq_request_resources */ in irq_chip_request_resources_parent()
1537 * irq_chip_release_resources_parent - Release resources on the parent interrupt
1542 data = data->parent_data; in irq_chip_release_resources_parent()
1543 if (data->chip->irq_release_resources) in irq_chip_release_resources_parent()
1544 data->chip->irq_release_resources(data); in irq_chip_release_resources_parent()
1550 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1554 * For hierarchical domains we find the first chip in the hierarchy
1556 * hierarchical we use the top level chip.
1563 if (data->chip && data->chip->irq_compose_msi_msg) in irq_chip_compose_msi_msg()
1568 return -ENOSYS; in irq_chip_compose_msi_msg()
1570 pos->chip->irq_compose_msi_msg(pos, msg); in irq_chip_compose_msi_msg()
1576 if (data->domain) in irq_get_pm_device()
1577 return data->domain->pm_dev; in irq_get_pm_device()
1583 * irq_chip_pm_get - Enable power for an IRQ chip
1586 * Enable the power to the IRQ chip referenced by the interrupt data
1601 * irq_chip_pm_put - Disable power for an IRQ chip
1604 * Disable the power to the IRQ chip referenced by the interrupt data