/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular cpu:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
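/*
 * Typical lifecycle, as seen by a driver (a sketch: "example_handler",
 * "example" and "dev" are illustrative names, not part of this file).
 * A frontend that has learned its peer's event channel number, e.g. from
 * xenstore, binds it to an irq, services events through the normal
 * interrupt path, and kicks the remote end when it queues work:
 *
 *	static irqreturn_t example_handler(int irq, void *dev)
 *	{
 *		// consume ring entries, then poke the other end
 *		notify_remote_via_irq(irq);
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, example_handler,
 *					    0, "example", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */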
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EIO"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};
static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

/* Per-cpu bitmap of event channels routed to each cpu. */
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}
static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
/* Events in word @idx which are pending, unmasked and routed to @cpu. */
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
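/*
 * Example usage (a sketch; "ring" and "notify" are illustrative locals,
 * and the ring macro comes from xen/interface/io/ring.h): a frontend that
 * has just queued requests on a shared ring would typically do
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *
 * so the notification hypercall is only made when the peer actually
 * needs waking.
 */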
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
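/*
 * Worked example of the resend above (port number chosen arbitrarily):
 * suppose port 70 fired while masked, so its bit is set in
 * evtchn_pending[] but no upcall was delivered. On unmask we notice the
 * pending bit, set bit 70 / BITS_PER_LONG (word 1 on a 64-bit build) in
 * evtchn_pending_sel and raise evtchn_upcall_pending, so
 * xen_evtchn_do_upcall() rescans that word and the event is handled
 * rather than lost.
 */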
static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_node(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}

	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	exit_idle();
	irq_enter();

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1);

out:
	irq_exit();
	set_irq_regs(old_regs);

	put_cpu();
}
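/*
 * Worked example of the two-level scan above (numbers chosen
 * arbitrarily): xchg() returns pending_words == 0x9, so words 0 and 3
 * hold pending events. If active_evtchns(cpu, s, 3) == 0x22, bits 1 and
 * 5 of word 3 are pending, unmasked and routed to this cpu, i.e. ports
 * 3 * BITS_PER_LONG + 1 and 3 * BITS_PER_LONG + 5; each is looked up in
 * evtchn_to_irq[] and handed to the generic irq layer.
 */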
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	/* Pend the event under the mask, then let unmask_evtchn() resend it. */
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
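/*
 * Sketch of the intended caller pattern (the wakeup condition and the
 * surrounding loop are illustrative, not part of this API): a slow path
 * that wants to sleep in the hypervisor until an event arrives, such as
 * a paravirtualized spinlock waiter, would do
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition_we_wait_for()) {
 *		xen_poll_irq(irq);	// blocks until the port pends
 *		if (xen_test_irq_pending(irq))
 *			break;
 *	}
 *
 * with the irq normally disabled, so the event is consumed by the
 * poller rather than delivered as an interrupt.
 */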
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	irq_ctx_init(smp_processor_id());
}