Lines matching full:opp in arch/powerpc/kvm/mpic.c (KVM in-kernel MPIC/OpenPIC emulation for PowerPC)
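Every match below is a use of opp, the per-device struct openpic state pointer that is threaded through the register accessors, the IRQ routing helpers, and the KVM MMIO and device-attribute handlers. For orientation only, here is a partial sketch of that state assembled from the fields the matches touch; the field types, the MAX_CPU/MAX_TMR/MAX_MSI bounds, and the fsl_mpic_info type name are assumptions, not text from the file.

/*
 * Orientation only: a partial sketch of struct openpic built from the
 * fields referenced in the matches below.  Field types, the MAX_CPU/
 * MAX_TMR/MAX_MSI bounds and the fsl_mpic_info name are assumptions.
 */
struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;             /* registered on KVM_MMIO_BUS */
	spinlock_t lock;                       /* guards all MPIC state */
	gpa_t reg_base;

	uint32_t model, flags;                 /* OPENPIC_FLAG_IDR_CRIT / _ILR */
	uint32_t nb_cpus, nb_irqs, max_irq;
	uint32_t irq_ipi0, irq_tim0, irq_msi;  /* first IPI / timer / MSI source */

	/* global registers and their reset values */
	uint32_t gcr, frr, vid, vir, pir, spve, tfrr, brr1;
	uint32_t vector_mask, mpic_mode_mask;
	uint32_t ivpr_reset, idr_reset, tfrr_reset;

	struct irq_source src[MAX_IRQ];        /* per-source IVPR/IDR/ILR state */
	struct irq_dest dst[MAX_CPU];          /* per-CPU ctpr, raised/servicing queues */
	struct { uint32_t tccr, tbcr; } timers[MAX_TMR];
	struct { uint32_t msir; } msi[MAX_MSI];

	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;
	const struct fsl_mpic_info *fsl;       /* FSL model parameters (max_ext, ...) */
};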
128 static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
171 #define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask) argument
241 static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst, in mpic_irq_raise() argument
250 __func__, (int)(dst - &opp->dst[0])); in mpic_irq_raise()
263 static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst, in mpic_irq_lower() argument
268 __func__, (int)(dst - &opp->dst[0])); in mpic_irq_lower()
291 static void IRQ_check(struct openpic *opp, struct irq_queue *q) in IRQ_check() argument
298 irq = find_next_bit(q->queue, opp->max_irq, irq + 1); in IRQ_check()
299 if (irq == opp->max_irq) in IRQ_check()
303 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority); in IRQ_check()
305 if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) { in IRQ_check()
307 priority = IVPR_PRIORITY(opp->src[irq].ivpr); in IRQ_check()
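Taken together, the IRQ_check() fragments above are one scan over a pending-source bitmap: visit every bit set in q->queue, compare each source's IVPR priority, and remember the highest-priority source. The sketch below reconstructs that loop from the fragments; the trailing q->next/q->priority bookkeeping is inferred from how IRQ_get_next() uses the queue and is not shown in the matches.

/* Reconstruction of the scan implied by the IRQ_check() matches; the
 * final q->next / q->priority assignments are inferred, not shown. */
static void irq_check_sketch(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		/* next pending source, or max_irq when the bitmap is exhausted */
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		/* keep whichever pending source has the highest IVPR priority */
		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;          /* consumed by IRQ_get_next() */
	q->priority = priority;
}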
315 static int IRQ_get_next(struct openpic *opp, struct irq_queue *q) in IRQ_get_next() argument
318 IRQ_check(opp, q); in IRQ_get_next()
323 static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ, in IRQ_local_pipe() argument
330 dst = &opp->dst[n_CPU]; in IRQ_local_pipe()
331 src = &opp->src[n_IRQ]; in IRQ_local_pipe()
350 mpic_irq_raise(opp, dst, src->output); in IRQ_local_pipe()
357 mpic_irq_lower(opp, dst, src->output); in IRQ_local_pipe()
374 IRQ_check(opp, &dst->raised); in IRQ_local_pipe()
383 if (IRQ_get_next(opp, &dst->servicing) >= 0 && in IRQ_local_pipe()
390 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in IRQ_local_pipe()
393 IRQ_get_next(opp, &dst->servicing); in IRQ_local_pipe()
405 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in IRQ_local_pipe()
411 static void openpic_update_irq(struct openpic *opp, int n_IRQ) in openpic_update_irq() argument
417 src = &opp->src[n_IRQ]; in openpic_update_irq()
450 IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active); in openpic_update_irq()
453 for (i = 0; i < opp->nb_cpus; i++) { in openpic_update_irq()
455 IRQ_local_pipe(opp, i, n_IRQ, active, in openpic_update_irq()
462 if (i == opp->nb_cpus) in openpic_update_irq()
466 IRQ_local_pipe(opp, i, n_IRQ, active, in openpic_update_irq()
477 struct openpic *opp = opaque; in openpic_set_irq() local
485 src = &opp->src[n_IRQ]; in openpic_set_irq()
491 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
496 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
507 openpic_update_irq(opp, n_IRQ); in openpic_set_irq()
512 static void openpic_reset(struct openpic *opp) in openpic_reset() argument
516 opp->gcr = GCR_RESET; in openpic_reset()
518 opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | in openpic_reset()
519 (opp->vid << FRR_VID_SHIFT); in openpic_reset()
521 opp->pir = 0; in openpic_reset()
522 opp->spve = -1 & opp->vector_mask; in openpic_reset()
523 opp->tfrr = opp->tfrr_reset; in openpic_reset()
525 for (i = 0; i < opp->max_irq; i++) { in openpic_reset()
526 opp->src[i].ivpr = opp->ivpr_reset; in openpic_reset()
528 switch (opp->src[i].type) { in openpic_reset()
530 opp->src[i].level = in openpic_reset()
531 !!(opp->ivpr_reset & IVPR_SENSE_MASK); in openpic_reset()
535 opp->src[i].ivpr |= IVPR_POLARITY_MASK; in openpic_reset()
542 write_IRQreg_idr(opp, i, opp->idr_reset); in openpic_reset()
546 opp->dst[i].ctpr = 15; in openpic_reset()
547 memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue)); in openpic_reset()
548 opp->dst[i].raised.next = -1; in openpic_reset()
549 memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue)); in openpic_reset()
550 opp->dst[i].servicing.next = -1; in openpic_reset()
554 opp->timers[i].tccr = 0; in openpic_reset()
555 opp->timers[i].tbcr = TBCR_CI; in openpic_reset()
558 opp->gcr = 0; in openpic_reset()
561 static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ) in read_IRQreg_idr() argument
563 return opp->src[n_IRQ].idr; in read_IRQreg_idr()
566 static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ) in read_IRQreg_ilr() argument
568 if (opp->flags & OPENPIC_FLAG_ILR) in read_IRQreg_ilr()
569 return opp->src[n_IRQ].output; in read_IRQreg_ilr()
574 static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ) in read_IRQreg_ivpr() argument
576 return opp->src[n_IRQ].ivpr; in read_IRQreg_ivpr()
579 static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, in write_IRQreg_idr() argument
582 struct irq_source *src = &opp->src[n_IRQ]; in write_IRQreg_idr()
583 uint32_t normal_mask = (1UL << opp->nb_cpus) - 1; in write_IRQreg_idr()
586 int crit_shift = IDR_EP_SHIFT - opp->nb_cpus; in write_IRQreg_idr()
589 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { in write_IRQreg_idr()
597 if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { in write_IRQreg_idr()
608 for (i = 0; i < opp->nb_cpus; i++) { in write_IRQreg_idr()
624 static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ, in write_IRQreg_ilr() argument
627 if (opp->flags & OPENPIC_FLAG_ILR) { in write_IRQreg_ilr()
628 struct irq_source *src = &opp->src[n_IRQ]; in write_IRQreg_ilr()
638 static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ, in write_IRQreg_ivpr() argument
647 IVPR_POLARITY_MASK | opp->vector_mask; in write_IRQreg_ivpr()
650 opp->src[n_IRQ].ivpr = in write_IRQreg_ivpr()
651 (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); in write_IRQreg_ivpr()
657 switch (opp->src[n_IRQ].type) { in write_IRQreg_ivpr()
659 opp->src[n_IRQ].level = in write_IRQreg_ivpr()
660 !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); in write_IRQreg_ivpr()
664 opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; in write_IRQreg_ivpr()
668 opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); in write_IRQreg_ivpr()
672 openpic_update_irq(opp, n_IRQ); in write_IRQreg_ivpr()
674 opp->src[n_IRQ].ivpr); in write_IRQreg_ivpr()
677 static void openpic_gcr_write(struct openpic *opp, uint64_t val) in openpic_gcr_write() argument
680 openpic_reset(opp); in openpic_gcr_write()
684 opp->gcr &= ~opp->mpic_mode_mask; in openpic_gcr_write()
685 opp->gcr |= val & opp->mpic_mode_mask; in openpic_gcr_write()
690 struct openpic *opp = opaque; in openpic_gbl_write() local
708 err = openpic_cpu_write_internal(opp, addr, val, in openpic_gbl_write()
714 openpic_gcr_write(opp, val); in openpic_gbl_write()
731 write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); in openpic_gbl_write()
735 opp->spve = val & opp->vector_mask; in openpic_gbl_write()
746 struct openpic *opp = opaque; in openpic_gbl_read() local
757 retval = opp->frr; in openpic_gbl_read()
758 retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT; in openpic_gbl_read()
761 retval = opp->gcr; in openpic_gbl_read()
764 retval = opp->vir; in openpic_gbl_read()
770 retval = opp->brr1; in openpic_gbl_read()
780 err = openpic_cpu_read_internal(opp, addr, in openpic_gbl_read()
790 retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx); in openpic_gbl_read()
794 retval = opp->spve; in openpic_gbl_read()
808 struct openpic *opp = opaque; in openpic_tmr_write() local
819 opp->tfrr = val; in openpic_tmr_write()
830 if ((opp->timers[idx].tccr & TCCR_TOG) != 0 && in openpic_tmr_write()
832 (opp->timers[idx].tbcr & TBCR_CI) != 0) in openpic_tmr_write()
833 opp->timers[idx].tccr &= ~TCCR_TOG; in openpic_tmr_write()
835 opp->timers[idx].tbcr = val; in openpic_tmr_write()
838 write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val); in openpic_tmr_write()
841 write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); in openpic_tmr_write()
850 struct openpic *opp = opaque; in openpic_tmr_read() local
861 retval = opp->tfrr; in openpic_tmr_read()
867 retval = opp->timers[idx].tccr; in openpic_tmr_read()
870 retval = opp->timers[idx].tbcr; in openpic_tmr_read()
873 retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx); in openpic_tmr_read()
876 retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx); in openpic_tmr_read()
888 struct openpic *opp = opaque; in openpic_src_write() local
898 write_IRQreg_ivpr(opp, idx, val); in openpic_src_write()
901 write_IRQreg_idr(opp, idx, val); in openpic_src_write()
904 write_IRQreg_ilr(opp, idx, val); in openpic_src_write()
913 struct openpic *opp = opaque; in openpic_src_read() local
925 retval = read_IRQreg_ivpr(opp, idx); in openpic_src_read()
928 retval = read_IRQreg_idr(opp, idx); in openpic_src_read()
931 retval = read_IRQreg_ilr(opp, idx); in openpic_src_read()
942 struct openpic *opp = opaque; in openpic_msi_write() local
943 int idx = opp->irq_msi; in openpic_msi_write()
955 opp->msi[srs].msir |= 1 << ibs; in openpic_msi_write()
956 openpic_set_irq(opp, idx, 1); in openpic_msi_write()
968 struct openpic *opp = opaque; in openpic_msi_read() local
987 r = opp->msi[srs].msir; in openpic_msi_read()
989 opp->msi[srs].msir = 0; in openpic_msi_read()
990 openpic_set_irq(opp, opp->irq_msi + srs, 0); in openpic_msi_read()
994 r |= (opp->msi[i].msir ? 1 : 0) << i; in openpic_msi_read()
1026 struct openpic *opp = opaque; in openpic_cpu_write_internal() local
1040 dst = &opp->dst[idx]; in openpic_cpu_write_internal()
1049 opp->src[opp->irq_ipi0 + idx].destmask |= val; in openpic_cpu_write_internal()
1050 openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); in openpic_cpu_write_internal()
1051 openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); in openpic_cpu_write_internal()
1063 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1067 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1081 s_IRQ = IRQ_get_next(opp, &dst->servicing); in openpic_cpu_write_internal()
1093 s_IRQ = IRQ_get_next(opp, &dst->servicing); in openpic_cpu_write_internal()
1095 n_IRQ = IRQ_get_next(opp, &dst->raised); in openpic_cpu_write_internal()
1096 src = &opp->src[n_IRQ]; in openpic_cpu_write_internal()
1102 mpic_irq_raise(opp, dst, ILR_INTTGT_INT); in openpic_cpu_write_internal()
1105 spin_unlock(&opp->lock); in openpic_cpu_write_internal()
1106 kvm_notify_acked_irq(opp->kvm, 0, notify_eoi); in openpic_cpu_write_internal()
1107 spin_lock(&opp->lock); in openpic_cpu_write_internal()
1120 struct openpic *opp = opaque; in openpic_cpu_write() local
1122 return openpic_cpu_write_internal(opp, addr, val, in openpic_cpu_write()
1126 static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst, in openpic_iack() argument
1133 mpic_irq_lower(opp, dst, ILR_INTTGT_INT); in openpic_iack()
1135 irq = IRQ_get_next(opp, &dst->raised); in openpic_iack()
1140 return opp->spve; in openpic_iack()
1142 src = &opp->src[irq]; in openpic_iack()
1147 openpic_update_irq(opp, irq); in openpic_iack()
1148 retval = opp->spve; in openpic_iack()
1152 retval = IVPR_VECTOR(opp, src->ivpr); in openpic_iack()
1162 if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) { in openpic_iack()
1166 openpic_set_irq(opp, irq, 1); in openpic_iack()
1167 openpic_set_irq(opp, irq, 0); in openpic_iack()
1178 struct openpic *opp = vcpu->arch.mpic; in kvmppc_mpic_set_epr() local
1182 spin_lock_irqsave(&opp->lock, flags); in kvmppc_mpic_set_epr()
1184 if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY) in kvmppc_mpic_set_epr()
1185 kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu)); in kvmppc_mpic_set_epr()
1187 spin_unlock_irqrestore(&opp->lock, flags); in kvmppc_mpic_set_epr()
1193 struct openpic *opp = opaque; in openpic_cpu_read_internal() local
1206 dst = &opp->dst[idx]; in openpic_cpu_read_internal()
1216 retval = openpic_iack(opp, dst, idx); in openpic_cpu_read_internal()
1233 struct openpic *opp = opaque; in openpic_cpu_read() local
1235 return openpic_cpu_read_internal(opp, addr, ptr, in openpic_cpu_read()
1288 static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr) in add_mmio_region() argument
1290 if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) { in add_mmio_region()
1295 opp->mmio_regions[opp->num_mmio_regions++] = mr; in add_mmio_region()
1298 static void fsl_common_init(struct openpic *opp) in fsl_common_init() argument
1303 add_mmio_region(opp, &openpic_msi_mmio); in fsl_common_init()
1304 add_mmio_region(opp, &openpic_summary_mmio); in fsl_common_init()
1306 opp->vid = VID_REVISION_1_2; in fsl_common_init()
1307 opp->vir = VIR_GENERIC; in fsl_common_init()
1308 opp->vector_mask = 0xFFFF; in fsl_common_init()
1309 opp->tfrr_reset = 0; in fsl_common_init()
1310 opp->ivpr_reset = IVPR_MASK_MASK; in fsl_common_init()
1311 opp->idr_reset = 1 << 0; in fsl_common_init()
1312 opp->max_irq = MAX_IRQ; in fsl_common_init()
1314 opp->irq_ipi0 = virq; in fsl_common_init()
1316 opp->irq_tim0 = virq; in fsl_common_init()
1321 opp->irq_msi = 224; in fsl_common_init()
1323 for (i = 0; i < opp->fsl->max_ext; i++) in fsl_common_init()
1324 opp->src[i].level = false; in fsl_common_init()
1328 opp->src[i].type = IRQ_TYPE_FSLINT; in fsl_common_init()
1329 opp->src[i].level = true; in fsl_common_init()
1334 opp->src[i].type = IRQ_TYPE_FSLSPECIAL; in fsl_common_init()
1335 opp->src[i].level = false; in fsl_common_init()
1339 static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr) in kvm_mpic_read_internal() argument
1343 for (i = 0; i < opp->num_mmio_regions; i++) { in kvm_mpic_read_internal()
1344 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_read_internal()
1349 return mr->read(opp, addr - mr->start_addr, ptr); in kvm_mpic_read_internal()
1355 static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val) in kvm_mpic_write_internal() argument
1359 for (i = 0; i < opp->num_mmio_regions; i++) { in kvm_mpic_write_internal()
1360 const struct mem_reg *mr = opp->mmio_regions[i]; in kvm_mpic_write_internal()
1365 return mr->write(opp, addr - mr->start_addr, val); in kvm_mpic_write_internal()
1375 struct openpic *opp = container_of(this, struct openpic, mmio); in kvm_mpic_read() local
1388 spin_lock_irq(&opp->lock); in kvm_mpic_read()
1389 ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val); in kvm_mpic_read()
1390 spin_unlock_irq(&opp->lock); in kvm_mpic_read()
1417 struct openpic *opp = container_of(this, struct openpic, mmio); in kvm_mpic_write() local
1429 spin_lock_irq(&opp->lock); in kvm_mpic_write()
1430 ret = kvm_mpic_write_internal(opp, addr - opp->reg_base, in kvm_mpic_write()
1432 spin_unlock_irq(&opp->lock); in kvm_mpic_write()
1445 static void map_mmio(struct openpic *opp) in map_mmio() argument
1447 kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops); in map_mmio()
1449 kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS, in map_mmio()
1450 opp->reg_base, OPENPIC_REG_SIZE, in map_mmio()
1451 &opp->mmio); in map_mmio()
1454 static void unmap_mmio(struct openpic *opp) in unmap_mmio() argument
1456 kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio); in unmap_mmio()
1459 static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr) in set_base_addr() argument
1472 if (base == opp->reg_base) in set_base_addr()
1475 mutex_lock(&opp->kvm->slots_lock); in set_base_addr()
1477 unmap_mmio(opp); in set_base_addr()
1478 opp->reg_base = base; in set_base_addr()
1486 map_mmio(opp); in set_base_addr()
1489 mutex_unlock(&opp->kvm->slots_lock); in set_base_addr()
1496 static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type) in access_reg() argument
1503 spin_lock_irq(&opp->lock); in access_reg()
1506 ret = kvm_mpic_write_internal(opp, addr, *val); in access_reg()
1508 ret = kvm_mpic_read_internal(opp, addr, val); in access_reg()
1510 spin_unlock_irq(&opp->lock); in access_reg()
1519 struct openpic *opp = dev->private; in mpic_set_attr() local
1526 return set_base_addr(opp, attr); in mpic_set_attr()
1535 return access_reg(opp, attr->attr, &attr32, ATTR_SET); in mpic_set_attr()
1547 spin_lock_irq(&opp->lock); in mpic_set_attr()
1548 openpic_set_irq(opp, attr->attr, attr32); in mpic_set_attr()
1549 spin_unlock_irq(&opp->lock); in mpic_set_attr()
1558 struct openpic *opp = dev->private; in mpic_get_attr() local
1567 mutex_lock(&opp->kvm->slots_lock); in mpic_get_attr()
1568 attr64 = opp->reg_base; in mpic_get_attr()
1569 mutex_unlock(&opp->kvm->slots_lock); in mpic_get_attr()
1581 ret = access_reg(opp, attr->attr, &attr32, ATTR_GET); in mpic_get_attr()
1594 spin_lock_irq(&opp->lock); in mpic_get_attr()
1595 attr32 = opp->src[attr->attr].pending; in mpic_get_attr()
1596 spin_unlock_irq(&opp->lock); in mpic_get_attr()
1633 struct openpic *opp = dev->private; in mpic_destroy() local
1636 kfree(opp); in mpic_destroy()
1640 static int mpic_set_default_irq_routing(struct openpic *opp) in mpic_set_default_irq_routing() argument
1649 kvm_set_irq_routing(opp->kvm, routing, 0, 0); in mpic_set_default_irq_routing()
1657 struct openpic *opp; in mpic_create() local
1664 opp = kzalloc(sizeof(struct openpic), GFP_KERNEL); in mpic_create()
1665 if (!opp) in mpic_create()
1668 dev->private = opp; in mpic_create()
1669 opp->kvm = dev->kvm; in mpic_create()
1670 opp->dev = dev; in mpic_create()
1671 opp->model = type; in mpic_create()
1672 spin_lock_init(&opp->lock); in mpic_create()
1674 add_mmio_region(opp, &openpic_gbl_mmio); in mpic_create()
1675 add_mmio_region(opp, &openpic_tmr_mmio); in mpic_create()
1676 add_mmio_region(opp, &openpic_src_mmio); in mpic_create()
1677 add_mmio_region(opp, &openpic_cpu_mmio); in mpic_create()
1679 switch (opp->model) { in mpic_create()
1681 opp->fsl = &fsl_mpic_20; in mpic_create()
1682 opp->brr1 = 0x00400200; in mpic_create()
1683 opp->flags |= OPENPIC_FLAG_IDR_CRIT; in mpic_create()
1684 opp->nb_irqs = 80; in mpic_create()
1685 opp->mpic_mode_mask = GCR_MODE_MIXED; in mpic_create()
1687 fsl_common_init(opp); in mpic_create()
1692 opp->fsl = &fsl_mpic_42; in mpic_create()
1693 opp->brr1 = 0x00400402; in mpic_create()
1694 opp->flags |= OPENPIC_FLAG_ILR; in mpic_create()
1695 opp->nb_irqs = 196; in mpic_create()
1696 opp->mpic_mode_mask = GCR_MODE_PROXY; in mpic_create()
1698 fsl_common_init(opp); in mpic_create()
1707 ret = mpic_set_default_irq_routing(opp); in mpic_create()
1711 openpic_reset(opp); in mpic_create()
1714 dev->kvm->arch.mpic = opp; in mpic_create()
1719 kfree(opp); in mpic_create()
1735 struct openpic *opp = dev->private; in kvmppc_mpic_connect_vcpu() local
1740 if (opp->kvm != vcpu->kvm) in kvmppc_mpic_connect_vcpu()
1745 spin_lock_irq(&opp->lock); in kvmppc_mpic_connect_vcpu()
1747 if (opp->dst[cpu].vcpu) { in kvmppc_mpic_connect_vcpu()
1756 opp->dst[cpu].vcpu = vcpu; in kvmppc_mpic_connect_vcpu()
1757 opp->nb_cpus = max(opp->nb_cpus, cpu + 1); in kvmppc_mpic_connect_vcpu()
1759 vcpu->arch.mpic = opp; in kvmppc_mpic_connect_vcpu()
1764 if (opp->mpic_mode_mask == GCR_MODE_PROXY) in kvmppc_mpic_connect_vcpu()
1768 spin_unlock_irq(&opp->lock); in kvmppc_mpic_connect_vcpu()
1777 void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu) in kvmppc_mpic_disconnect_vcpu() argument
1779 BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu); in kvmppc_mpic_disconnect_vcpu()
1781 opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL; in kvmppc_mpic_disconnect_vcpu()
1795 struct openpic *opp = kvm->arch.mpic; in mpic_set_irq() local
1798 spin_lock_irqsave(&opp->lock, flags); in mpic_set_irq()
1799 openpic_set_irq(opp, irq, level); in mpic_set_irq()
1800 spin_unlock_irqrestore(&opp->lock, flags); in mpic_set_irq()
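The external entry points in this part of the listing (mpic_set_irq() above, kvm_set_msi() below) follow the same discipline: take opp->lock with interrupts disabled, drive the source through openpic_set_irq(), release the lock. A minimal sketch of that pattern; the function name and return convention here are illustrative, not taken from the file.

/* Illustrative only: the lock-call-unlock pattern used by the entry
 * points in this listing when asserting or clearing a source. */
static int mpic_assert_source_sketch(struct kvm *kvm, int irq, int level)
{
	struct openpic *opp = kvm->arch.mpic;  /* per-VM MPIC instance */
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);  /* all MPIC state sits under opp->lock */
	openpic_set_irq(opp, irq, level);      /* raise or lower the source and reroute */
	spin_unlock_irqrestore(&opp->lock, flags);

	return 0;
}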
1809 struct openpic *opp = kvm->arch.mpic; in kvm_set_msi() local
1812 spin_lock_irqsave(&opp->lock, flags); in kvm_set_msi()
1819 spin_unlock_irqrestore(&opp->lock, flags); in kvm_set_msi()