Lines Matching +full:riscv +full:- +full:aia
// SPDX-License-Identifier: GPL-2.0
#include <linux/irqchip/riscv-imsic.h>
raw_spin_lock_irqsave(&hgctrl->lock, flags); in aia_find_hgei()
hgei = -1; in aia_find_hgei()
if (hgctrl->owners[i] == owner) { in aia_find_hgei()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in aia_find_hgei()
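The aia_find_hgei() hits above show only the lock/unlock pair and the owner comparison. A minimal sketch of how such a lookup plausibly fits together, using the hgctrl fields visible above and the kvm_riscv_aia_nr_hgei limit that appears in later hits (the per-CPU accessors and loop bounds are assumptions, not confirmed by the hits):

	/* Sketch: find the HGEI line owned by a vCPU on the current CPU. */
	static int aia_find_hgei(struct kvm_vcpu *owner)
	{
		int i, hgei;
		unsigned long flags;
		struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);	/* assumed per-CPU state */

		raw_spin_lock_irqsave(&hgctrl->lock, flags);

		hgei = -1;
		for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
			if (hgctrl->owners[i] == owner) {
				hgei = i;
				break;
			}
		}

		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
		put_cpu_ptr(&aia_hgei);

		return hgei;
	}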
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_flush_interrupts()
if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) { in kvm_riscv_vcpu_aia_flush_interrupts()
mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0); in kvm_riscv_vcpu_aia_flush_interrupts()
val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask; in kvm_riscv_vcpu_aia_flush_interrupts()
csr->hviph &= ~mask; in kvm_riscv_vcpu_aia_flush_interrupts()
csr->hviph |= val; in kvm_riscv_vcpu_aia_flush_interrupts()
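The flush path atomically consumes irqs_pending_mask[1] with xchg_acquire(), then rewrites only the consumed bits of the shadow hviph, so concurrent setters are never lost. A sketch assembling the visible lines into a complete function (the availability guard and declarations are assumptions):

	void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
		unsigned long mask, val;

		if (!kvm_riscv_aia_available())	/* assumed guard */
			return;

		if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
			/* Atomically claim the pending-mask bits... */
			mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
			/* ...and snapshot the matching pending values. */
			val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

			csr->hviph &= ~mask;
			csr->hviph |= val;
		}
	}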
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_sync_interrupts()
csr->vsieh = ncsr_read(CSR_VSIEH); in kvm_riscv_vcpu_aia_sync_interrupts()
if (READ_ONCE(vcpu->arch.irqs_pending[1]) & in kvm_riscv_vcpu_aia_has_interrupts()
(vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask))) in kvm_riscv_vcpu_aia_has_interrupts()
seip = vcpu->arch.guest_csr.vsie; in kvm_riscv_vcpu_aia_has_interrupts()
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip) in kvm_riscv_vcpu_aia_has_interrupts()
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_aia_update_hvip()
ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph); in kvm_riscv_vcpu_aia_update_hvip()
ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT)))); in kvm_riscv_vcpu_aia_update_hvip()
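aia_hvictl_value() itself is not among the hits. A plausible sketch, assuming HVICTL is programmed with IID = IRQ_S_EXT (the guest sees a supervisor-level external interrupt) and the pending flag folded into the low IPRIO bit, per the AIA spec's HVICTL layout (the HVICTL_IID / HVICTL_IID_SHIFT names are assumptions):

	/* Sketch: compute an HVICTL value injecting (or clearing) the S-level external IRQ. */
	static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
	{
		unsigned long hvictl;

		hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
		hvictl |= ext_irq_pending;	/* low bit signals a pending external IRQ */
		return hvictl;
	}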
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_HVIPH, csr->hviph); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h); in kvm_riscv_vcpu_aia_load()
nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_VSISELECT, csr->vsiselect); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_HVIPRIO1, csr->hviprio1); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_HVIPRIO2, csr->hviprio2); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_VSIEH, csr->vsieh); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_HVIPH, csr->hviph); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_HVIPRIO1H, csr->hviprio1h); in kvm_riscv_vcpu_aia_load()
csr_write(CSR_HVIPRIO2H, csr->hviprio2h); in kvm_riscv_vcpu_aia_load()
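Both a nacl_csr_write() path and a plain csr_write() path appear for the same CSRs, so the load path presumably dispatches once on NACL (nested acceleration) availability, with the H-suffixed CSRs touched only on 32-bit hosts. A sketch of the implied skeleton (the predicate name and the CONFIG_32BIT guards are assumptions):

	if (kvm_riscv_nacl_available()) {	/* assumed predicate */
		void *nsh = nacl_shmem();

		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	} else {
		csr_write(CSR_VSISELECT, csr->vsiselect);
		csr_write(CSR_HVIPRIO1, csr->hviprio1);
		csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		csr_write(CSR_VSIEH, csr->vsieh);
		csr_write(CSR_HVIPH, csr->hviph);
		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	}

The put path below mirrors this exactly with reads instead of writes, saving the live CSR state back into the per-vCPU shadow.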
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_put()
csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT); in kvm_riscv_vcpu_aia_put()
csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1); in kvm_riscv_vcpu_aia_put()
csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2); in kvm_riscv_vcpu_aia_put()
csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH); in kvm_riscv_vcpu_aia_put()
csr->hviph = nacl_csr_read(nsh, CSR_HVIPH); in kvm_riscv_vcpu_aia_put()
csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H); in kvm_riscv_vcpu_aia_put()
csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H); in kvm_riscv_vcpu_aia_put()
csr->vsiselect = csr_read(CSR_VSISELECT); in kvm_riscv_vcpu_aia_put()
csr->hviprio1 = csr_read(CSR_HVIPRIO1); in kvm_riscv_vcpu_aia_put()
csr->hviprio2 = csr_read(CSR_HVIPRIO2); in kvm_riscv_vcpu_aia_put()
csr->vsieh = csr_read(CSR_VSIEH); in kvm_riscv_vcpu_aia_put()
csr->hviph = csr_read(CSR_HVIPH); in kvm_riscv_vcpu_aia_put()
csr->hviprio1h = csr_read(CSR_HVIPRIO1H); in kvm_riscv_vcpu_aia_put()
csr->hviprio2h = csr_read(CSR_HVIPRIO2H); in kvm_riscv_vcpu_aia_put()
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_get_csr()
return -ENOENT; in kvm_riscv_vcpu_aia_get_csr()
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; in kvm_riscv_vcpu_aia_set_csr()
return -ENOENT; in kvm_riscv_vcpu_aia_set_csr()
WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0); in kvm_riscv_vcpu_aia_set_csr()
/* If AIA is not available, redirect the trap */ in kvm_riscv_vcpu_aia_rmw_topei()
/* If AIA is not initialized, forward to user space */ in kvm_riscv_vcpu_aia_rmw_topei()
if (!kvm_riscv_aia_initialized(vcpu->kvm)) in kvm_riscv_vcpu_aia_rmw_topei()
* External IRQ priority is always read-only zero. This means the default priority order is used for external IRQs unless HVICTL.IID == 9 and HVICTL.IPRIO != 0
 0,  8,  -1,  -1,  16,  24,  -1,  -1,  /*  0 -  7 */
32, -1,  -1,  -1,  -1,  40,  48,  56,  /*  8 - 15 */
64, 72,  80,  88,  96, 104, 112, 120,  /* 16 - 23 */
-1, -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 24 - 31 */
-1, -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 32 - 39 */
-1, -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 40 - 47 */
-1, -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 48 - 55 */
-1, -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 56 - 63 */
first_irq = (isel - ISELECT_IPRIO0) * 4; in aia_rmw_iprio()
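Each IPRIO selector register covers four priority bytes, so selector k starts at IRQ 4k; for example, isel = ISELECT_IPRIO0 + 2 gives first_irq = 8. The bit-position table above then locates each IRQ's priority byte inside the hviprio1/hviprio2 pair (entries of -1 mean the IRQ has no programmable priority). A simplified RV64-only sketch of the lookup; the table and helper names are assumptions for illustration:

	/* Sketch: fetch the 8-bit priority for one external IRQ (RV64 layout). */
	static u8 aia_get_iprio(struct kvm_vcpu *vcpu, unsigned int irq)
	{
		struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
		int bitpos = aia_irq2bitpos[irq];	/* table shown above, name assumed */
		unsigned long hviprio;

		if (bitpos < 0)
			return 0;	/* priority is hardwired for this IRQ */

		/* Bit positions 0..63 live in hviprio1, 64..127 in hviprio2. */
		hviprio = (bitpos < 64) ? csr->hviprio1 : csr->hviprio2;

		return (hviprio >> (bitpos & 63)) & 0xff;
	}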
/* If AIA is not available, redirect the trap */ in kvm_riscv_vcpu_aia_rmw_ireg()
kvm_riscv_aia_initialized(vcpu->kvm)) in kvm_riscv_vcpu_aia_rmw_ireg()
int ret = -ENOENT; in kvm_riscv_aia_alloc_hgei()
return -ENODEV; in kvm_riscv_aia_alloc_hgei()
raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_alloc_hgei()
if (hgctrl->free_bitmap) { in kvm_riscv_aia_alloc_hgei()
ret = __ffs(hgctrl->free_bitmap); in kvm_riscv_aia_alloc_hgei()
hgctrl->free_bitmap &= ~BIT(ret); in kvm_riscv_aia_alloc_hgei()
hgctrl->owners[ret] = owner; in kvm_riscv_aia_alloc_hgei()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_alloc_hgei()
lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL; in kvm_riscv_aia_alloc_hgei()
*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ); in kvm_riscv_aia_alloc_hgei()
*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ); in kvm_riscv_aia_alloc_hgei()
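Each allocated HGEI line is backed by its own IMSIC guest-file MMIO page, so the line index simply scales the per-CPU MSI base address. A worked instance of the math, assuming 4 KiB pages (IMSIC_MMIO_PAGE_SZ = 0x1000) and an allocated line ret = 2:

	/* Guest file #2 lands one page per line above the MSI base: */
	*hgei_va = lc->msi_va + (2 * 0x1000);	/* msi_va + 0x2000, virtual  */
	*hgei_pa = lc->msi_pa + (2 * 0x1000);	/* msi_pa + 0x2000, physical */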
raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_free_hgei()
if (!(hgctrl->free_bitmap & BIT(hgei))) { in kvm_riscv_aia_free_hgei()
hgctrl->free_bitmap |= BIT(hgei); in kvm_riscv_aia_free_hgei()
hgctrl->owners[hgei] = NULL; in kvm_riscv_aia_free_hgei()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_free_hgei()
raw_spin_lock_irqsave(&hgctrl->lock, flags); in hgei_interrupt()
if (hgctrl->owners[i]) in hgei_interrupt()
kvm_vcpu_kick(hgctrl->owners[i]); in hgei_interrupt()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in hgei_interrupt()
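The hgei_interrupt() hits show only the kick loop. A plausible shape for the whole handler is to latch the pending-and-enabled lines from HGEIP/HGEIE, mask them, then kick each owning vCPU under the lock; the CSR reads, masking step, and return value here are assumptions:

	static irqreturn_t hgei_interrupt(int irq, void *dev_id)
	{
		int i;
		unsigned long hgei_mask, flags;
		struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

		/* Latch lines that are both pending and enabled, then mask them. */
		hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
		csr_clear(CSR_HGEIE, hgei_mask);

		raw_spin_lock_irqsave(&hgctrl->lock, flags);
		for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
			if (hgctrl->owners[i])
				kvm_vcpu_kick(hgctrl->owners[i]);
		}
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		put_cpu_ptr(&aia_hgei);
		return IRQ_HANDLED;
	}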
/* Initialize per-CPU guest external interrupt line management */ in aia_hgei_init()
raw_spin_lock_init(&hgctrl->lock); in aia_hgei_init()
hgctrl->free_bitmap = in aia_hgei_init()
BIT(kvm_riscv_aia_nr_hgei + 1) - 1; in aia_hgei_init()
hgctrl->free_bitmap &= ~BIT(0); in aia_hgei_init()
hgctrl->free_bitmap = 0; in aia_hgei_init()
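BIT(n + 1) - 1 builds a mask covering lines 0..n, and clearing bit 0 reserves line 0, which is never a usable guest external interrupt line (HGEI lines are numbered from 1). A worked instance:

	/* kvm_riscv_aia_nr_hgei == 3:
	 *   BIT(3 + 1) - 1  == 0b1111   -> lines 0..3
	 *   &= ~BIT(0)      == 0b1110   -> line 0 reserved, lines 1..3 allocatable
	 * __ffs() in kvm_riscv_aia_alloc_hgei() then hands out line 1 first.
	 */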
return -ENOENT; in aia_hgei_init()
/* Map per-CPU SGEI interrupt from INTC domain */ in aia_hgei_init()
return -ENOMEM; in aia_hgei_init()
/* Request per-CPU SGEI interrupt */ in aia_hgei_init()
"riscv-kvm", &aia_hgei); in aia_hgei_init()
/* Free per-CPU SGEI interrupt */ in aia_hgei_exit()
/* Enable per-CPU SGEI interrupt */ in kvm_riscv_aia_enable()
/* Disable per-CPU SGEI interrupt */ in kvm_riscv_aia_disable()
raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
vcpu = hgctrl->owners[i]; in kvm_riscv_aia_disable()
* We release hgctrl->lock before notifying IMSIC so that we avoid lock ordering issues in kvm_riscv_aia_disable()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
raw_spin_lock_irqsave(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
raw_spin_unlock_irqrestore(&hgctrl->lock, flags); in kvm_riscv_aia_disable()
return -ENODEV; in kvm_riscv_aia_init()
/* Figure out the number of bits in HGEIE */ in kvm_riscv_aia_init()
csr_write(CSR_HGEIE, -1UL); in kvm_riscv_aia_init()
kvm_riscv_aia_nr_hgei--; in kvm_riscv_aia_init()
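Writing all-ones to HGEIE and reading back is the usual WARL probe: only implemented bits stick, so the highest set bit gives the line count, and the hardwired bit 0 is then discounted. A sketch of the probe implied by these hits (the csr_swap()/fls_long() combination is an assumption):

	/* Probe the WARL HGEIE CSR for the number of implemented lines. */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_swap(CSR_HGEIE, 0));
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;	/* bit 0 is never a usable line */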
* Number of usable HGEI lines should be the minimum of the per-HART HGEIE width and the IMSIC guest index range in kvm_riscv_aia_init()
BIT(gc->guest_index_bits) - 1); in kvm_riscv_aia_init()
kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1; in kvm_riscv_aia_init()
/* Enable KVM AIA support */ in kvm_riscv_aia_init()