Lines Matching +full:ipa +full:-clock +full:-enabled

All hits below come from the s390 KVM privileged-instruction intercept handlers; the query terms match incidentally (the SIE block's "ipa" field, SET CLOCK handling, and "enabled" flags).

// SPDX-License-Identifier: GPL-2.0

#include <asm/asm-offsets.h>
#include <asm/page-states.h>
#include "kvm-s390.h"

In handle_ri():
        vcpu->stat.instruction_ri++;
        if (test_kvm_facility(vcpu->kvm, 64)) {
        vcpu->arch.sie_block->ecb3 |= ECB3_RI;

In kvm_s390_handle_aa():
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
        return -EOPNOTSUPP;

In handle_gs():
        vcpu->stat.instruction_gs++;
        if (test_kvm_facility(vcpu->kvm, 133)) {
        current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
        restore_gs_cb(current->thread.gs_cb);
        vcpu->arch.sie_block->ecb |= ECB_GS;
        vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
        vcpu->arch.gs_enabled = 1;

In kvm_s390_handle_e3():
        int code = vcpu->arch.sie_block->ipb & 0xff;
        return -EOPNOTSUPP;

/* Handle SCK (SET CLOCK) interception */
In handle_set_clock():
        vcpu->stat.instruction_sck++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        /*
         * To set the TOD clock the kvm lock must be taken, but the vcpu lock
         * cases, for example when the multiple epoch facility or TOD clock
         * the instruction will be retried via -EAGAIN at a later point in
         */
        if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
        return -EAGAIN;
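
The comment above describes a try-lock pattern: rather than sleeping with the vcpu lock held, the handler gives up and returns -EAGAIN so the intercepted instruction is simply retried later. A minimal user-space sketch of that shape; try_set_tod() and the retry loop are invented stand-ins (the real caller re-enters SIE instead of looping):

        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-in for kvm_s390_try_set_tod_clock(): pretend the lock is
         * contended on the first attempt and free afterwards. */
        static bool try_set_tod(unsigned long long tod)
        {
                static int busy = 1;

                if (busy--)
                        return false;   /* could not take the lock, do not block */
                printf("TOD clock set to %llu\n", tod);
                return true;
        }

        static int handle_set_clock(unsigned long long tod)
        {
                if (!try_set_tod(tod))
                        return -EAGAIN; /* caller retries the instruction later */
                return 0;               /* handled, guest resumes after SCK */
        }

        int main(void)
        {
                int rc;

                while ((rc = handle_set_clock(42ULL)) == -EAGAIN)
                        ;               /* models re-entering the guest */
                return rc;
        }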

In handle_set_prefix():
        vcpu->stat.instruction_spx++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))

In handle_store_prefix():
        vcpu->stat.instruction_stpx++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)

In handle_store_cpu_address():
        u16 vcpu_id = vcpu->vcpu_id;
        vcpu->stat.instruction_stap++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)

In kvm_s390_skey_check_enable():
        /* Already enabled? */
        if (vcpu->arch.skey_enabled)
        if (!vcpu->kvm->arch.use_skf)
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        vcpu->arch.skey_enabled = true;

In try_handle_skey():
        if (vcpu->kvm->arch.use_skf) {
        /* with storage-key facility, SIE interprets it for us */
        return -EAGAIN;

In handle_iske():
        vcpu->stat.instruction_iske++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        return rc != -EAGAIN ? rc : 0;
        gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
        mmap_read_lock(current->mm);
        rc = get_guest_storage_key(current->mm, vmaddr, &key);
        rc = fixup_user_fault(current->mm, vmaddr,
        mmap_read_unlock(current->mm);
        mmap_read_unlock(current->mm);
        if (rc == -EFAULT)
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
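
handle_iske() shows the usual pattern for touching guest storage keys from the host: take the mmap read lock, attempt the lookup, and if the backing page is not mapped yet, fault it in with fixup_user_fault() and retry. A compressed sketch of that control flow with made-up helpers (lookup_key(), fault_in()); this is not the kernel API:

        #include <errno.h>
        #include <stdio.h>

        static int page_mapped;         /* toy state: is the page faulted in? */

        static int lookup_key(unsigned long vmaddr, unsigned char *key)
        {
                (void)vmaddr;
                if (!page_mapped)
                        return -EFAULT; /* backing page not present yet */
                *key = 0x60;            /* arbitrary storage-key byte */
                return 0;
        }

        static int fault_in(unsigned long vmaddr)
        {
                (void)vmaddr;
                page_mapped = 1;        /* stand-in for fixup_user_fault() */
                return 0;
        }

        int main(void)
        {
                unsigned long vmaddr = 0x1000;
                unsigned char key;
                int rc;

        retry:
                /* mmap_read_lock(mm) would be taken here */
                rc = lookup_key(vmaddr, &key);
                if (rc == -EFAULT) {
                        /* fault the page in, then try the lookup again */
                        if (fault_in(vmaddr))
                                return 1;
                        goto retry;
                }
                /* mmap_read_unlock(mm) */
                printf("key = 0x%02x\n", key);
                return rc;
        }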

In handle_rrbe():
        vcpu->stat.instruction_rrbe++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        return rc != -EAGAIN ? rc : 0;
        gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
        mmap_read_lock(current->mm);
        rc = reset_guest_reference_bit(current->mm, vmaddr);
        rc = fixup_user_fault(current->mm, vmaddr,
        mmap_read_unlock(current->mm);
        mmap_read_unlock(current->mm);
        if (rc == -EFAULT)

In handle_sske():
        unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
        vcpu->stat.instruction_sske++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        return rc != -EAGAIN ? rc : 0;
        if (!test_kvm_facility(vcpu->kvm, 8))
        if (!test_kvm_facility(vcpu->kvm, 10))
        if (!test_kvm_facility(vcpu->kvm, 14))
        key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
        unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
        mmap_read_lock(current->mm);
        rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
        rc = fixup_user_fault(current->mm, vmaddr,
        rc = !rc ? -EAGAIN : rc;
        mmap_read_unlock(current->mm);
        if (rc == -EFAULT)
        if (rc == -EAGAIN)
        vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
        vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
        if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
        vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
        vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
        vcpu->run->s.regs.gprs[reg2] |= end;
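
Two masking idioms recur in handle_sske(): the key operand is taken from reg1 with the low bit cleared (& 0xfe), and the previous key is returned to the guest in the 0xff00 byte of reg1 (clear it, then OR in oldkey << 8). A standalone illustration of that read-modify-write on a 64-bit register image; the values are invented:

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t reg1 = 0x1122334455667788ULL; /* made-up register image */
                uint8_t oldkey = 0x3a;                 /* key reported by the set op */

                /* key operand: low byte of reg1 with the last bit ignored */
                uint8_t key = reg1 & 0xfe;

                /* store the previous key into bits 48-55, rest untouched */
                reg1 &= ~0xff00ULL;
                reg1 |= (uint64_t)oldkey << 8;

                printf("key=0x%02x reg1=%016" PRIx64 "\n", key, reg1);
                return 0;
        }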

In handle_ipte_interlock():
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));

In handle_test_block():
        vcpu->stat.instruction_tb++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
        return -EFAULT;
        vcpu->run->s.regs.gprs[0] = 0;

In handle_tpi():
        vcpu->stat.instruction_tpi++;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        /*
         * Store the two-word I/O interruption code into the
         */
        len = sizeof(tpi_data) - 4;
        /*
         * Store the three-word I/O interruption code into
         */
        rc = -EFAULT;
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
        rc = -EFAULT;
        return rc ? -EFAULT : 0;
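
TPI packs the interruption code into three 32-bit words (subchannel ID and number, interruption parameter, interruption word); with a non-zero operand address only the first two words are stored, hence len = sizeof(tpi_data) - 4, while the three-word form goes to low core. A toy version of that length selection; store_to_guest() is invented and the low-core offset 184 is an assumption about the s390 layout:

        #include <inttypes.h>
        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        static void store_to_guest(uint64_t addr, const void *buf, size_t len)
        {
                (void)buf;      /* invented stand-in for write_guest() */
                printf("store %zu bytes at 0x%" PRIx64 "\n", len, addr);
        }

        int main(void)
        {
                uint32_t tpi_data[3];
                uint64_t operand = 0x2000;      /* 0 selects the low-core form */
                size_t len;

                tpi_data[0] = (uint32_t)0x0001 << 16 | 0x0003; /* schid | schnr */
                tpi_data[1] = 0x12345678;                      /* int. parameter */
                tpi_data[2] = 0x80000000;                      /* int. word */

                if (operand) {
                        len = sizeof(tpi_data) - 4;   /* two-word code */
                        store_to_guest(operand, tpi_data, len);
                } else {
                        len = sizeof(tpi_data);       /* three-word code */
                        store_to_guest(184, tpi_data, len); /* offset assumed */
                }
                return 0;
        }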

In handle_tsch():
        vcpu->stat.instruction_tsch++;
        if (vcpu->run->s.regs.gprs[1])
        inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
        vcpu->run->s.regs.gprs[1]);
        /*
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
        vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
        vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
        vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        return -EREMOTE;
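
With in-kernel css_support the interrupt is dequeued, its payload copied into kvm_run->s390_tsch, and -EREMOTE returned so the KVM_RUN ioctl exits with KVM_EXIT_S390_TSCH for userspace to finish the instruction. A schematic of the userspace side of that handoff, a sketch only (the fabricated kvm_run in main() stands in for the mmap'ed one of a real vcpu fd; Linux headers required):

        #include <linux/kvm.h>
        #include <stdio.h>

        static int handle_exit(int vcpu_fd, struct kvm_run *run)
        {
                (void)vcpu_fd;
                switch (run->exit_reason) {
                case KVM_EXIT_S390_TSCH:
                        /* finish TEST SUBCHANNEL in the userspace device model */
                        printf("tsch: schid %04x.%04x dequeued=%d\n",
                               run->s390_tsch.subchannel_id,
                               run->s390_tsch.subchannel_nr,
                               run->s390_tsch.dequeued);
                        return 0;
                case KVM_EXIT_S390_STSI:
                        /* fc/sel1/sel2 tell userspace which SYSIB to build */
                        printf("stsi: fc=%d sel1=%d sel2=%d\n",
                               run->s390_stsi.fc, run->s390_stsi.sel1,
                               run->s390_stsi.sel2);
                        return 0;
                default:
                        return -1;
                }
        }

        int main(void)
        {
                struct kvm_run run = { .exit_reason = KVM_EXIT_S390_TSCH };

                run.s390_tsch.subchannel_id = 1;
                run.s390_tsch.subchannel_nr = 3;
                run.s390_tsch.dequeued = 1;
                return handle_exit(-1, &run);
        }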

In handle_io_inst():
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        if (vcpu->kvm->arch.css_support) {
        if (vcpu->arch.sie_block->ipa == 0xb236)
        if (vcpu->arch.sie_block->ipa == 0xb235)
        vcpu->stat.instruction_io_other++;
        return -EOPNOTSUPP;

In handle_pqap():
        return -EOPNOTSUPP;
        if (!(vcpu->arch.sie_block->eca & ECA_APIE))
        return -EOPNOTSUPP;
        reg0 = vcpu->run->s.regs.gprs[0];
        return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        /* bits 41-47 must all be zeros */
        if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
        if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
        if (!test_kvm_facility(vcpu->kvm, 65))
        down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
        if (vcpu->kvm->arch.crypto.pqap_hook) {
        pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
        if (vcpu->run->s.regs.gprs[1] & 0x00ff0000)
        up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
        up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
        memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
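
The PQAP path consults an optional hook under a read-side rwsem, snapshotting the hook before calling it so its owner can tear it down safely under the write side. The same shape in portable C with pthreads; all names here are illustrative, not the kernel's:

        #include <pthread.h>
        #include <stdio.h>

        typedef int (*pqap_fn)(void);

        static pthread_rwlock_t hook_rwsem = PTHREAD_RWLOCK_INITIALIZER;
        static pqap_fn pqap_hook;       /* NULL when no module is registered */

        static int default_status(void)
        {
                return 0x01;            /* "no hook" response code, invented */
        }

        static int call_hook(void)
        {
                pqap_fn fn;
                int status;

                pthread_rwlock_rdlock(&hook_rwsem);
                fn = pqap_hook;         /* snapshot under the lock */
                status = fn ? fn() : default_status();
                pthread_rwlock_unlock(&hook_rwsem);
                return status;
        }

        static void set_hook(pqap_fn fn)
        {
                pthread_rwlock_wrlock(&hook_rwsem);
                pqap_hook = fn;         /* writers are exclusive */
                pthread_rwlock_unlock(&hook_rwsem);
        }

        static int my_hook(void) { return 0; }

        int main(void)
        {
                printf("status without hook: %d\n", call_hook());
                set_hook(my_hook);
                printf("status with hook:    %d\n", call_hook());
                return 0;
        }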

In handle_stfl():
        vcpu->stat.instruction_stfl++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac_list >> 32;
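
Facility bits are numbered from the most significant bit, so bits 0-31 of the first 64-bit facility doubleword live in its upper half; shifting right by 32 yields the u32 that STFL stores. A quick demonstration (the facility-list value is made up):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* first doubleword of the facility list; facility 0 is the
                 * MSB (contents invented) */
                uint64_t fac_list = 0xfb2fffc000000000ULL;
                uint32_t fac = fac_list >> 32;  /* bits 0-31 stay bits 0-31 */
                int n = 8;

                printf("STFL word: 0x%08x\n", fac);
                /* testing an individual facility bit n in the u32: */
                printf("facility %d: %s\n", n,
                       (fac & (0x80000000u >> n)) ? "installed" : "absent");
                return 0;
        }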

In is_valid_psw():
        if (psw->mask & PSW_MASK_UNASSIGNED)
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
        if (psw->addr & ~PSW_ADDR_31)
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
        if (psw->addr & 1)
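
These checks mirror the architecture's addressing modes: a 24-bit address must fit in 24 bits, a 31-bit (BA) address in 31 bits, EA+BA selects 64-bit mode, EA without BA is invalid, and odd instruction addresses are always rejected. A self-contained rendering of the same validation (the UNASSIGNED check is omitted; the mask values are defined locally as assumptions matching the usual s390 PSW layout):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PSW_MASK_EA        0x0000000100000000ULL  /* extended addressing */
        #define PSW_MASK_BA        0x0000000080000000ULL  /* basic addressing */
        #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
        #define PSW_ADDR_24        0x0000000000ffffffULL
        #define PSW_ADDR_31        0x000000007fffffffULL

        struct psw { uint64_t mask, addr; };

        static bool is_valid_psw(const struct psw *psw)
        {
                uint64_t mode = psw->mask & PSW_MASK_ADDR_MODE;

                if (mode == PSW_MASK_BA && (psw->addr & ~PSW_ADDR_31))
                        return false;   /* 31-bit address out of range */
                if (mode == 0 && (psw->addr & ~PSW_ADDR_24))
                        return false;   /* 24-bit address out of range */
                if (mode == PSW_MASK_EA)
                        return false;   /* EA without BA is invalid */
                if (psw->addr & 1)
                        return false;   /* instruction addresses are even */
                return true;
        }

        int main(void)
        {
                struct psw ok  = { PSW_MASK_EA | PSW_MASK_BA, 0x20000 };
                struct psw bad = { PSW_MASK_EA, 0x20000 };

                printf("%d %d\n", is_valid_psw(&ok), is_valid_psw(&bad));
                return 0;
        }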

In kvm_s390_handle_lpsw():
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        vcpu->stat.instruction_lpsw++;
        if (gpsw->mask & PSW_MASK_PSTATE)
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;

In handle_lpswe():
        vcpu->stat.instruction_lpswe++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))

In handle_lpswey():
        vcpu->stat.instruction_lpswey++;
        if (!test_kvm_facility(vcpu->kvm, 193))
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))

In handle_stidp():
        u64 stidp_data = vcpu->kvm->arch.model.cpuid;
        vcpu->stat.instruction_stidp++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)

In handle_stsi_3_2_2():
        cpus = atomic_read(&vcpu->kvm->online_vcpus);
        mem->count = 0;
        if (mem->count < 8)
        mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
        memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
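
For STSI 3.2.2, KVM takes the SYSIB reported by the underlying hypervisor (if any), shifts the existing per-VM entries down one slot, and inserts its own entry at index 0 with the guest's CPU counts and an EBCDIC name. The shift-and-insert itself is plain C; a small standalone version with a simplified entry type:

        #include <stdio.h>
        #include <string.h>

        struct vm_entry { char name[9]; int cpus; };

        int main(void)
        {
                struct vm_entry vm[8] = { { "LPAR1", 16 }, { "LPAR2", 8 } };
                int count = 2, n;

                if (count < 8)
                        count++;
                /* shift existing entries down to free slot 0 */
                for (n = count - 1; n > 0; n--)
                        memcpy(&vm[n], &vm[n - 1], sizeof(vm[0]));
                memset(&vm[0], 0, sizeof(vm[0]));
                strcpy(vm[0].name, "KVMguest");
                vm[0].cpus = 4;

                for (n = 0; n < count; n++)
                        printf("%d: %-8s %d cpus\n", n, vm[n].name, vm[n].cpus);
                return 0;
        }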

In insert_stsi_usr_data():
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;

In handle_stsi():
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        vcpu->stat.instruction_stsi++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        /*
         * - PTF/CPU topology support through facility 15
         * - KVM_CAP_S390_USER_STSI
         */
        if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) ||
            !vcpu->kvm->arch.user_stsi))
        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
        vcpu->run->s.regs.gprs[0] = 3 << 28;
        return -EREMOTE;
        memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
        if (vcpu->kvm->arch.user_stsi) {
        rc = -EREMOTE;
        vcpu->run->s.regs.gprs[0] = 0;
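
STSI takes its function code from bit positions 32-35 of general register 0 (the top nibble of its low word) and its selectors from the low byte of r0 and the low halfword of r1; the remaining low-word bits must be zero or the guest gets a specification exception. Decoding in isolation, with made-up register contents:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t gpr0 = 0x30000002ULL;  /* fc=3, sel1=2 (made up) */
                uint64_t gpr1 = 0x0002ULL;      /* sel2=2 */

                int fc   = (gpr0 & 0xf0000000) >> 28;
                int sel1 = gpr0 & 0xff;
                int sel2 = gpr1 & 0xffff;

                /* reserved bits must be zero */
                if ((gpr0 & 0x0fffff00) || (gpr1 & 0xffff0000)) {
                        puts("specification exception");
                        return 1;
                }
                printf("fc=%d sel1=%d sel2=%d\n", fc, sel1, sel2);
                return 0;
        }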

In kvm_s390_handle_b2():
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        return -EOPNOTSUPP;

In handle_epsw():
        vcpu->stat.instruction_epsw++;
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg2] |=
                vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
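
EPSW returns the 64-bit PSW mask as two 32-bit halves, written into the low words of the two operand registers while their high words are preserved. The same split, standalone (values invented):

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t psw_mask = 0x0705000180000000ULL; /* made-up PSW mask */
                uint64_t r1 = 0xaaaaaaaa11111111ULL;       /* prior contents */
                uint64_t r2 = 0xbbbbbbbb22222222ULL;

                r1 = (r1 & 0xffffffff00000000ULL) | (psw_mask >> 32);
                r2 = (r2 & 0xffffffff00000000ULL) | (psw_mask & 0xffffffffULL);

                printf("r1=%016" PRIx64 "\nr2=%016" PRIx64 "\n", r1, r2);
                return 0;
        }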

In handle_pfmf():
        vcpu->stat.instruction_pfmf++;
        if (!test_kvm_facility(vcpu->kvm, 8))
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
        /* Only provide non-quiescing support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
            !test_kvm_facility(vcpu->kvm, 14))
        /* Only provide conditional-SSKE support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
            test_kvm_facility(vcpu->kvm, 10)) {
        mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
        mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
        nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
        key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
        end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
        /* only support 2G frame size if EDAT2 is available and we are
           not in 24-bit addressing mode */
        if (!test_kvm_facility(vcpu->kvm, 78) ||
            psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
        end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
        if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
        mmap_read_lock(current->mm);
        rc = cond_set_guest_storage_key(current->mm, vmaddr,
        rc = fixup_user_fault(current->mm, vmaddr,
        rc = !rc ? -EAGAIN : rc;
        mmap_read_unlock(current->mm);
        if (rc == -EFAULT)
        if (rc == -EAGAIN)
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
        vcpu->run->s.regs.gprs[reg2] = end;
        vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
        vcpu->run->s.regs.gprs[reg2] |= end;
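
Each frame-size case in handle_pfmf() computes the first byte after the current frame with the same idiom, end = (start + SIZE) & ~(SIZE - 1), which works for any power-of-two SIZE (4 KiB page, 1 MiB segment, 2 GiB region): it rounds up to the next boundary strictly above start. A quick check of that arithmetic:

        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        /* End of the naturally aligned, power-of-two sized frame that
         * contains addr: the next boundary strictly above addr. */
        static uint64_t frame_end(uint64_t addr, uint64_t size)
        {
                return (addr + size) & ~(size - 1);
        }

        int main(void)
        {
                uint64_t seg = 1ULL << 20;      /* 1 MiB segment */

                printf("%#" PRIx64 "\n", frame_end(0x00123000, seg)); /* 0x200000 */
                printf("%#" PRIx64 "\n", frame_end(0x00200000, seg)); /* 0x300000 */
                return 0;
        }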

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
In __do_essa():
        gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
        hva = gfn_to_hva(vcpu->kvm, gfn);
        entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
        vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
        /*
         * Set the block-content state part of the result. 0 means resident, so
         * (non-present and non-zero), and 3 for zero pages (non-present and
         */
        vcpu->run->s.regs.gprs[r1] = res;
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
        struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
        if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
        atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
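
The ESSA result register packs several fields; per the comment above, the block-content state is a small field whose values distinguish resident pages (0) from preserved and zero pages (3). A generic illustration of packing and extracting such a 2-bit field; the bit position and the enum values are invented for the example, not the architected encoding:

        #include <stdio.h>

        #define BC_SHIFT 1                      /* position assumed */
        #define BC_MASK  (3u << BC_SHIFT)

        enum bc_state { BC_RESIDENT = 0, BC_PRESERVED = 2, BC_ZERO = 3 };

        static unsigned int set_bc(unsigned int res, enum bc_state s)
        {
                return (res & ~BC_MASK) | ((unsigned int)s << BC_SHIFT);
        }

        int main(void)
        {
                unsigned int res = 0;

                res = set_bc(res, BC_ZERO);
                printf("res=0x%x state=%u\n", res, (res & BC_MASK) >> BC_SHIFT);
                return 0;
        }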

In handle_essa():
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
        /* ORCs 0-6 are always valid */
        if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
        if (!vcpu->kvm->arch.migration_mode) {
        /*
         * CMMA is enabled in the KVM settings, but is disabled in
         */
        if (vcpu->kvm->mm->context.uses_cmm == 0) {
        mmap_write_lock(vcpu->kvm->mm);
        vcpu->kvm->mm->context.uses_cmm = 1;
        mmap_write_unlock(vcpu->kvm->mm);
        /*
         * If we are here, we are supposed to have CMMA enabled in
         * the SIE block. Enabling CMMA works on a per-CPU basis,
         * It's possible that the context flag is enabled and the
         */
        vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
        mmap_read_lock(vcpu->kvm->mm);
        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        mmap_read_unlock(vcpu->kvm->mm);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        mmap_read_lock(gmap->mm);
        mmap_read_unlock(gmap->mm);
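
The uses_cmm flag is flipped at most once, and only under the mmap write lock; afterwards it can be checked cheaply because it never goes back to zero. A portable sketch of that one-way latch with pthreads (names invented, memory-ordering subtleties glossed over):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
        static int uses_cmm;            /* one-way flag: 0 -> 1, never back */

        static void first_essa(void)
        {
                /* cheap unlocked check first; the flag only ever goes 0 -> 1 */
                if (uses_cmm == 0) {
                        pthread_rwlock_wrlock(&mmap_lock);
                        uses_cmm = 1;
                        pthread_rwlock_unlock(&mmap_lock);
                }
        }

        int main(void)
        {
                first_essa();
                first_essa();           /* second call is a no-op */
                printf("uses_cmm=%d\n", uses_cmm);
                return 0;
        }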

In kvm_s390_handle_b9():
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        return -EOPNOTSUPP;

In kvm_s390_handle_lctl():
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        vcpu->stat.instruction_lctl++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
        vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
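
LCTL names a range r1..r3 of control registers that wraps modulo 16, so ((reg3 - reg1) & 0xf) + 1 gives the count even when reg3 < reg1; for example the range 14..2 covers registers 14, 15, 0, 1, 2. A quick check of that arithmetic:

        #include <stdio.h>

        int main(void)
        {
                int reg1 = 14, reg3 = 2;
                int nr_regs = ((reg3 - reg1) & 0xf) + 1;
                int reg = reg1, n;

                printf("%d registers:", nr_regs);
                for (n = 0; n < nr_regs; n++) {
                        printf(" %d", reg);
                        reg = (reg + 1) % 16;   /* register numbers wrap, too */
                }
                printf("\n");
                return 0;
        }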

In kvm_s390_handle_stctl():
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        vcpu->stat.instruction_stctl++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];

In handle_lctlg():
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        vcpu->stat.instruction_lctlg++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];

In handle_stctg():
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        vcpu->stat.instruction_stctg++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];

In kvm_s390_handle_eb():
        switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
        return -EOPNOTSUPP;

In handle_tprot():
        vcpu->stat.instruction_tprot++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
        ipte_lock(vcpu->kvm);
        gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        /* Write protected? Try again with read-only... */
        cc = -1;
        if (cc != -1) {
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
        ipte_unlock(vcpu->kvm);
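
TPROT reports its result purely as a condition code: 0 when both fetch and store are permitted, 1 when only fetch is, 2 when no access is allowed; the handler uses cc = -1 internally as an "inject an exception instead" marker. A sketch of mapping two probe results onto that condition code; the probe functions are invented stand-ins for the translation/gfn_to_hva_prot() lookups:

        #include <stdbool.h>
        #include <stdio.h>

        static bool can_write(unsigned long addr) { (void)addr; return false; }
        static bool can_read(unsigned long addr)  { (void)addr; return true; }

        static int tprot_cc(unsigned long addr)
        {
                if (can_write(addr))
                        return 0;       /* fetch and store permitted */
                if (can_read(addr))
                        return 1;       /* fetch permitted, store protected */
                return 2;               /* no access */
        }

        int main(void)
        {
                printf("cc=%d\n", tprot_cc(0x1000));
                return 0;
        }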

In kvm_s390_handle_e5():
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        return -EOPNOTSUPP;

In handle_sckpf():
        vcpu->stat.instruction_sckpf++;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

In handle_ptff():
        vcpu->stat.instruction_ptff++;

In kvm_s390_handle_01():
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        return -EOPNOTSUPP;
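
All of the kvm_s390_handle_* entry points dispatch the same way: the SIE block's IPA field holds the first two bytes of the intercepted instruction, the top byte has already selected the group (0xb2, 0xb9, 0xe5, 0x01, ...), and the low byte picks the concrete handler, with -EOPNOTSUPP falling through to userspace. The kernel uses switch statements; an equivalent table-driven sketch of that second level, with stub handlers and the 0x0104/0x0107 opcodes filled in as assumptions:

        #include <errno.h>
        #include <stdio.h>

        struct vcpu { unsigned short ipa; };

        typedef int (*handler_fn)(struct vcpu *);

        static int handle_ptff(struct vcpu *vcpu)  { (void)vcpu; return 0; }
        static int handle_sckpf(struct vcpu *vcpu) { (void)vcpu; return 0; }

        /* second-level table for the 0x01xx group, indexed by IPA's low byte */
        static const handler_fn handlers_01[256] = {
                [0x04] = handle_ptff,   /* 0x0104 PTFF (assumed opcode) */
                [0x07] = handle_sckpf,  /* 0x0107 SCKPF (assumed opcode) */
        };

        static int kvm_s390_handle_01(struct vcpu *vcpu)
        {
                handler_fn fn = handlers_01[vcpu->ipa & 0x00ff];

                /* unhandled opcodes are left to userspace */
                return fn ? fn(vcpu) : -EOPNOTSUPP;
        }

        int main(void)
        {
                struct vcpu vcpu = { .ipa = 0x0104 };

                printf("rc=%d\n", kvm_s390_handle_01(&vcpu));
                return 0;
        }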