Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kvm/arm.c                    |  7
-rw-r--r--  arch/arm/kvm/mmu.c                    | 15
-rw-r--r--  arch/arm64/Kconfig                    | 21
-rw-r--r--  arch/arm64/include/asm/cpufeature.h   |  3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  |  8
-rw-r--r--  arch/arm64/kernel/cpu_errata.c        |  9
-rw-r--r--  arch/arm64/kvm/hyp.S                  | 14
-rw-r--r--  arch/arm64/kvm/inject_fault.c         |  2
-rw-r--r--  arch/mips/kvm/emulate.c               |  2
-rw-r--r--  arch/mips/kvm/locore.S                | 16
-rw-r--r--  arch/mips/kvm/mips.c                  |  5
-rw-r--r--  arch/s390/kvm/interrupt.c             |  7
-rw-r--r--  arch/s390/kvm/kvm-s390.c              |  6
-rw-r--r--  arch/s390/kvm/priv.c                  |  2
-rw-r--r--  arch/s390/kvm/sigp.c                  |  8
-rw-r--r--  arch/x86/kvm/vmx.c                    |  5
-rw-r--r--  arch/x86/kvm/x86.c                    | 61
17 files changed, 118 insertions(+), 73 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_sleep(vcpu);
/*
- * Disarming the background timer must be done in a
- * preemptible context, as this call may sleep.
- */
- kvm_timer_flush_hwstate(vcpu);
-
- /*
* Preparing the interrupts to be injected also
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
preempt_disable();
+ kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
local_irq_disable();
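
The net ordering after this hunk, as a minimal sketch (loop context elided; moving the call inside the window presumes kvm_timer_flush_hwstate() no longer needs to sleep):

	vcpu_sleep(vcpu);
	/* timer and GIC state are now both flushed with preemption off */
	preempt_disable();
	kvm_timer_flush_hwstate(vcpu);   /* moved inside the window */
	kvm_vgic_flush_hwstate(vcpu);
	local_irq_disable();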
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..7dace909d5cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
__kvm_flush_dcache_pud(pud);
}
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+ return !pfn_valid(pfn);
+}
+
/**
* stage2_dissolve_pmd() - clear and flush huge PMD entry
* @kvm: pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
- if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte) &&
- (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
return kvm_vcpu_dabt_iswrite(vcpu);
}
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
- return !pfn_valid(pfn);
-}
-
/**
* stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..e55848c1edf4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
If unsure, say Y.
+config ARM64_ERRATUM_834220
+ bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+ depends on KVM
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 834220 on Cortex-A57 parts up to r1p2.
+
+ Affected Cortex-A57 parts might report a Stage 2 translation
+ fault as the result of a Stage 1 fault for load crossing a
+ page boundary when there is a permission or device memory
+ alignment fault at Stage 1 and a translation fault at Stage 2.
+
+ The workaround is to verify that the Stage 1 translation
+ doesn't generate a fault before handling the Stage 2 fault.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..52722ee73dba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
#define ARM64_HAS_PAN 4
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
-#define ARM64_NCAPS 7
+#define ARM64_NCAPS 8
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3ca894ecf699 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
{
- if (vcpu_mode_is_32bit(vcpu))
- return vcpu_reg32(vcpu, reg_num);
-
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
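
The new comment codifies the contract; a short sketch of the two access paths (reg_num stands for a register number decoded from ESR_EL2, and the vcpu_reg32() line mirrors the inject_fault.c hunk further down):

	/* Register number taken from ESR_EL2: vcpu_reg() is safe here, as
	 * it now always indexes the flat AArch64 register file. */
	unsigned long *rt = vcpu_reg(vcpu, reg_num);

	/* AArch32 banked register (LR of the target mode): callers must
	 * ask for the banked copy explicitly via vcpu_reg32(). */
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;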
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
(1 << MIDR_VARIANT_SHIFT) | 2),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+ {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 834220",
+ .capability = ARM64_WORKAROUND_834220,
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
{
/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
ENDPROC(__kvm_flush_vm_context)
__kvm_hyp_panic:
+ // Stash PAR_EL1 before corrupting it in __restore_sysregs
+ mrs x0, par_el1
+ push x0, xzr
+
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
- mrs x6, par_el1
+ pop x6, xzr // active context PAR_EL1
mrs x7, tpidr_el2
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
ENDPROC(__kvm_hyp_panic)
__hyp_panic_str:
- .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+ .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
.align 2
@@ -1015,9 +1019,15 @@ el1_trap:
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
+alternative_else
+ nop // Use the permission fault path to
+ nop // check for a valid S1 translation,
+ nop // regardless of the ESR value.
+alternative_endif
/*
* Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
/* Note: These now point to the banked copies */
*vcpu_spsr(vcpu) = new_spsr_value;
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
if (sctlr & (1 << 13))
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d5fa3eaf39a1..41b1b090f56f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
base = (inst >> 21) & 0x1f;
op_inst = (inst >> 16) & 0x1f;
- offset = inst & 0xffff;
+ offset = (int16_t)inst;
cache = (inst >> 16) & 0x3;
op = (inst >> 18) & 0x7;
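
The one-liner above swaps zero extension for sign extension of the 16-bit offset field. A standalone demo of the difference, using a hypothetical instruction word whose offset is -8:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst = 0xbce9fff8;          /* hypothetical CACHE encoding, offset -8 */
	int32_t zero_ext = inst & 0xffff;    /* old code: 65528, wrong direction */
	int32_t sign_ext = (int16_t)inst;    /* new code: -8, as the ISA intends */

	printf("zero-extended: %d\nsign-extended: %d\n", zero_ext, sign_ext);
	return 0;
}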
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 7bab3a4e8f7d..7e2210846b8b 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
FEXPORT(__kvm_mips_load_asid)
/* Set the ASID for the Guest Kernel */
- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
- /* addresses shift to 0x80000000 */
- bltz t0, 1f /* If kernel */
+ PTR_L t0, VCPU_COP0(k1)
+ LONG_L t0, COP0_STATUS(t0)
+ andi t0, KSU_USER | ST0_ERL | ST0_EXL
+ xori t0, KSU_USER
+ bnez t0, 1f /* If kernel */
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
mtc0 t0, CP0_EPC
/* Set the ASID for the Guest Kernel */
- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
- /* addresses shift to 0x80000000 */
- bltz t0, 1f /* If kernel */
+ PTR_L t0, VCPU_COP0(k1)
+ LONG_L t0, COP0_STATUS(t0)
+ andi t0, KSU_USER | ST0_ERL | ST0_EXL
+ xori t0, KSU_USER
+ bnez t0, 1f /* If kernel */
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49ff3bfc007e..b9b803facdbf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
if (!gebase) {
err = -ENOMEM;
- goto out_free_cpu;
+ goto out_uninit_cpu;
}
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
out_free_gebase:
kfree(gebase);
+out_uninit_cpu:
+ kvm_vcpu_uninit(vcpu);
+
out_free_cpu:
kfree(vcpu);
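
The fix reorders the unwind so kvm_vcpu_uninit() runs when the gebase allocation fails, instead of leaking the initialized vcpu state. A toy standalone model of the goto-ladder idiom (names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

static int vcpu_init(void)    { puts("init");   return 0; }
static void vcpu_uninit(void) { puts("uninit"); }

static void *create(int fail_gebase)
{
	void *vcpu, *gebase;

	vcpu = malloc(16);
	if (!vcpu)
		return NULL;
	if (vcpu_init())
		goto out_free_cpu;       /* only the allocation to undo */

	gebase = fail_gebase ? NULL : malloc(16);
	if (!gebase)
		goto out_uninit_cpu;     /* must undo vcpu_init() first */

	free(gebase);                    /* toy: the real code keeps using it */
	return vcpu;

out_uninit_cpu:
	vcpu_uninit();
out_free_cpu:
	free(vcpu);
	return NULL;
}

int main(void)
{
	create(1);                       /* exercises the fixed unwind path */
	return 0;
}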
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 373e32346d68..6a75352f453c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
src_id, 0);
/* sending vcpu invalid */
- if (src_id >= KVM_MAX_VCPUS ||
- kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+ if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
return -EINVAL;
if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
irq->u.emerg.code, 0);
+ /* sending vcpu invalid */
+ if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
+ return -EINVAL;
+
set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8fe2f1c722dc..846589281b04 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = 0;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
- if (MACHINE_HAS_VX) {
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ r = -EBUSY;
+ } else if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac->mask, 129);
set_kvm_facility(kvm->arch.model.fac->list, 129);
r = 0;
} else
r = -EINVAL;
+ mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
r ? "(not available)" : "(success)");
break;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 77191b85ea7a..d76b51cb4b62 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- if (!MACHINE_HAS_PFMF)
+ if (!test_kvm_facility(vcpu->kvm, 8))
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index da690b69f9fe..77c22d685c7a 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
u16 cpu_addr, u32 parameter, u64 *status_reg)
{
int rc;
- struct kvm_vcpu *dst_vcpu;
+ struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
-
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
if (order_code == SIGP_EXTERNAL_CALL) {
- dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
BUG_ON(dest_vcpu == NULL);
kvm_s390_vcpu_wakeup(dest_vcpu);
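
Both sigp.c hunks rely on the same idea: look the destination up by its vcpu_id rather than trusting cpu_addr as an array index, which also makes the KVM_MAX_VCPUS bound check unnecessary. Roughly, the helper behaves as sketched below (see include/linux/kvm_host.h for the real kvm_get_vcpu_by_id() introduced in this cycle):

	static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
	{
		struct kvm_vcpu *vcpu;
		int i;

		/* scan the vcpu list for a matching vcpu_id; NULL if absent */
		kvm_for_each_vcpu(i, vcpu, kvm)
			if (vcpu->vcpu_id == id)
				return vcpu;
		return NULL;
	}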
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87acc5221740..af823a388c19 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
switch (type) {
case VMX_VPID_EXTENT_ALL_CONTEXT:
- if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
- nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return 1;
- }
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00462bd63129..eed32283d22c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+ return (!lapic_in_kernel(vcpu) ||
+ kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * if userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_interrupt_allowed(vcpu) &&
+ !kvm_cpu_has_interrupt(vcpu) &&
+ !kvm_event_needs_reinjection(vcpu) &&
+ kvm_cpu_accept_dm_intr(vcpu);
+}
+
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
return -EEXIST;
vcpu->arch.pending_external_vector = irq->irq;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
- if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
- return false;
-
- if (kvm_cpu_has_interrupt(vcpu))
- return false;
-
- return (irqchip_split(vcpu->kvm)
- ? kvm_apic_accept_pic_intr(vcpu)
- : kvm_arch_interrupt_allowed(vcpu));
+ return vcpu->run->request_interrupt_window &&
+ likely(!pic_in_kernel(vcpu->kvm));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
- if (!irqchip_in_kernel(vcpu->kvm))
- kvm_run->ready_for_interrupt_injection =
- kvm_arch_interrupt_allowed(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu) &&
- !kvm_event_needs_reinjection(vcpu);
- else if (!pic_in_kernel(vcpu->kvm))
- kvm_run->ready_for_interrupt_injection =
- kvm_apic_accept_pic_intr(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu);
- else
- kvm_run->ready_for_interrupt_injection = 1;
+ kvm_run->ready_for_interrupt_injection =
+ pic_in_kernel(vcpu->kvm) ||
+ kvm_vcpu_ready_for_interrupt_injection(vcpu);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
- bool req_int_win = !lapic_in_kernel(vcpu) &&
- vcpu->run->request_interrupt_window;
+ bool req_int_win =
+ dm_request_for_irq_injection(vcpu) &&
+ kvm_cpu_accept_dm_intr(vcpu);
+
bool req_immediate_exit = false;
if (vcpu->requests) {
@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
- if (dm_request_for_irq_injection(vcpu)) {
+ if (dm_request_for_irq_injection(vcpu) &&
+ kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
r = 0;
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++vcpu->stat.request_irq_exits;