/******************************************************************************
 * event.h
 *
 * A nice interface for passing asynchronous events to guest OSes.
 * (architecture-dependent part)
 *
 */

#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__

#include <xen/shared.h>

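/*
 * vcpu_kick(): prod @v into noticing its pending events: wake it if it is
 * blocked, or interrupt it if it is running on another CPU.
 * vcpu_mark_events_pending(): note that @v has events pending, then kick it.
 */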
void vcpu_kick(struct vcpu *v);
void vcpu_mark_events_pending(struct vcpu *v);

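/* Event delivery is enabled iff @v's event-channel upcall mask is clear. */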
static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
{
    return !vcpu_info(v, evtchn_upcall_mask);
}

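/*
 * Does the current vCPU have events requiring delivery?  HVM vCPUs defer to
 * hvm_local_events_need_delivery(); for PV, events are deliverable iff an
 * upcall is pending and upcalls are not masked.
 */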
int hvm_local_events_need_delivery(struct vcpu *v);
static always_inline bool local_events_need_delivery(void)
{
    struct vcpu *v = current;

    ASSERT(!is_idle_vcpu(v));

    return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
            (vcpu_info(v, evtchn_upcall_pending) &&
             !vcpu_info(v, evtchn_upcall_mask)));
}

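/* Mask (disable) / unmask (enable) event delivery for the current vCPU. */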
static inline void local_event_delivery_disable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 1;
}

static inline void local_event_delivery_enable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 0;
}
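
/*
 * Typical usage, a sketch of the block/wakeup pattern (cf. the scheduler's
 * do_block()): mask delivery, then re-check for pending events before
 * actually blocking, so a wakeup racing with the decision to block is not
 * lost:
 *
 *     local_event_delivery_disable();
 *     if ( local_events_need_delivery() )
 *         local_event_delivery_enable();   (an event arrived: don't block)
 *     else
 *         ... safe to block the vCPU ...
 */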

/* No arch-specific virq definitions for now; all virqs default to global. */
static inline bool arch_virq_is_global(unsigned int virq)
{
    return true;
}

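/*
 * Under the PV shim, event channels left in ECS_RESERVED state (with a
 * non-zero port) are in use by the shim itself and must be treated as
 * special.
 */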
#ifdef CONFIG_PV_SHIM
# include <asm/pv/shim.h>
# define arch_evtchn_is_special(chn) \
             (pv_shim && (chn)->port && (chn)->state == ECS_RESERVED)
#endif /* CONFIG_PV_SHIM */

#endif /* __ASM_EVENT_H__ */