/******************************************************************************
 * flushtlb.h
 * 
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 * 
 * Copyright (c) 2003-2004, K A Fraser
 */
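/*
 * Conceptual sketch of the protocol (illustrative only; the real tick logic
 * lives in flushtlb.c, not in this header): every flush ticks the global
 * clock and then records the new value in the flushing CPU's tlbflush_time,
 * roughly:
 *
 *     u32 t = ++tlbflush_clock;        // tick (done with cmpxchg in practice)
 *     ...perform the actual TLB flush...
 *     this_cpu(tlbflush_time) = t;     // this CPU is now clean up to time t
 *
 * A page stamped at time T therefore only needs flushing on CPUs whose
 * tlbflush_time is still <= T; see NEED_FLUSH() below.
 */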

#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/mm.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>

/* The current time as shown by the virtual TLB clock. */
extern u32 tlbflush_clock;

/* Time at which each CPU's TLB was last flushed. */
DECLARE_PER_CPU(u32, tlbflush_time);

/* TLB clock is in use. */
extern bool tlb_clk_enabled;

static inline uint32_t tlbflush_current_time(void)
{
    /* Returning 0 from tlbflush_current_time will always force a flush. */
    return tlb_clk_enabled ? tlbflush_clock : 0;
}

static inline void page_set_tlbflush_timestamp(struct page_info *page)
{
    /* Avoid the write if the TLB clock is disabled. */
    if ( !tlb_clk_enabled )
        return;

    /*
     * Prevent storing a stale time stamp, which could happen if an update
     * to tlbflush_clock plus a subsequent flush IPI happen between the
     * reading of tlbflush_clock and the writing of the struct page_info
     * field.
     */
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    page->tlbflush_timestamp = tlbflush_current_time();
    local_irq_enable();
}
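/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * stamp a page as soon as it stops being used for a purpose that may have
 * left entries in some CPU's TLB, e.g. when it is released:
 *
 *     static void example_release_page(struct page_info *pg)
 *     {
 *         page_set_tlbflush_timestamp(pg);   // remember "dirty until now"
 *         // ...hand the page back to the allocator...
 *     }
 *
 * Later consumers compare this stamp against each CPU's tlbflush_time.
 */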

/*
 * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
 * @lastuse_stamp is a timestamp taken when the PFN we are testing was last 
 * used for a purpose that may have caused the CPU's TLB to become tainted.
 */
static inline bool NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    u32 curr_time = tlbflush_current_time();
    /*
     * Two cases:
     *  1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
     *     safety during this period, we force a flush if @curr_time == 0.
     *  2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
     *     To detect false positives because @cpu_stamp has wrapped, we
     *     also check @curr_time. If less than @lastuse_stamp we definitely
     *     wrapped, so there's no need for a flush (one is forced every wrap).
     */
    return ((curr_time == 0) ||
            ((cpu_stamp <= lastuse_stamp) &&
             (lastuse_stamp <= curr_time)));
}
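/*
 * Worked example (illustrative values only): with curr_time == 50, a CPU
 * stamped at 40 and a page last used at 45 gives NEED_FLUSH(40, 45) == true,
 * since that CPU has not flushed since the page was used, whereas a CPU
 * stamped at 47 gives NEED_FLUSH(47, 45) == false.  If lastuse_stamp == 60 >
 * curr_time, the clock must have wrapped since the page was used, and the
 * flush forced on every wrap already covered it, so no flush is needed.
 * While curr_time == 0 (mid-wrap), a flush is always requested.
 */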

/*
 * Filter the given set of CPUs, removing those that definitely flushed their
 * TLB since @page_timestamp.
 */
static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp)
{
    unsigned int cpu;

    /* Short-circuit: there's no need to iterate if the clock is disabled. */
    if ( !tlb_clk_enabled )
        return;

    for_each_cpu ( cpu, mask )
        if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) )
            __cpumask_clear_cpu(cpu, mask);
}
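/*
 * Illustrative usage sketch (not part of this header; mirrors the typical
 * caller pattern): before handing a page to a new user, flush only the
 * online CPUs that might still hold stale translations for it:
 *
 *     cpumask_t mask;
 *
 *     cpumask_copy(&mask, &cpu_online_map);
 *     tlbflush_filter(&mask, page->tlbflush_timestamp);
 *     if ( !cpumask_empty(&mask) )
 *         flush_tlb_mask(&mask);
 */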

void new_tlbflush_clock_period(void);

/* Read pagetable base. */
static inline unsigned long read_cr3(void)
{
    unsigned long cr3;
    __asm__ __volatile__ (
        "mov %%cr3, %0" : "=r" (cr3) : );
    return cr3;
}

/* Write pagetable base and implicitly tick the tlbflush clock. */
void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);

/* flush_* flag fields: */
 /*
  * Area to flush: 2^flush_order pages. Default is flush entire address space.
  * NB. Multi-page areas do not need to have been mapped with a superpage.
  */
#define FLUSH_ORDER_MASK 0xff
#define FLUSH_ORDER(x)   ((x)+1)
 /* Flush TLBs (or parts thereof) */
#define FLUSH_TLB        0x100
 /* Flush TLBs (or parts thereof) including global mappings */
#define FLUSH_TLB_GLOBAL 0x200
 /* Flush data caches */
#define FLUSH_CACHE      0x400
 /* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID   0x800
 /* Flush CPU state */
#define FLUSH_VCPU_STATE 0x1000
 /* Flush the per-cpu root page table */
#define FLUSH_ROOT_PGTBL 0x2000
#ifdef CONFIG_HVM
 /* Flush all HVM guests' linear TLBs (using ASID/VPID) */
#define FLUSH_HVM_ASID_CORE 0x4000
#else
#define FLUSH_HVM_ASID_CORE 0
#endif
#if defined(CONFIG_PV) || defined(CONFIG_SHADOW_PAGING)
/*
 * Force an IPI to be sent. Note that adding this to the flags passed to
 * flush_area_mask will prevent using the assisted flush without having any
 * other side effect.
 */
# define FLUSH_FORCE_IPI 0x8000
#else
# define FLUSH_FORCE_IPI 0
#endif

/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
#define flush_local(flags) flush_area_local(NULL, flags)

/* Flush specified CPUs' TLBs/caches */
void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)

/* Flush all CPUs' TLBs/caches */
#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
#define flush_all(flags) flush_mask(&cpu_online_map, flags)
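/*
 * Illustrative flag combinations (sketch only):
 *
 *     // Flush the local CPU's non-global TLB entries for one page at va.
 *     flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(0));
 *
 *     // Flush TLBs, including global mappings, on a set of CPUs, forcing
 *     // a real IPI rather than any assisted flush.
 *     flush_area_mask(mask, NULL, FLUSH_TLB_GLOBAL | FLUSH_FORCE_IPI);
 */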

/* Flush local TLBs */
#define flush_tlb_local()                       \
    flush_local(FLUSH_TLB)
#define flush_tlb_one_local(v)                  \
    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush specified CPUs' TLBs */
#define flush_tlb_mask(mask)                    \
    flush_mask(mask, FLUSH_TLB)
#define flush_tlb_one_mask(mask,v)              \
    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/*
 * Make the common code TLB flush helper force use of an IPI in order to be
 * on the safe side. Note that not all calls from common code strictly require
 * this.
 */
#define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI)

/* Flush all CPUs' TLBs */
#define flush_tlb_all()                         \
    flush_tlb_mask(&cpu_online_map)
#define flush_tlb_one_all(v)                    \
    flush_tlb_one_mask(&cpu_online_map, v)

#define flush_root_pgtbl_domain(d)                                       \
{                                                                        \
    if ( is_pv_domain(d) && (d)->arch.pv.xpti )                          \
        flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL);                \
}

static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
static inline int invalidate_dcache_va_range(const void *p,
                                             unsigned long size)
{ return -EOPNOTSUPP; }
static inline int clean_and_invalidate_dcache_va_range(const void *p,
                                                       unsigned long size)
{
    unsigned int order = get_order_from_bytes(size);
    /* sub-page granularity support needs to be added if necessary */
    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
    return 0;
}
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    return clean_and_invalidate_dcache_va_range(p, size);
}

unsigned int guest_flush_tlb_flags(const struct domain *d);
void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);

#endif /* __FLUSHTLB_H__ */