author     Emilio G. Cota <cota@braap.org>    2018-11-26 17:14:43 -0500
committer  Alex Bennée <alex.bennee@linaro.org>    2019-10-28 15:12:38 +0000
commit     cfbc3c6083dbdd0fdd9cc98965182e79431d3c63 (patch)
tree       149a7263be63079a989cc48dc09ae1535cbf712e
parent     504f73f7b3724c885317b6b236620e9048f50c0a (diff)
cpu: introduce cpu_in_exclusive_context()
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: moved inside start/end_exclusive fns + cleanup]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  accel/tcg/cpu-exec.c    5
-rw-r--r--  cpus-common.c           4
-rw-r--r--  include/hw/core/cpu.h  13
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 48272c781b..81c33d6475 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -238,8 +238,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
uint32_t flags;
uint32_t cflags = 1;
uint32_t cf_mask = cflags & CF_HASH_MASK;
- /* volatile because we modify it between setjmp and longjmp */
- volatile bool in_exclusive_region = false;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
@@ -253,7 +251,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
/* Since we got here, we know that parallel_cpus must be true. */
parallel_cpus = false;
- in_exclusive_region = true;
cc->cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
@@ -273,7 +270,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
assert_no_pages_locked();
}
- if (in_exclusive_region) {
+ if (cpu_in_exclusive_context(cpu)) {
/* We might longjump out of either the codegen or the
* execution, so must make sure we only end the exclusive
* region if we started it.
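
The hunk above drops the function-local in_exclusive_region flag. That local had to be volatile because it is written between sigsetjmp() and a possible siglongjmp(): C makes no guarantee about the post-longjmp value of a non-volatile local modified after setjmp. A minimal standalone sketch of that constraint (not QEMU code; all names here are illustrative):

/* Why a local written between sigsetjmp() and siglongjmp() must be
 * volatile: without the qualifier the compiler may cache it in a
 * register whose value is indeterminate after the jump returns. */
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static sigjmp_buf env;

static void fault_path(void)
{
    siglongjmp(env, 1);      /* models longjumping out of execution */
}

int main(void)
{
    /* volatile: modified after sigsetjmp(), read after siglongjmp() */
    volatile bool started = false;

    if (sigsetjmp(env, 0) == 0) {
        started = true;      /* models entering the exclusive region */
        fault_path();        /* never returns normally */
    }

    if (started) {           /* still reliable thanks to volatile */
        printf("cleaning up the region we started\n");
    }
    return 0;
}

Moving the flag into CPUState sidesteps the issue entirely: the state lives in ordinary memory reached through the cpu pointer, so it survives the longjmp with no volatile qualifier needed.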
diff --git a/cpus-common.c b/cpus-common.c
index af3385a296..eaf590cb38 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -200,11 +200,15 @@ void start_exclusive(void)
* section until end_exclusive resets pending_cpus to 0.
*/
qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+ current_cpu->in_exclusive_context = true;
}
/* Finish an exclusive operation. */
void end_exclusive(void)
{
+ current_cpu->in_exclusive_context = false;
+
qemu_mutex_lock(&qemu_cpu_list_lock);
atomic_set(&pending_cpus, 0);
qemu_cond_broadcast(&exclusive_resume);
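
With the flag maintained inside start_exclusive()/end_exclusive() themselves, every exclusive section is bracketed automatically, and only the thread owning current_cpu ever writes its own flag, so no extra locking is required. A hedged sketch of the resulting invariant as seen from a vCPU thread (the assertions are illustrative, not part of this patch, and assume current_cpu is valid on this thread):

/* Illustrative only: shows the invariant this hunk establishes. */
void exclusive_section_example(void)
{
    assert(!cpu_in_exclusive_context(current_cpu));
    start_exclusive();       /* quiesces all other vCPUs, sets the flag */
    assert(cpu_in_exclusive_context(current_cpu));
    /* ... operate while no other vCPU executes ... */
    end_exclusive();         /* clears the flag, lets vCPUs resume */
    assert(!cpu_in_exclusive_context(current_cpu));
}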
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 031f587e51..07f2ab0590 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -372,6 +372,7 @@ struct CPUState {
bool unplug;
bool crash_occurred;
bool exit_request;
+ bool in_exclusive_context;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
@@ -784,6 +785,18 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
/**
+ * cpu_in_exclusive_context()
+ * @cpu: The vCPU to check
+ *
+ * Returns true if @cpu is in an exclusive context, for example running
+ * something which has previously been queued via async_safe_run_on_cpu().
+ */
+static inline bool cpu_in_exclusive_context(const CPUState *cpu)
+{
+ return cpu->in_exclusive_context;
+}
+
+/**
* qemu_get_cpu:
* @index: The CPUState@cpu_index value of the CPU to obtain.
*
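
As the new doc comment notes, the main consumers of the predicate are work items queued with async_safe_run_on_cpu(), which the cpus-common.c machinery runs between start_exclusive() and end_exclusive(). A hedged usage sketch (the work function and its caller are hypothetical, not from this patch; async_safe_run_on_cpu(), run_on_cpu_data and RUN_ON_CPU_NULL come from the header above):

/* Hypothetical safe-work item: it runs with all other vCPUs stopped,
 * so the predicate introduced by this patch holds for the executing
 * vCPU. */
static void do_safe_work(CPUState *cpu, run_on_cpu_data data)
{
    assert(cpu_in_exclusive_context(cpu));
    /* ... safely mutate state shared with other vCPUs ... */
}

static void queue_safe_work(CPUState *cpu)
{
    /* RUN_ON_CPU_NULL passes an empty payload to the work function */
    async_safe_run_on_cpu(cpu, do_safe_work, RUN_ON_CPU_NULL);
}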