author    | Pranith Kumar <bobby.prani@gmail.com>   | 2017-02-23 18:29:15 +0000
committer | Alex Bennée <alex.bennee@linaro.org>    | 2017-02-24 10:32:45 +0000
commit    | 08e73c48b053566bfe0c994f154f73991cd0ff0e (patch)
tree      | 5a2a033b6acfc8d41d65424a695943c6f9480337 /cpu-exec.c
parent    | 372579427a5040a26dfee78464b50e2bdf27ef26 (diff)
tcg: handle EXCP_ATOMIC exception for system emulation
This patch enables handling of atomic code in the guest. This would
preferably be done in cpu_handle_exception(), but the current assumptions
about when we can execute atomic sections cause a deadlock.
The current mechanism discards the flags that were set during atomic
execution. We ensure they are properly saved by calling the
cc->cpu_exec_enter/exit() hooks around the loop.
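To see why the bracketing matters, here is a minimal, self-contained C
sketch (not QEMU code; ToyCPU and the toy_* names are hypothetical
stand-ins for the CPUClass cpu_exec_enter/exit hooks): flags set while a
block runs only survive if the exit hook writes them back.

    #include <stdio.h>

    /* Hypothetical CPU with a latched and a working copy of its flags. */
    typedef struct ToyCPU {
        unsigned arch_flags;   /* architectural flags, the source of truth */
        unsigned work_flags;   /* working copy used while a block executes */
    } ToyCPU;

    static void toy_exec_enter(ToyCPU *cpu) { cpu->work_flags = cpu->arch_flags; }
    static void toy_exec_exit(ToyCPU *cpu)  { cpu->arch_flags = cpu->work_flags; }

    /* Pretend translated block: sets a flag as a side effect. */
    static void toy_run_block(ToyCPU *cpu) { cpu->work_flags |= 0x1; }

    int main(void)
    {
        ToyCPU cpu = { .arch_flags = 0, .work_flags = 0 };

        toy_exec_enter(&cpu);   /* latch state before running the block */
        toy_run_block(&cpu);
        toy_exec_exit(&cpu);    /* write back flags set during execution */

        printf("flags after block: %#x\n", cpu.arch_flags);   /* prints 0x1 */
        return 0;
    }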
As we run cpu_exec_step_atomic() from the outermost loop, we need to
avoid an abort() when single-stepping over atomic code, since the debug
exception's longjmp would otherwise land on the sigsetjmp in
cpu_exec(). We do this by setting up a new jmp_env so that the exception
jumps back here instead.
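The jmp_env trick is the standard nested sigsetjmp/siglongjmp pattern:
whoever installed the most recent jump buffer catches the unwind. A
minimal, self-contained sketch (generic C, not QEMU's cpu->jmp_env
plumbing; active_env, exec_step and raise_exception are illustrative
names):

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf *active_env;        /* stand-in for cpu->jmp_env */

    /* Simulates an exception (e.g. a debug exception) raised mid-execution. */
    static void raise_exception(void)
    {
        siglongjmp(*active_env, 1);
    }

    /* Inner step: installs its own jump buffer so the exception lands here
     * rather than unwinding all the way to the caller's sigsetjmp. */
    static void exec_step(void)
    {
        sigjmp_buf env;
        sigjmp_buf *saved = active_env;

        active_env = &env;
        if (sigsetjmp(env, 0) == 0) {
            raise_exception();            /* does not return normally */
        } else {
            printf("exception handled inside exec_step()\n");
        }
        active_env = saved;               /* restore the outer handler */
    }

    int main(void)
    {
        sigjmp_buf outer;

        active_env = &outer;
        if (sigsetjmp(outer, 0) == 0) {
            exec_step();
            printf("returned normally, outer handler never ran\n");
        } else {
            printf("outer handler ran (the abort()-prone path)\n");
        }
        return 0;
    }

In the patch, the new sigsetjmp(cpu->jmp_env, 0) in cpu_exec_step() plays
the role of the inner handler, so a longjmp taken while single-stepping
the atomic block returns there rather than to the sigsetjmp in cpu_exec().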
Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
[AJB: tweak title, merge with new patches, add mmap_lock]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
CC: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'cpu-exec.c')
-rw-r--r-- | cpu-exec.c | 43
1 file changed, 31 insertions, 12 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index 2edd26e823..1a5ad4889d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -228,24 +228,43 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
 
 static void cpu_exec_step(CPUState *cpu)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-    tb_lock();
-    tb = tb_gen_code(cpu, pc, cs_base, flags,
-                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
-    tb->orig_tb = NULL;
-    tb_unlock();
-    /* execute the generated code */
-    trace_exec_tb_nocache(tb, pc);
-    cpu_tb_exec(cpu, tb);
-    tb_lock();
-    tb_phys_invalidate(tb, -1);
-    tb_free(tb);
-    tb_unlock();
+    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+        mmap_lock();
+        tb_lock();
+        tb = tb_gen_code(cpu, pc, cs_base, flags,
+                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+        tb->orig_tb = NULL;
+        tb_unlock();
+        mmap_unlock();
+
+        cc->cpu_exec_enter(cpu);
+        /* execute the generated code */
+        trace_exec_tb_nocache(tb, pc);
+        cpu_tb_exec(cpu, tb);
+        cc->cpu_exec_exit(cpu);
+
+        tb_lock();
+        tb_phys_invalidate(tb, -1);
+        tb_free(tb);
+        tb_unlock();
+    } else {
+        /* We may have exited due to another problem here, so we need
+         * to reset any tb_locks we may have taken but didn't release.
+         * The mmap_lock is dropped by tb_gen_code if it runs out of
+         * memory.
+         */
+#ifndef CONFIG_SOFTMMU
+        tcg_debug_assert(!have_mmap_lock());
+#endif
+        tb_lock_reset();
+    }
 }
 
 void cpu_exec_step_atomic(CPUState *cpu)