Diffstat (limited to 'linux-user')

 -rw-r--r--  linux-user/elfload.c |   4
 -rw-r--r--  linux-user/main.c    | 146
 -rw-r--r--  linux-user/mmap.c    |  16
 -rw-r--r--  linux-user/qemu.h    |  14
 -rw-r--r--  linux-user/signal.c  |   4

 5 files changed, 174 insertions, 10 deletions
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index ca0023e62..67b7535d4 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -89,7 +89,7 @@ enum {
 static const char *get_elf_platform(void)
 {
     static char elf_platform[] = "i386";
-    int family = (global_env->cpuid_version >> 8) & 0xff;
+    int family = (thread_env->cpuid_version >> 8) & 0xff;
     if (family > 6)
         family = 6;
     if (family >= 3)
@@ -101,7 +101,7 @@ static const char *get_elf_platform(void)
 
 static uint32_t get_elf_hwcap(void)
 {
-    return global_env->cpuid_features;
+    return thread_env->cpuid_features;
 }
 
 #ifdef TARGET_X86_64
diff --git a/linux-user/main.c b/linux-user/main.c
index 4bdec7e9b..1f68766f0 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -26,6 +26,8 @@
 
 #include "qemu.h"
 #include "qemu-common.h"
+/* For tb_lock */
+#include "exec-all.h"
 
 #define DEBUG_LOGFILE "/tmp/qemu.log"
 
@@ -123,6 +125,135 @@ int64_t cpu_get_real_ticks(void)
 
 #endif
 
+#if defined(USE_NPTL)
+/***********************************************************/
+/* Helper routines for implementing atomic operations.  */
+
+/* To implement exclusive operations we force all cpus to synchronise.
+   We don't require a full sync, only that no cpus are executing guest code.
+   The alternative is to map target atomic ops onto host equivalents,
+   which requires quite a lot of per host/target work.  */
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static int pending_cpus;
+
+/* Make sure everything is in a consistent state for calling fork().  */
+void fork_start(void)
+{
+    mmap_fork_start();
+    pthread_mutex_lock(&tb_lock);
+    pthread_mutex_lock(&exclusive_lock);
+}
+
+void fork_end(int child)
+{
+    if (child) {
+        /* Child processes created by fork() only have a single thread.
+           Discard information about the parent threads.  */
+        first_cpu = thread_env;
+        thread_env->next_cpu = NULL;
+        pending_cpus = 0;
+        pthread_mutex_init(&exclusive_lock, NULL);
+        pthread_cond_init(&exclusive_cond, NULL);
+        pthread_cond_init(&exclusive_resume, NULL);
+        pthread_mutex_init(&tb_lock, NULL);
+    } else {
+        pthread_mutex_unlock(&exclusive_lock);
+        pthread_mutex_unlock(&tb_lock);
+    }
+    mmap_fork_end(child);
+}
+
+/* Wait for pending exclusive operations to complete.  The exclusive lock
+   must be held.  */
+static inline void exclusive_idle(void)
+{
+    while (pending_cpus) {
+        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+    }
+}
+
+/* Start an exclusive operation.
+   Must only be called from outside cpu_arm_exec.  */
+static inline void start_exclusive(void)
+{
+    CPUState *other;
+    pthread_mutex_lock(&exclusive_lock);
+    exclusive_idle();
+
+    pending_cpus = 1;
+    /* Make all other cpus stop executing.  */
+    for (other = first_cpu; other; other = other->next_cpu) {
+        if (other->running) {
+            pending_cpus++;
+            cpu_interrupt(other, CPU_INTERRUPT_EXIT);
+        }
+    }
+    if (pending_cpus > 1) {
+        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+    }
+}
+
+/* Finish an exclusive operation.  */
+static inline void end_exclusive(void)
+{
+    pending_cpus = 0;
+    pthread_cond_broadcast(&exclusive_resume);
+    pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution.  */
+static inline void cpu_exec_start(CPUState *env)
+{
+    pthread_mutex_lock(&exclusive_lock);
+    exclusive_idle();
+    env->running = 1;
+    pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops.  */
+static inline void cpu_exec_end(CPUState *env)
+{
+    pthread_mutex_lock(&exclusive_lock);
+    env->running = 0;
+    if (pending_cpus > 1) {
+        pending_cpus--;
+        if (pending_cpus == 1) {
+            pthread_cond_signal(&exclusive_cond);
+        }
+    }
+    exclusive_idle();
+    pthread_mutex_unlock(&exclusive_lock);
+}
+#else /* if !USE_NPTL */
+/* These are no-ops because we are not threadsafe.  */
+static inline void cpu_exec_start(CPUState *env)
+{
+}
+
+static inline void cpu_exec_end(CPUState *env)
+{
+}
+
+static inline void start_exclusive(void)
+{
+}
+
+static inline void end_exclusive(void)
+{
+}
+
+void fork_start(void)
+{
+}
+
+void fork_end(int child)
+{
+}
+#endif
+
+
 #ifdef TARGET_I386
 /***********************************************************/
 /* CPUX86 core interface */
@@ -378,8 +509,11 @@ do_kernel_trap(CPUARMState *env)
         /* ??? No-op.  Will need to do better for SMP.  */
         break;
     case 0xffff0fc0: /* __kernel_cmpxchg */
-        /* ??? This is not really atomic.  However we don't support
-           threads anyway, so it doesn't realy matter.  */
+        /* XXX: This only works between threads, not between processes.
+           It's probably possible to implement this with native host
+           operations.  However things like ldrex/strex are much harder so
+           there's not much point trying.  */
+        start_exclusive();
         cpsr = cpsr_read(env);
         addr = env->regs[2];
         /* FIXME: This should SEGV if the access fails.  */
@@ -396,6 +530,7 @@ do_kernel_trap(CPUARMState *env)
             cpsr &= ~CPSR_C;
         }
         cpsr_write(env, cpsr, CPSR_C);
+        end_exclusive();
         break;
     case 0xffff0fe0: /* __kernel_get_tls */
         env->regs[0] = env->cp15.c13_tls2;
@@ -422,7 +557,9 @@ void cpu_loop(CPUARMState *env)
     uint32_t addr;
 
     for(;;) {
+        cpu_exec_start(env);
         trapnr = cpu_arm_exec(env);
+        cpu_exec_end(env);
         switch(trapnr) {
         case EXCP_UDEF:
             {
@@ -2044,8 +2181,7 @@ void usage(void)
     _exit(1);
 }
 
-/* XXX: currently only used for async signals (see signal.c) */
-CPUState *global_env;
+THREAD CPUState *thread_env;
 
 void init_task_state(TaskState *ts)
 {
@@ -2203,7 +2339,7 @@ int main(int argc, char **argv)
         fprintf(stderr, "Unable to find CPU definition\n");
         exit(1);
     }
-    global_env = env;
+    thread_env = env;
 
     if (getenv("QEMU_STRACE")) {
         do_strace = 1;
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index c0821386d..b4ca1074b 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -46,6 +46,22 @@ void mmap_unlock(void)
         pthread_mutex_unlock(&mmap_mutex);
     }
 }
+
+/* Grab lock to make sure things are in a consistent state after fork().  */
+void mmap_fork_start(void)
+{
+    if (mmap_lock_count)
+        abort();
+    pthread_mutex_lock(&mmap_mutex);
+}
+
+void mmap_fork_end(int child)
+{
+    if (child)
+        pthread_mutex_init(&mmap_mutex, NULL);
+    else
+        pthread_mutex_unlock(&mmap_mutex);
+}
 #else
 /* We aren't threadsafe to start with, so no need to worry about locking.  */
 void mmap_lock(void)
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 81f7fb290..d3a3c3c20 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -37,6 +37,12 @@ typedef target_long abi_long;
 #include "target_signal.h"
 #include "gdbstub.h"
 
+#if defined(USE_NPTL)
+#define THREAD __thread
+#else
+#define THREAD
+#endif
+
 /* This struct is used to hold certain information about the image.
  * Basically, it replicates in user space what would be certain
  * task_struct fields in the kernel
@@ -184,12 +190,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                     abi_long arg2, abi_long arg3, abi_long arg4,
                     abi_long arg5, abi_long arg6);
 void gemu_log(const char *fmt, ...) __attribute__((format(printf,1,2)));
-extern CPUState *global_env;
+extern THREAD CPUState *thread_env;
 void cpu_loop(CPUState *env);
 void init_paths(const char *prefix);
 const char *path(const char *pathname);
 char *target_strerror(int err);
 int get_osversion(void);
+void fork_start(void);
+void fork_end(int child);
 
 extern int loglevel;
 extern FILE *logfile;
@@ -235,6 +243,10 @@ int target_msync(abi_ulong start, abi_ulong len, int flags);
 extern unsigned long last_brk;
 void mmap_lock(void);
 void mmap_unlock(void);
+#if defined(USE_NPTL)
+void mmap_fork_start(void);
+void mmap_fork_end(int child);
+#endif
 
 /* user access */
diff --git a/linux-user/signal.c b/linux-user/signal.c
index e6e1a0826..623a5e31c 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -424,9 +424,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
     fprintf(stderr, "qemu: got signal %d\n", sig);
 #endif
     host_to_target_siginfo_noswap(&tinfo, info);
-    if (queue_signal(global_env, sig, &tinfo) == 1) {
+    if (queue_signal(thread_env, sig, &tinfo) == 1) {
         /* interrupt the virtual CPU as soon as possible */
-        cpu_interrupt(global_env, CPU_INTERRUPT_EXIT);
+        cpu_interrupt(thread_env, CPU_INTERRUPT_EXIT);
     }
 }
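
Note on the stop-the-world scheme above: a guest atomic operation is emulated by first forcing every other vCPU out of guest code, then doing a plain read-modify-write. The snippet below is only an illustration of that bracketing, reusing the patch's start_exclusive()/end_exclusive() helpers; the emulated_cmpxchg() wrapper is hypothetical and not part of the patch.

    /* Hypothetical sketch of the pattern used by the __kernel_cmpxchg hunk:
       start_exclusive() returns only once no other vCPU is executing guest
       code, so this read-modify-write cannot race with another thread.  */
    static uint32_t emulated_cmpxchg(uint32_t *addr, uint32_t oldval,
                                     uint32_t newval)
    {
        uint32_t current;

        start_exclusive();      /* all other vCPUs are now stopped */
        current = *addr;
        if (current == oldval) {
            *addr = newval;
        }
        end_exclusive();        /* broadcast exclusive_resume, drop the lock */
        return current;
    }

As the patch comment says, this serialises threads within one emulator process only; a cmpxchg on memory shared with another process is not made atomic by this scheme.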
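fork_start()/fork_end() exist because fork() in a multi-threaded process duplicates mutexes in whatever state they happen to be in; taking mmap_mutex, tb_lock and exclusive_lock first guarantees the child inherits them unheld mid-operation. The actual call site is in the syscall emulation and is not part of this diff, so the wrapper below is only a sketch of the intended discipline.

    /* Sketch (assumed call site): bracket the host fork() with the helpers
       added above, so every lock is in a known state when the address
       space is duplicated.  */
    pid_t emulate_fork(void)
    {
        pid_t pid;

        fork_start();           /* mmap_fork_start() + tb_lock + exclusive_lock */
        pid = fork();
        fork_end(pid == 0);     /* child re-inits the locks; parent unlocks */
        return pid;
    }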
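The global_env to thread_env rename works because THREAD expands to GCC's __thread storage class under USE_NPTL, giving every host thread its own copy of the pointer. A minimal sketch of how a new guest thread would install its CPU state before entering cpu_loop(); the entry function here is hypothetical, as the real one lives in the clone() emulation outside this diff:

    /* Each pthread sees its own thread_env, so host_signal_handler() in
       signal.c always queues the signal on the CPU of the thread that
       actually received it.  */
    static void *guest_thread_entry(void *arg)
    {
        thread_env = (CPUState *)arg;   /* per-thread copy under __thread */
        cpu_loop(thread_env);
        return NULL;
    }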