Diffstat (limited to 'include')
-rw-r--r--  include/block/aio.h              |  52
-rw-r--r--  include/block/block_int.h        |   4
-rw-r--r--  include/block/blockjob.h         |   2
-rw-r--r--  include/block/coroutine.h        |   3
-rw-r--r--  include/exec/exec-all.h          | 100
-rw-r--r--  include/exec/gen-icount.h        |   4
-rw-r--r--  include/exec/softmmu_defs.h      |  37
-rw-r--r--  include/exec/softmmu_exec.h      |   3
-rw-r--r--  include/exec/softmmu_template.h  | 371
-rw-r--r--  include/hw/acpi/acpi.h           |   2
-rw-r--r--  include/hw/isa/isa.h             |   2
-rw-r--r--  include/hw/pci-host/spapr.h      |   8
-rw-r--r--  include/hw/pci/pci.h             |   2
-rw-r--r--  include/hw/ppc/spapr.h           |  21
-rw-r--r--  include/hw/qdev-core.h           |   2
-rw-r--r--  include/hw/scsi/scsi.h           |   4
-rw-r--r--  include/hw/usb.h                 |   3
-rw-r--r--  include/monitor/readline.h       |   3
-rw-r--r--  include/qapi/opts-visitor.h      |   6
-rw-r--r--  include/qemu/ratelimit.h         |   2
-rw-r--r--  include/qemu/timer.h             | 676
-rw-r--r--  include/qemu/typedefs.h          |   3
-rw-r--r--  include/qom/object.h             |  10
-rw-r--r--  include/sysemu/kvm.h             |   3
-rw-r--r--  include/sysemu/sysemu.h          |   2
25 files changed, 949 insertions, 376 deletions
diff --git a/include/block/aio.h b/include/block/aio.h index 5743bf1ba0..2efdf416cf 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -14,10 +14,12 @@ #ifndef QEMU_AIO_H #define QEMU_AIO_H +#include "qemu/typedefs.h" #include "qemu-common.h" #include "qemu/queue.h" #include "qemu/event_notifier.h" #include "qemu/thread.h" +#include "qemu/timer.h" typedef struct BlockDriverAIOCB BlockDriverAIOCB; typedef void BlockDriverCompletionFunc(void *opaque, int ret); @@ -42,7 +44,7 @@ typedef struct AioHandler AioHandler; typedef void QEMUBHFunc(void *opaque); typedef void IOHandler(void *opaque); -typedef struct AioContext { +struct AioContext { GSource source; /* The list of registered AIO handlers */ @@ -72,7 +74,10 @@ typedef struct AioContext { /* Thread pool for performing work and receiving completion callbacks */ struct ThreadPool *thread_pool; -} AioContext; + + /* TimerLists for calling timers - one per clock type */ + QEMUTimerListGroup tlg; +}; /** * aio_context_new: Allocate a new AioContext. @@ -241,4 +246,47 @@ void qemu_aio_set_fd_handler(int fd, void *opaque); #endif +/** + * aio_timer_new: + * @ctx: the aio context + * @type: the clock type + * @scale: the scale + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Allocate a new timer attached to the context @ctx. + * The function is responsible for memory allocation. + * + * The preferred interface is aio_timer_init. Use that + * unless you really need dynamic memory allocation. + * + * Returns: a pointer to the new timer + */ +static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type, + int scale, + QEMUTimerCB *cb, void *opaque) +{ + return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque); +} + +/** + * aio_timer_init: + * @ctx: the aio context + * @ts: the timer + * @type: the clock type + * @scale: the scale + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Initialise a new timer attached to the context @ctx. + * The caller is responsible for memory allocation. + */ +static inline void aio_timer_init(AioContext *ctx, + QEMUTimer *ts, QEMUClockType type, + int scale, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque); +} + #endif diff --git a/include/block/block_int.h b/include/block/block_int.h index e45f2a0d56..8012e253c9 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -34,6 +34,7 @@ #include "monitor/monitor.h" #include "qemu/hbitmap.h" #include "block/snapshot.h" +#include "qemu/main-loop.h" #define BLOCK_FLAG_ENCRYPT 1 #define BLOCK_FLAG_COMPAT6 4 @@ -281,6 +282,9 @@ struct BlockDriverState { /* Whether the disk can expand beyond total_sectors */ int growable; + /* Whether produces zeros when read beyond eof */ + bool zero_beyond_eof; + /* the memory alignment required for the buffers handled by this driver */ int buffer_alignment; diff --git a/include/block/blockjob.h b/include/block/blockjob.h index c290d07bba..d530409ff5 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -141,7 +141,7 @@ void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs, * Put the job to sleep (assuming that it wasn't canceled) for @ns * nanoseconds. Canceling the job will interrupt the wait immediately. 
*/ -void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns); +void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns); /** * block_job_completed: diff --git a/include/block/coroutine.h b/include/block/coroutine.h index 1f2db3e8a4..4232569c53 100644 --- a/include/block/coroutine.h +++ b/include/block/coroutine.h @@ -16,6 +16,7 @@ #define QEMU_COROUTINE_H #include <stdbool.h> +#include "qemu/typedefs.h" #include "qemu/queue.h" #include "qemu/timer.h" @@ -212,7 +213,7 @@ void qemu_co_rwlock_unlock(CoRwlock *lock); * Note this function uses timers and hence only works when a main loop is in * use. See main-loop.h and do not use from qemu-tool programs. */ -void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns); +void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns); /** * Yield until a file descriptor becomes readable diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 5920f73c90..beb41491b4 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -295,58 +295,42 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, } } -/* The return address may point to the start of the next instruction. - Subtracting one gets us the call instruction itself. */ +/* GETRA is the true target of the return instruction that we'll execute, + defined here for simplicity of defining the follow-up macros. */ #if defined(CONFIG_TCG_INTERPRETER) extern uintptr_t tci_tb_ptr; -# define GETPC() tci_tb_ptr -#elif defined(__s390__) && !defined(__s390x__) -# define GETPC() \ - (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1) -#elif defined(__arm__) -/* Thumb return addresses have the low bit set, so we need to subtract two. - This is still safe in ARM mode because instructions are 4 bytes. */ -# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2) +# define GETRA() tci_tb_ptr +#else +# define GETRA() \ + ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) +#endif + +/* The true return address will often point to a host insn that is part of + the next translated guest insn. Adjust the address backward to point to + the middle of the call insn. Subtracting one would do the job except for + several compressed mode architectures (arm, mips) which set the low bit + to indicate the compressed mode; subtracting two works around that. It + is also the case that there are no host isas that contain a call insn + smaller than 4 bytes, so we don't worry about special-casing this. */ +#if defined(CONFIG_TCG_INTERPRETER) +# define GETPC_ADJ 0 #else -# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1) +# define GETPC_ADJ 2 #endif +#define GETPC() (GETRA() - GETPC_ADJ) + +/* The LDST optimizations splits code generation into fast and slow path. + In some implementations, we pass the "logical" return address manually; + in others, we must infer the logical return from the true return. */ #if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU) -/* qemu_ld/st optimization split code generation to fast and slow path, thus, - it needs special handling for an MMU helper which is called from the slow - path, to get the fast path's pc without any additional argument. - It uses a tricky solution which embeds the fast path pc into the slow path. - - Code flow in slow path: - (1) pre-process - (2) call MMU helper - (3) jump to (5) - (4) fast path information (implementation specific) - (5) post-process (e.g. 
stack adjust) - (6) jump to corresponding code of the next of fast path - */ -# if defined(__i386__) || defined(__x86_64__) -/* To avoid broken disassembling, long jmp is used for embedding fast path pc, - so that the destination is the next code of fast path, though this jmp is - never executed. - - call MMU helper - jmp POST_PROC (2byte) <- GETRA() - jmp NEXT_CODE (5byte) - POST_PROCESS ... <- GETRA() + 7 - */ -# define GETRA() ((uintptr_t)__builtin_return_address(0)) -# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \ - *(int32_t *)((void *)GETRA() + 3) - 1)) -# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64) -# define GETRA() ((uintptr_t)__builtin_return_address(0)) -# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1)) +# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64) +# define GETRA_LDST(RA) (*(int32_t *)((RA) - 4)) # elif defined(__arm__) /* We define two insns between the return address and the branch back to straight-line. Find and decode that branch insn. */ -# define GETRA() ((uintptr_t)__builtin_return_address(0)) -# define GETPC_LDST() tcg_getpc_ldst(GETRA()) -static inline uintptr_t tcg_getpc_ldst(uintptr_t ra) +# define GETRA_LDST(RA) tcg_getra_ldst(RA) +static inline uintptr_t tcg_getra_ldst(uintptr_t ra) { int32_t b; ra += 8; /* skip the two insns */ @@ -354,31 +338,32 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra) b = (b << 8) >> (8 - 2); /* extract the displacement */ ra += 8; /* branches are relative to pc+8 */ ra += b; /* apply the displacement */ - ra -= 4; /* return a pointer into the current opcode, - not the start of the next opcode */ return ra; } -#elif defined(__aarch64__) -# define GETRA() ((uintptr_t)__builtin_return_address(0)) -# define GETPC_LDST() tcg_getpc_ldst(GETRA()) -static inline uintptr_t tcg_getpc_ldst(uintptr_t ra) +# elif defined(__aarch64__) +# define GETRA_LDST(RA) tcg_getra_ldst(RA) +static inline uintptr_t tcg_getra_ldst(uintptr_t ra) { int32_t b; ra += 4; /* skip one instruction */ b = *(int32_t *)ra; /* load the branch insn */ b = (b << 6) >> (6 - 2); /* extract the displacement */ ra += b; /* apply the displacement */ - ra -= 4; /* return a pointer into the current opcode, - not the start of the next opcode */ return ra; } -# else -# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!" # endif +#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */ + +/* ??? Delete these once they are no longer used. */ bool is_tcg_gen_code(uintptr_t pc_ptr); -# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC()) +#ifdef GETRA_LDST +# define GETRA_EXT() tcg_getra_ext(GETRA()) +static inline uintptr_t tcg_getra_ext(uintptr_t ra) +{ + return is_tcg_gen_code(ra) ? 
GETRA_LDST(ra) : ra; +} #else -# define GETPC_EXT() GETPC() +# define GETRA_EXT() GETRA() #endif #if !defined(CONFIG_USER_ONLY) @@ -392,7 +377,10 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx, uintptr_t retaddr); -#include "exec/softmmu_defs.h" +uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); #define ACCESS_TYPE (NB_MMU_MODES + 1) #define MEMSUFFIX _code diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index 4fc7b2981d..39a6b61e4f 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -39,12 +39,12 @@ static inline void gen_tb_start(void) static void gen_tb_end(TranslationBlock *tb, int num_insns) { gen_set_label(exitreq_label); - tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_REQUESTED); + tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED); if (use_icount) { *icount_arg = num_insns; gen_set_label(icount_label); - tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_ICOUNT_EXPIRED); + tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED); } } diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h deleted file mode 100644 index 1f25e33ce4..0000000000 --- a/include/exec/softmmu_defs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Software MMU support - * - * Declare helpers used by TCG for qemu_ld/st ops. - * - * Used by softmmu_exec.h, TCG targets and exec-all.h. - * - */ -#ifndef SOFTMMU_DEFS_H -#define SOFTMMU_DEFS_H - -uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - int mmu_idx); -uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - int mmu_idx); -uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx); -uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx); - -uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val, -int mmu_idx); -uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val, - int mmu_idx); -uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx); -uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx); -#endif diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h index 3e4e886a30..6fde154527 100644 --- a/include/exec/softmmu_exec.h +++ b/include/exec/softmmu_exec.h @@ -19,7 +19,8 @@ #define ldul_executive ldl_executive #define ldul_supervisor ldl_supervisor -#include "exec/softmmu_defs.h" +/* The memory helpers for tcg-generated code need tcg_target_long etc. 
*/ +#include "tcg.h" #define ACCESS_TYPE 0 #define MEMSUFFIX MMU_MODE0_SUFFIX diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h index 8584902cbe..5bbc56afd5 100644 --- a/include/exec/softmmu_template.h +++ b/include/exec/softmmu_template.h @@ -28,24 +28,40 @@ #if DATA_SIZE == 8 #define SUFFIX q -#define USUFFIX q -#define DATA_TYPE uint64_t +#define LSUFFIX q +#define SDATA_TYPE int64_t #elif DATA_SIZE == 4 #define SUFFIX l -#define USUFFIX l -#define DATA_TYPE uint32_t +#define LSUFFIX l +#define SDATA_TYPE int32_t #elif DATA_SIZE == 2 #define SUFFIX w -#define USUFFIX uw -#define DATA_TYPE uint16_t +#define LSUFFIX uw +#define SDATA_TYPE int16_t #elif DATA_SIZE == 1 #define SUFFIX b -#define USUFFIX ub -#define DATA_TYPE uint8_t +#define LSUFFIX ub +#define SDATA_TYPE int8_t #else #error unsupported data size #endif +#define DATA_TYPE glue(u, SDATA_TYPE) + +/* For the benefit of TCG generated code, we want to avoid the complication + of ABI-specific return type promotion and always return a value extended + to the register size of the host. This is tcg_target_long, except in the + case of a 32-bit host and 64-bit data, and for that we always have + uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */ +#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 +# define WORD_TYPE DATA_TYPE +# define USUFFIX SUFFIX +#else +# define WORD_TYPE tcg_target_ulong +# define USUFFIX glue(u, SUFFIX) +# define SSUFFIX glue(s, SUFFIX) +#endif + #ifdef SOFTMMU_CODE_ACCESS #define READ_ACCESS_TYPE 2 #define ADDR_READ addr_code @@ -54,10 +70,6 @@ #define ADDR_READ addr_read #endif -static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, - target_ulong addr, - int mmu_idx, - uintptr_t retaddr); static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, hwaddr physaddr, target_ulong addr, @@ -78,123 +90,105 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, } /* handle all cases except unaligned access which span two pages */ -DATA_TYPE -glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, - int mmu_idx) +#ifdef SOFTMMU_CODE_ACCESS +static +#endif +WORD_TYPE +glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env, + target_ulong addr, int mmu_idx, + uintptr_t retaddr) { - DATA_TYPE res; - int index; - target_ulong tlb_addr; - hwaddr ioaddr; - uintptr_t retaddr; + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + uintptr_t haddr; - /* test if there is match for unaligned or IO access */ - /* XXX: could done more in memory macro in a non portable way */ - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (tlb_addr & ~TARGET_PAGE_MASK) { - /* IO access */ - if ((addr & (DATA_SIZE - 1)) != 0) - goto do_unaligned_access; - retaddr = GETPC_EXT(); - ioaddr = env->iotlb[mmu_idx][index]; - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); - } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { - /* slow unaligned access (it spans two pages or IO) */ - do_unaligned_access: - retaddr = GETPC_EXT(); + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. 
*/ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); -#endif - res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr, - mmu_idx, retaddr); - } else { - /* unaligned/aligned access in the same page */ - uintptr_t addend; -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - retaddr = GETPC_EXT(); - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); - } -#endif - addend = env->tlb_table[mmu_idx][index].addend; - res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t) - (addr + addend)); } - } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC_EXT(); -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) - do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); #endif tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); - goto redo; + tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; } - return res; -} -/* handle all unaligned cases */ -static DATA_TYPE -glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, - target_ulong addr, - int mmu_idx, - uintptr_t retaddr) -{ - DATA_TYPE res, res1, res2; - int index, shift; - hwaddr ioaddr; - target_ulong tlb_addr, addr1, addr2; + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + } - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (tlb_addr & ~TARGET_PAGE_MASK) { - /* IO access */ - if ((addr & (DATA_SIZE - 1)) != 0) - goto do_unaligned_access; - ioaddr = env->iotlb[mmu_idx][index]; - res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); - } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { - do_unaligned_access: - /* slow unaligned access (it spans two pages) */ - addr1 = addr & ~(DATA_SIZE - 1); - addr2 = addr1 + DATA_SIZE; - res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1, - mmu_idx, retaddr); - res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2, - mmu_idx, retaddr); - shift = (addr & (DATA_SIZE - 1)) * 8; + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + DATA_TYPE res1, res2, res; + unsigned shift; + do_unaligned_access: +#ifdef ALIGNED_ONLY + do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); +#endif + addr1 = addr & ~(DATA_SIZE - 1); + addr2 = addr1 + DATA_SIZE; + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. 
*/ + res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) + (env, addr1, mmu_idx, retaddr + GETPC_ADJ); + res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) + (env, addr2, mmu_idx, retaddr + GETPC_ADJ); + shift = (addr & (DATA_SIZE - 1)) * 8; #ifdef TARGET_WORDS_BIGENDIAN - res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); + res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); #else - res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); + res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); #endif - res = (DATA_TYPE)res; - } else { - /* unaligned/aligned access in the same page */ - uintptr_t addend = env->tlb_table[mmu_idx][index].addend; - res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t) - (addr + addend)); - } - } else { - /* the page is not in the TLB : fill it */ - tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); - goto redo; + return res; } - return res; + + /* Handle aligned access or unaligned access in the same page. */ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr); + } +#endif + + haddr = addr + env->tlb_table[mmu_idx][index].addend; + /* Note that ldl_raw is defined with type "int". */ + return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr); +} + +DATA_TYPE +glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, + int mmu_idx) +{ + return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx, + GETRA_EXT()); } #ifndef SOFTMMU_CODE_ACCESS -static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, - target_ulong addr, - DATA_TYPE val, - int mmu_idx, - uintptr_t retaddr); +/* Provide signed versions of the load routines as well. We can of course + avoid this for 64-bit data, or for 32-bit data on 32-bit host. */ +#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS +WORD_TYPE +glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env, + target_ulong addr, int mmu_idx, + uintptr_t retaddr) +{ + return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) + (env, addr, mmu_idx, retaddr); +} +#endif static inline void glue(io_write, SUFFIX)(CPUArchState *env, hwaddr physaddr, @@ -214,107 +208,84 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env, io_mem_write(mr, physaddr, val, 1 << SHIFT); } -void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, - target_ulong addr, DATA_TYPE val, - int mmu_idx) +void +glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, + target_ulong addr, DATA_TYPE val, + int mmu_idx, uintptr_t retaddr) { - hwaddr ioaddr; - target_ulong tlb_addr; - uintptr_t retaddr; - int index; + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t haddr; - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (tlb_addr & ~TARGET_PAGE_MASK) { - /* IO access */ - if ((addr & (DATA_SIZE - 1)) != 0) - goto do_unaligned_access; - retaddr = GETPC_EXT(); - ioaddr = env->iotlb[mmu_idx][index]; - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); - } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { - do_unaligned_access: - retaddr = GETPC_EXT(); + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. 
*/ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { #ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { do_unaligned_access(env, addr, 1, mmu_idx, retaddr); -#endif - glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val, - mmu_idx, retaddr); - } else { - /* aligned/unaligned access in the same page */ - uintptr_t addend; -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) { - retaddr = GETPC_EXT(); - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); - } -#endif - addend = env->tlb_table[mmu_idx][index].addend; - glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t) - (addr + addend), val); } - } else { - /* the page is not in the TLB : fill it */ - retaddr = GETPC_EXT(); -#ifdef ALIGNED_ONLY - if ((addr & (DATA_SIZE - 1)) != 0) - do_unaligned_access(env, addr, 1, mmu_idx, retaddr); #endif tlb_fill(env, addr, 1, mmu_idx, retaddr); - goto redo; + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; } -} -/* handles all unaligned cases */ -static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, - target_ulong addr, - DATA_TYPE val, - int mmu_idx, - uintptr_t retaddr) -{ - hwaddr ioaddr; - target_ulong tlb_addr; - int index, i; + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + return; + } - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - redo: - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; - if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - if (tlb_addr & ~TARGET_PAGE_MASK) { - /* IO access */ - if ((addr & (DATA_SIZE - 1)) != 0) - goto do_unaligned_access; - ioaddr = env->iotlb[mmu_idx][index]; - glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); - } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { - do_unaligned_access: - /* XXX: not efficient, but simple */ - /* Note: relies on the fact that tlb_fill() does not remove the - * previous page from the TLB cache. */ - for(i = DATA_SIZE - 1; i >= 0; i--) { + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + int i; + do_unaligned_access: +#ifdef ALIGNED_ONLY + do_unaligned_access(env, addr, 1, mmu_idx, retaddr); +#endif + /* XXX: not efficient, but simple */ + /* Note: relies on the fact that tlb_fill() does not remove the + * previous page from the TLB cache. */ + for (i = DATA_SIZE - 1; i >= 0; i--) { #ifdef TARGET_WORDS_BIGENDIAN - glue(slow_stb, MMUSUFFIX)(env, addr + i, - val >> (((DATA_SIZE - 1) * 8) - (i * 8)), - mmu_idx, retaddr); + uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8)); #else - glue(slow_stb, MMUSUFFIX)(env, addr + i, - val >> (i * 8), - mmu_idx, retaddr); + uint8_t val8 = val >> (i * 8); #endif - } - } else { - /* aligned/unaligned access in the same page */ - uintptr_t addend = env->tlb_table[mmu_idx][index].addend; - glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t) - (addr + addend), val); + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. 
*/ + glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, + mmu_idx, retaddr + GETPC_ADJ); } - } else { - /* the page is not in the TLB : fill it */ - tlb_fill(env, addr, 1, mmu_idx, retaddr); - goto redo; + return; + } + + /* Handle aligned access or unaligned access in the same page. */ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + do_unaligned_access(env, addr, 1, mmu_idx, retaddr); } +#endif + + haddr = addr + env->tlb_table[mmu_idx][index].addend; + glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val); +} + +void +glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, + DATA_TYPE val, int mmu_idx) +{ + glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx, + GETRA_EXT()); } #endif /* !defined(SOFTMMU_CODE_ACCESS) */ @@ -323,6 +294,10 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, #undef SHIFT #undef DATA_TYPE #undef SUFFIX -#undef USUFFIX +#undef LSUFFIX #undef DATA_SIZE #undef ADDR_READ +#undef WORD_TYPE +#undef SDATA_TYPE +#undef USUFFIX +#undef SSUFFIX diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h index 635be7be10..51733d3390 100644 --- a/include/hw/acpi/acpi.h +++ b/include/hw/acpi/acpi.h @@ -136,7 +136,7 @@ void acpi_pm_tmr_reset(ACPIREGS *ar); #include "qemu/timer.h" static inline int64_t acpi_pm_tmr_get_clock(void) { - return muldiv64(qemu_get_clock_ns(vm_clock), PM_TIMER_FREQUENCY, + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY, get_ticks_per_sec()); } diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h index 495bcf3a08..fa45a5b094 100644 --- a/include/hw/isa/isa.h +++ b/include/hw/isa/isa.h @@ -78,7 +78,7 @@ void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start); * @dev: the ISADevice against which these are registered; may be NULL. * @start: the base I/O port against which the portio->offset is applied. * @portio: the ports, sorted by offset. - * @opaque: passed into the old_portio callbacks. + * @opaque: passed into the portio callbacks. * @name: passed into memory_region_init_io. 
*/ void isa_register_portio_list(ISADevice *dev, uint16_t start, diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h index 93f9511325..970b4a9e4a 100644 --- a/include/hw/pci-host/spapr.h +++ b/include/hw/pci-host/spapr.h @@ -43,8 +43,7 @@ typedef struct sPAPRPHBState { MemoryRegion memspace, iospace; hwaddr mem_win_addr, mem_win_size, io_win_addr, io_win_size; - hwaddr msi_win_addr; - MemoryRegion memwindow, iowindow, msiwindow; + MemoryRegion memwindow, iowindow; uint32_t dma_liobn; uint64_t dma_window_start; @@ -73,7 +72,8 @@ typedef struct sPAPRPHBState { #define SPAPR_PCI_MMIO_WIN_SIZE 0x20000000 #define SPAPR_PCI_IO_WIN_OFF 0x80000000 #define SPAPR_PCI_IO_WIN_SIZE 0x10000 -#define SPAPR_PCI_MSI_WIN_OFF 0x90000000 + +#define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL @@ -88,6 +88,8 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt); +void spapr_pci_msi_init(sPAPREnvironment *spapr, hwaddr addr); + void spapr_pci_rtas_init(void); #endif /* __HW_SPAPR_PCI_H__ */ diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index 2374aa95ba..37979aa723 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -348,7 +348,7 @@ typedef int (*pci_hotplug_fn)(DeviceState *qdev, PCIDevice *pci_dev, bool pci_bus_is_express(PCIBus *bus); bool pci_bus_is_root(PCIBus *bus); -void pci_bus_new_inplace(PCIBus *bus, DeviceState *parent, +void pci_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, const char *name, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 9fc197286c..e37b41983c 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -13,6 +13,8 @@ struct sPAPRNVRAM; typedef struct sPAPREnvironment { struct VIOsPAPRBus *vio_bus; QLIST_HEAD(, sPAPRPHBState) phbs; + hwaddr msi_win_addr; + MemoryRegion msiwindow; struct sPAPRNVRAM *nvram; XICSState *icp; @@ -109,6 +111,15 @@ typedef struct sPAPREnvironment { #define H_NOT_ENOUGH_RESOURCES -44 #define H_R_STATE -45 #define H_RESCINDEND -46 +#define H_P2 -55 +#define H_P3 -56 +#define H_P4 -57 +#define H_P5 -58 +#define H_P6 -59 +#define H_P7 -60 +#define H_P8 -61 +#define H_P9 -62 +#define H_UNSUPPORTED_FLAG -256 #define H_MULTI_THREADS_ACTIVE -9005 @@ -143,6 +154,11 @@ typedef struct sPAPREnvironment { #define H_PP1 (1ULL<<(63-62)) #define H_PP2 (1ULL<<(63-63)) +/* H_SET_MODE flags */ +#define H_SET_MODE_ENDIAN 4 +#define H_SET_MODE_ENDIAN_BIG 0 +#define H_SET_MODE_ENDIAN_LITTLE 1 + /* VASI States */ #define H_VASI_INVALID 0 #define H_VASI_ENABLED 1 @@ -267,7 +283,8 @@ typedef struct sPAPREnvironment { #define H_GET_EM_PARMS 0x2B8 #define H_SET_MPP 0x2D0 #define H_GET_MPP 0x2D4 -#define MAX_HCALL_OPCODE H_GET_MPP +#define H_SET_MODE 0x31C +#define MAX_HCALL_OPCODE H_SET_MODE /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. 
@@ -303,7 +320,7 @@ target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, target_ulong *args); int spapr_allocate_irq(int hint, bool lsi); -int spapr_allocate_irq_block(int num, bool lsi); +int spapr_allocate_irq_block(int num, bool lsi, bool msi); static inline int spapr_allocate_msi(int hint) { diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 46972f4961..a62f231eb9 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -264,7 +264,7 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id); typedef int (qbus_walkerfn)(BusState *bus, void *opaque); typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); -void qbus_create_inplace(void *bus, const char *typename, +void qbus_create_inplace(void *bus, size_t size, const char *typename, DeviceState *parent, const char *name); BusState *qbus_create(const char *typename, DeviceState *parent, const char *name); /* Returns > 0 if either devfn or busfn skip walk somewhere in cursion, diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 87865313eb..1b6651054a 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -152,8 +152,8 @@ struct SCSIBus { const SCSIBusInfo *info; }; -void scsi_bus_new(SCSIBus *bus, DeviceState *host, const SCSIBusInfo *info, - const char *bus_name); +void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host, + const SCSIBusInfo *info, const char *bus_name); static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d) { diff --git a/include/hw/usb.h b/include/hw/usb.h index 901b0da8b0..1b8acba6f6 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -496,7 +496,8 @@ struct USBBusOps { void (*wakeup_endpoint)(USBBus *bus, USBEndpoint *ep, unsigned int stream); }; -void usb_bus_new(USBBus *bus, USBBusOps *ops, DeviceState *host); +void usb_bus_new(USBBus *bus, size_t bus_size, + USBBusOps *ops, DeviceState *host); USBBus *usb_bus_find(int busnr); void usb_legacy_register(const char *typename, const char *usbdevice_name, USBDevice *(*usbdevice_init)(USBBus *bus, diff --git a/include/monitor/readline.h b/include/monitor/readline.h index fc9806ecf1..0faf6e1db7 100644 --- a/include/monitor/readline.h +++ b/include/monitor/readline.h @@ -8,7 +8,8 @@ #define READLINE_MAX_COMPLETIONS 256 typedef void ReadLineFunc(Monitor *mon, const char *str, void *opaque); -typedef void ReadLineCompletionFunc(const char *cmdline); +typedef void ReadLineCompletionFunc(Monitor *mon, + const char *cmdline); typedef struct ReadLineState { char cmd_buf[READLINE_CMD_BUF_SIZE + 1]; diff --git a/include/qapi/opts-visitor.h b/include/qapi/opts-visitor.h index 5939eeebc7..fd48c14ec8 100644 --- a/include/qapi/opts-visitor.h +++ b/include/qapi/opts-visitor.h @@ -16,6 +16,12 @@ #include "qapi/visitor.h" #include "qemu/option.h" +/* Inclusive upper bound on the size of any flattened range. This is a safety + * (= anti-annoyance) measure; wrong ranges should not cause long startup + * delays nor exhaust virtual memory. 
+ */ +#define OPTS_VISITOR_RANGE_MAX 65536 + typedef struct OptsVisitor OptsVisitor; /* Contrarily to qemu-option.c::parse_option_number(), OptsVisitor's "int" diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h index d1610f135b..d413a4a696 100644 --- a/include/qemu/ratelimit.h +++ b/include/qemu/ratelimit.h @@ -23,7 +23,7 @@ typedef struct { static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n) { - int64_t now = qemu_get_clock_ns(rt_clock); + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); if (limit->next_slice_time < now) { limit->next_slice_time = now + limit->slice_ns; diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 9dd206ce7f..e4934dd61b 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -1,8 +1,8 @@ #ifndef QEMU_TIMER_H #define QEMU_TIMER_H +#include "qemu/typedefs.h" #include "qemu-common.h" -#include "qemu/main-loop.h" #include "qemu/notify.h" /* timers */ @@ -11,80 +11,643 @@ #define SCALE_US 1000 #define SCALE_NS 1 -typedef struct QEMUClock QEMUClock; +/** + * QEMUClockType: + * + * The following clock types are available: + * + * @QEMU_CLOCK_REALTIME: Real time clock + * + * The real time clock should be used only for stuff which does not + * change the virtual machine state, as it is run even if the virtual + * machine is stopped. The real time clock has a frequency of 1000 + * Hz. + * + * @QEMU_CLOCK_VIRTUAL: virtual clock + * + * The virtual clock is only run during the emulation. It is stopped + * when the virtual machine is stopped. Virtual timers use a high + * precision clock, usually cpu cycles (use ticks_per_sec). + * + * @QEMU_CLOCK_HOST: host clock + * + * The host clock should be use for device models that emulate accurate + * real time sources. It will continue to run when the virtual machine + * is suspended, and it will reflect system time changes the host may + * undergo (e.g. due to NTP). The host clock has the same precision as + * the virtual clock. 
+ */ + +typedef enum { + QEMU_CLOCK_REALTIME = 0, + QEMU_CLOCK_VIRTUAL = 1, + QEMU_CLOCK_HOST = 2, + QEMU_CLOCK_MAX +} QEMUClockType; + +typedef struct QEMUTimerList QEMUTimerList; + +struct QEMUTimerListGroup { + QEMUTimerList *tl[QEMU_CLOCK_MAX]; +}; + typedef void QEMUTimerCB(void *opaque); +typedef void QEMUTimerListNotifyCB(void *opaque); + +struct QEMUTimer { + int64_t expire_time; /* in nanoseconds */ + QEMUTimerList *timer_list; + QEMUTimerCB *cb; + void *opaque; + QEMUTimer *next; + int scale; +}; + +extern QEMUTimerListGroup main_loop_tlg; + +/* + * QEMUClockType + */ + +/* + * qemu_clock_get_ns; + * @type: the clock type + * + * Get the nanosecond value of a clock with + * type @type + * + * Returns: the clock value in nanoseconds + */ +int64_t qemu_clock_get_ns(QEMUClockType type); + +/** + * qemu_clock_get_ms; + * @type: the clock type + * + * Get the millisecond value of a clock with + * type @type + * + * Returns: the clock value in milliseconds + */ +static inline int64_t qemu_clock_get_ms(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_MS; +} + +/** + * qemu_clock_get_us; + * @type: the clock type + * + * Get the microsecond value of a clock with + * type @type + * + * Returns: the clock value in microseconds + */ +static inline int64_t qemu_clock_get_us(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_US; +} + +/** + * qemu_clock_has_timers: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has timers attached + * + * Returns: true if the clock's default timer list + * has timers attached + */ +bool qemu_clock_has_timers(QEMUClockType type); + +/** + * qemu_clock_expired: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has an expired clock. + * + * Returns: true if the clock's default timer list has + * an expired timer + */ +bool qemu_clock_expired(QEMUClockType type); + +/** + * qemu_clock_use_for_deadline: + * @type: the clock type + * + * Determine whether a clock should be used for deadline + * calculations. Some clocks, for instance vm_clock with + * use_icount set, do not count in nanoseconds. Such clocks + * are not used for deadline calculations, and are presumed + * to interrupt any poll using qemu_notify/aio_notify + * etc. + * + * Returns: true if the clock runs in nanoseconds and + * should be used for a deadline. + */ +bool qemu_clock_use_for_deadline(QEMUClockType type); + +/** + * qemu_clock_deadline_ns_all: + * @type: the clock type + * + * Calculate the deadline across all timer lists associated + * with a clock (as opposed to just the default one) + * in nanoseconds, or -1 if no timer is set to expire. + * + * Returns: time until expiry in nanoseconds or -1 + */ +int64_t qemu_clock_deadline_ns_all(QEMUClockType type); + +/** + * qemu_clock_get_main_loop_timerlist: + * @type: the clock type + * + * Return the default timer list assocatiated with a clock. + * + * Returns: the default timer list + */ +QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type); -/* The real time clock should be used only for stuff which does not - change the virtual machine state, as it is run even if the virtual - machine is stopped. The real time clock has a frequency of 1000 - Hz. */ -extern QEMUClock *rt_clock; - -/* The virtual clock is only run during the emulation. It is stopped - when the virtual machine is stopped. Virtual timers use a high - precision clock, usually cpu cycles (use ticks_per_sec). 
*/ -extern QEMUClock *vm_clock; - -/* The host clock should be use for device models that emulate accurate - real time sources. It will continue to run when the virtual machine - is suspended, and it will reflect system time changes the host may - undergo (e.g. due to NTP). The host clock has the same precision as - the virtual clock. */ -extern QEMUClock *host_clock; - -int64_t qemu_get_clock_ns(QEMUClock *clock); -int64_t qemu_clock_has_timers(QEMUClock *clock); -int64_t qemu_clock_expired(QEMUClock *clock); -int64_t qemu_clock_deadline(QEMUClock *clock); -void qemu_clock_enable(QEMUClock *clock, bool enabled); -void qemu_clock_warp(QEMUClock *clock); - -void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier); -void qemu_unregister_clock_reset_notifier(QEMUClock *clock, +/** + * qemu_clock_nofify: + * @type: the clock type + * + * Call the notifier callback connected with the default timer + * list linked to the clock, or qemu_notify() if none. + */ +void qemu_clock_notify(QEMUClockType type); + +/** + * qemu_clock_enable: + * @type: the clock type + * @enabled: true to enable, false to disable + * + * Enable or disable a clock + */ +void qemu_clock_enable(QEMUClockType type, bool enabled); + +/** + * qemu_clock_warp: + * @type: the clock type + * + * Warp a clock to a new value + */ +void qemu_clock_warp(QEMUClockType type); + +/** + * qemu_clock_register_reset_notifier: + * @type: the clock type + * @notifier: the notifier function + * + * Register a notifier function to call when the clock + * concerned is reset. + */ +void qemu_clock_register_reset_notifier(QEMUClockType type, + Notifier *notifier); + +/** + * qemu_clock_unregister_reset_notifier: + * @type: the clock type + * @notifier: the notifier function + * + * Unregister a notifier function to call when the clock + * concerned is reset. + */ +void qemu_clock_unregister_reset_notifier(QEMUClockType type, Notifier *notifier); -QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale, - QEMUTimerCB *cb, void *opaque); -void qemu_free_timer(QEMUTimer *ts); -void qemu_del_timer(QEMUTimer *ts); -void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time); -void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time); -bool qemu_timer_pending(QEMUTimer *ts); -bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time); -uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts); - -void qemu_run_timers(QEMUClock *clock); -void qemu_run_all_timers(void); -void configure_alarms(char const *opt); -void init_clocks(void); -int init_timer_alarm(void); +/** + * qemu_clock_run_timers: + * @type: clock on which to operate + * + * Run all the timers associated with the default timer list + * of a clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_timers(QEMUClockType type); -int64_t cpu_get_ticks(void); -void cpu_enable_ticks(void); -void cpu_disable_ticks(void); +/** + * qemu_clock_run_all_timers: + * + * Run all the timers associated with the default timer list + * of every clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_all_timers(void); + +/* + * QEMUTimerList + */ + +/** + * timerlist_new: + * @type: the clock type to associate with the timerlist + * @cb: the callback to call on notification + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timerlist associated with the clock of + * type @type. 
+ * + * Returns: a pointer to the QEMUTimerList created + */ +QEMUTimerList *timerlist_new(QEMUClockType type, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlist_free: + * @timer_list: the timer list to free + * + * Frees a timer_list. It must have no active timers. + */ +void timerlist_free(QEMUTimerList *timer_list); + +/** + * timerlist_has_timers: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has active timers + * + * Returns: true if the timer list has timers. + */ +bool timerlist_has_timers(QEMUTimerList *timer_list); + +/** + * timerlist_expired: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has any timers which + * are expired. + * + * Returns: true if the timer list has timers which + * have expired. + */ +bool timerlist_expired(QEMUTimerList *timer_list); + +/** + * timerlist_deadline_ns: + * @timer_list: the timer list to operate on + * + * Determine the deadline for a timer_list, i.e. + * the number of nanoseconds until the first timer + * expires. Return -1 if there are no timers. + * + * Returns: the number of nanoseconds until the earliest + * timer expires -1 if none + */ +int64_t timerlist_deadline_ns(QEMUTimerList *timer_list); + +/** + * timerlist_get_clock: + * @timer_list: the timer list to operate on + * + * Determine the clock type associated with a timer list. + * + * Returns: the clock type associated with the + * timer list. + */ +QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list); + +/** + * timerlist_run_timers: + * @timer_list: the timer list to use + * + * Call all expired timers associated with the timer list. + * + * Returns: true if any timer expired + */ +bool timerlist_run_timers(QEMUTimerList *timer_list); + +/** + * timerlist_notify: + * @timer_list: the timer list to use + * + * call the notifier callback associated with the timer list. + */ +void timerlist_notify(QEMUTimerList *timer_list); + +/* + * QEMUTimerListGroup + */ + +/** + * timerlistgroup_init: + * @tlg: the timer list group + * @cb: the callback to call when a notify is required + * @opaque: the opaque pointer to be passed to the callback. + * + * Initialise a timer list group. This must already be + * allocated in memory and zeroed. The notifier callback is + * called whenever a clock in the timer list group is + * reenabled or whenever a timer associated with any timer + * list is modified. If @cb is specified as null, qemu_notify() + * is used instead. + */ +void timerlistgroup_init(QEMUTimerListGroup *tlg, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlistgroup_deinit: + * @tlg: the timer list group + * + * Deinitialise a timer list group. This must already be + * initialised. Note the memory is not freed. + */ +void timerlistgroup_deinit(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_run_timers: + * @tlg: the timer list group + * + * Run the timers associated with a timer list group. + * This will run timers on multiple clocks. + * + * Returns: true if any timer callback ran + */ +bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_deadline_ns: + * @tlg: the timer list group + * + * Determine the deadline of the soonest timer to + * expire associated with any timer list linked to + * the timer list group. Only clocks suitable for + * deadline calculation are included. + * + * Returns: the deadline in nanoseconds or -1 if no + * timers are to expire. 
+ */ +int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg); + +/* + * QEMUTimer + */ + +/** + * timer_init: + * @ts: the timer to be initialised + * @timer_list: the timer list to attach the timer to + * @scale: the scale value for the tiemr + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Initialise a new timer and associate it with @timer_list. + * The caller is responsible for allocating the memory. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +void timer_init(QEMUTimer *ts, + QEMUTimerList *timer_list, int scale, + QEMUTimerCB *cb, void *opaque); + +/** + * timer_new_tl: + * @timer_list: the timer list to attach the timer to + * @scale: the scale value for the tiemr + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Creeate a new timer and associate it with @timer_list. + * The memory is allocated by the function. + * + * This is not the preferred interface unless you know you + * are going to call timer_free. Use timer_init instead. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list, + int scale, + QEMUTimerCB *cb, + void *opaque) +{ + QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer)); + timer_init(ts, timer_list, scale, cb, opaque); + return ts; +} -static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb, - void *opaque) +/** + * timer_new: + * @type: the clock type to use + * @scale: the scale value for the tiemr + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Creeate a new timer and associate it with the default + * timer list for the clock type @type. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new(QEMUClockType type, int scale, + QEMUTimerCB *cb, void *opaque) { - return qemu_new_timer(clock, SCALE_NS, cb, opaque); + return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque); } -static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb, - void *opaque) +/** + * timer_new_ns: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with nanosecond scale on the default timer list + * associated with the clock. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) { - return qemu_new_timer(clock, SCALE_MS, cb, opaque); + return timer_new(type, SCALE_NS, cb, opaque); } -static inline int64_t qemu_get_clock_ms(QEMUClock *clock) +/** + * timer_new_us: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with microsecond scale on the default timer list + * associated with the clock. 
+ * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) { - return qemu_get_clock_ns(clock) / SCALE_MS; + return timer_new(type, SCALE_US, cb, opaque); } +/** + * timer_new_ms: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with millisecond scale on the default timer list + * associated with the clock. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_MS, cb, opaque); +} + +/** + * timer_free: + * @ts: the timer + * + * Free a timer (it must not be on the active list) + */ +void timer_free(QEMUTimer *ts); + +/** + * timer_del: + * @ts: the timer + * + * Delete a timer from the active list. + */ +void timer_del(QEMUTimer *ts); + +/** + * timer_mod_ns: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time + */ +void timer_mod_ns(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod: + * @ts: the timer + * @expire_time: the expire time in the units associated with the timer + * + * Modify a timer to expiry at @expire_time, taking into + * account the scale associated with the timer. + */ +void timer_mod(QEMUTimer *ts, int64_t expire_timer); + +/** + * timer_pending: + * @ts: the timer + * + * Determines whether a timer is pending (i.e. is on the + * active list of timers, whether or not it has not yet expired). + * + * Returns: true if the timer is pending + */ +bool timer_pending(QEMUTimer *ts); + +/** + * timer_expired: + * @ts: the timer + * + * Determines whether a timer has expired. + * + * Returns: true if the timer has expired + */ +bool timer_expired(QEMUTimer *timer_head, int64_t current_time); + +/** + * timer_expire_time_ns: + * @ts: the timer + * + * Determine the expiry time of a timer + * + * Returns: the expiry time in nanoseconds + */ +uint64_t timer_expire_time_ns(QEMUTimer *ts); + +/** + * timer_get: + * @f: the file + * @ts: the timer + * + * Read a timer @ts from a file @f + */ +void timer_get(QEMUFile *f, QEMUTimer *ts); + +/** + * timer_put: + * @f: the file + * @ts: the timer + */ +void timer_put(QEMUFile *f, QEMUTimer *ts); + +/* + * General utility functions + */ + +/** + * qemu_timeout_ns_to_ms: + * @ns: nanosecond timeout value + * + * Convert a nanosecond timeout value (or -1) to + * a millisecond value (or -1), always rounding up. + * + * Returns: millisecond timeout value + */ +int qemu_timeout_ns_to_ms(int64_t ns); + +/** + * qemu_poll_ns: + * @fds: Array of file descriptors + * @nfds: number of file descriptors + * @timeout: timeout in nanoseconds + * + * Perform a poll like g_poll but with a timeout in nanoseconds. + * See g_poll documentation for further details. + * + * Returns: number of fds ready + */ +int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout); + +/** + * qemu_soonest_timeout: + * @timeout1: first timeout in nanoseconds (or -1 for infinite) + * @timeout2: second timeout in nanoseconds (or -1 for infinite) + * + * Calculates the soonest of two timeout values. -1 means infinite, which + * is later than any other value. 
+ * + * Returns: soonest timeout value in nanoseconds (or -1 for infinite) + */ +static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2) +{ + /* we can abuse the fact that -1 (which means infinite) is a maximal + * value when cast to unsigned. As this is disgusting, it's kept in + * one inline function. + */ + return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2; +} + +/** + * initclocks: + * + * Initialise the clock & timer infrastructure + */ +void init_clocks(void); + +int64_t cpu_get_ticks(void); +void cpu_enable_ticks(void); +void cpu_disable_ticks(void); + static inline int64_t get_ticks_per_sec(void) { return 1000000000LL; } +/* + * Low level clock functions + */ + /* real time host monotonic timer */ static inline int64_t get_clock_realtime(void) { @@ -128,9 +691,6 @@ static inline int64_t get_clock(void) } #endif -void qemu_get_timer(QEMUFile *f, QEMUTimer *ts); -void qemu_put_timer(QEMUFile *f, QEMUTimer *ts); - /* icount */ int64_t cpu_get_icount(void); int64_t cpu_get_clock(void); diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index ac9f8d41a3..3205540059 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -4,9 +4,12 @@ /* A load of opaque types so that device init declarations don't have to pull in all the real definitions. */ typedef struct QEMUTimer QEMUTimer; +typedef struct QEMUTimerListGroup QEMUTimerListGroup; typedef struct QEMUFile QEMUFile; typedef struct QEMUBH QEMUBH; +typedef struct AioContext AioContext; + struct Monitor; typedef struct Monitor Monitor; typedef struct MigrationParams MigrationParams; diff --git a/include/qom/object.h b/include/qom/object.h index 9b69065b7a..1a7b71aba5 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -249,7 +249,7 @@ typedef struct InterfaceInfo InterfaceInfo; * MyClass parent_class; * * MyDoSomething parent_do_something; - * } MyClass; + * } DerivedClass; * * static void derived_do_something(MyState *obj) * { @@ -585,25 +585,27 @@ Object *object_new_with_type(Type type); /** * object_initialize_with_type: - * @obj: A pointer to the memory to be used for the object. + * @data: A pointer to the memory to be used for the object. + * @size: The maximum size available at @data for the object. * @type: The type of the object to instantiate. * * This function will initialize an object. The memory for the object should * have already been allocated. The returned object has a reference count of 1, * and will be finalized when the last reference is dropped. */ -void object_initialize_with_type(void *data, Type type); +void object_initialize_with_type(void *data, size_t size, Type type); /** * object_initialize: * @obj: A pointer to the memory to be used for the object. + * @size: The maximum size available at @obj for the object. * @typename: The name of the type of the object to instantiate. * * This function will initialize an object. The memory for the object should * have already been allocated. The returned object has a reference count of 1, * and will be finalized when the last reference is dropped. 
*/ -void object_initialize(void *obj, const char *typename); +void object_initialize(void *obj, size_t size, const char *typename); /** * object_dynamic_cast: diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index de74411f41..8e7668524b 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -309,7 +309,8 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg); int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg); void kvm_irqchip_release_virq(KVMState *s, int virq); -int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq); +int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq); int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq); void kvm_pc_gsi_handler(void *opaque, int n, int level); void kvm_pc_setup_irq_routing(bool pci_enabled); diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index d7a77b6488..b1aa059102 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -124,7 +124,7 @@ extern int boot_menu; extern uint8_t *boot_splash_filedata; extern size_t boot_splash_filedata_size; extern uint8_t qemu_extra_params_fw[2]; -extern QEMUClock *rtc_clock; +extern QEMUClockType rtc_clock; #define MAX_NODES 64 #define MAX_CPUMASK_BITS 255 |
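The hunks above for include/qemu/timer.h and include/block/aio.h replace the old QEMUClock-pointer interface (qemu_new_timer_ms(vm_clock, ...), qemu_mod_timer(), qemu_del_timer()) with a QEMUClockType-based one (timer_new_ms(), timer_mod(), timer_del(), timer_free(), qemu_clock_get_ms()), with timers landing on the main loop's default timer list for the named clock. A minimal sketch of how a caller might use the new interface follows; the MyDev structure, the my_* functions and the 10 ms rearm period are hypothetical examples and do not appear in the diff.

    #include "qemu/timer.h"

    typedef struct MyDev {
        QEMUTimer *tick;    /* hypothetical device state, not part of the diff */
    } MyDev;

    /* Fires on the virtual clock; rearms itself 10 ms later. */
    static void my_tick_cb(void *opaque)
    {
        MyDev *d = opaque;

        /* timer_mod() takes an absolute expiry time in the timer's own scale;
         * this timer was created with millisecond scale via timer_new_ms(). */
        timer_mod(d->tick, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }

    static void my_dev_start(MyDev *d)
    {
        /* Old API: d->tick = qemu_new_timer_ms(vm_clock, my_tick_cb, d);
         * New API: the clock is named by QEMUClockType and the timer is
         * attached to the main loop's default timer list for that clock. */
        d->tick = timer_new_ms(QEMU_CLOCK_VIRTUAL, my_tick_cb, d);
        timer_mod(d->tick, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }

    static void my_dev_stop(MyDev *d)
    {
        timer_del(d->tick);     /* remove from the active list */
        timer_free(d->tick);    /* then release the memory */
    }

Code that runs timers in a specific AioContext would instead use the aio_timer_init()/aio_timer_new() helpers added to block/aio.h, which wrap timer_init()/timer_new_tl() over the context's per-clock timer lists (ctx->tlg.tl[type]).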