Diffstat (limited to 'arch/um')
-rw-r--r-- | arch/um/Kconfig                          |  12
-rw-r--r-- | arch/um/configs/i386_defconfig           |   1
-rw-r--r-- | arch/um/configs/x86_64_defconfig         |   1
-rw-r--r-- | arch/um/drivers/chan_kern.c              |  52
-rw-r--r-- | arch/um/drivers/ssl.c                    |   1
-rw-r--r-- | arch/um/drivers/ssl.h                    |  13
-rw-r--r-- | arch/um/include/asm/mmu_context.h        |   2
-rw-r--r-- | arch/um/include/shared/os.h              |  10
-rw-r--r-- | arch/um/include/shared/timer-internal.h  |  48
-rw-r--r-- | arch/um/kernel/irq.c                     |   9
-rw-r--r-- | arch/um/kernel/process.c                 |  42
-rw-r--r-- | arch/um/kernel/skas/Makefile             |   2
-rw-r--r-- | arch/um/kernel/skas/syscall.c            |  11
-rw-r--r-- | arch/um/kernel/time.c                    | 131
-rw-r--r-- | arch/um/os-Linux/time.c                  | 127
15 files changed, 320 insertions, 142 deletions
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 6b6eb938fcc1..3c3adfc486f2 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -184,6 +184,18 @@ config SECCOMP
 
 	  If unsure, say Y.
 
+config UML_TIME_TRAVEL_SUPPORT
+	bool
+	prompt "Support time-travel mode (e.g. for test execution)"
+	help
+	  Enable this option to support time travel inside the UML instance.
+
+	  After enabling this option, two modes are accessible at runtime
+	  (selected by the kernel command line), see the kernel's command-
+	  line help for more details.
+
+	  It is safe to say Y, but you probably don't need this.
+
 endmenu
 
 source "arch/um/drivers/Kconfig"
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index 8f114e3b0a7a..73e98bb57bf5 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -36,7 +36,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 5d0875fc0db2..3281d7600225 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -34,7 +34,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index a4e64edb8f38..749d2bf59599 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -171,19 +171,55 @@ int enable_chan(struct line *line)
 	return err;
 }
 
+/* Items are added in IRQ context, when free_irq can't be called, and
+ * removed in process context, when it can.
+ * This handles interrupt sources which disappear, and which need to
+ * be permanently disabled. This is discovered in IRQ context, but
+ * the freeing of the IRQ must be done later.
+ */
+static DEFINE_SPINLOCK(irqs_to_free_lock);
+static LIST_HEAD(irqs_to_free);
+
+void free_irqs(void)
+{
+	struct chan *chan;
+	LIST_HEAD(list);
+	struct list_head *ele;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irqs_to_free_lock, flags);
+	list_splice_init(&irqs_to_free, &list);
+	spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+
+	list_for_each(ele, &list) {
+		chan = list_entry(ele, struct chan, free_list);
+
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
+}
+
 static void close_one_chan(struct chan *chan, int delay_free_irq)
 {
+	unsigned long flags;
+
 	if (!chan->opened)
 		return;
 
-	/* we can safely call free now - it will be marked
-	 *  as free and freed once the IRQ stopped processing
-	 */
-	if (chan->input && chan->enabled)
-		um_free_irq(chan->line->driver->read_irq, chan);
-	if (chan->output && chan->enabled)
-		um_free_irq(chan->line->driver->write_irq, chan);
-	chan->enabled = 0;
+	if (delay_free_irq) {
+		spin_lock_irqsave(&irqs_to_free_lock, flags);
+		list_add(&chan->free_list, &irqs_to_free);
+		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+	} else {
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
 
 	if (chan->ops->close != NULL)
 		(*chan->ops->close)(chan->fd, chan->data);
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index b8d14fa52059..7ae407d5337e 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -12,7 +12,6 @@
 #include <linux/console.h>
 #include <asm/termbits.h>
 #include <asm/irq.h>
-#include "ssl.h"
 #include "chan.h"
 #include <init.h>
 #include <irq_user.h>
diff --git a/arch/um/drivers/ssl.h b/arch/um/drivers/ssl.h
deleted file mode 100644
index 314d17725ce6..000000000000
--- a/arch/um/drivers/ssl.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SSL_H__
-#define __SSL_H__
-
-extern int ssl_read(int fd, int line);
-extern void ssl_receive_char(int line, char ch);
-
-#endif
-
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 9f4b4bb78120..00cefd33afdd 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -52,7 +52,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write(&new->mmap_sem);
+	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
 	up_write(&new->mmap_sem);
 }
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index ebf23012a59b..4a62ac4251a5 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -250,15 +250,13 @@ extern void os_warn(const char *fmt, ...)
 
 /* time.c */
 extern void os_idle_sleep(unsigned long long nsecs);
-extern int os_timer_create(void* timer);
-extern int os_timer_set_interval(void* timer, void* its);
-extern int os_timer_one_shot(int ticks);
-extern long long os_timer_disable(void);
-extern long os_timer_remain(void* timer);
+extern int os_timer_create(void);
+extern int os_timer_set_interval(unsigned long long nsecs);
+extern int os_timer_one_shot(unsigned long long nsecs);
+extern void os_timer_disable(void);
 extern void uml_idle_timer(void);
 extern long long os_persistent_clock_emulation(void);
 extern long long os_nsecs(void);
-extern long long os_vnsecs(void);
 
 /* skas/mem.c */
 extern long run_syscall_stub(struct mm_id * mm_idp,
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 03e6f217f807..8574338bf23b 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -10,4 +10,52 @@
 #define TIMER_MULTIPLIER 256
 #define TIMER_MIN_DELTA 500
 
+enum time_travel_mode {
+	TT_MODE_OFF,
+	TT_MODE_BASIC,
+	TT_MODE_INFCPU,
+};
+
+enum time_travel_timer_mode {
+	TT_TMR_DISABLED,
+	TT_TMR_ONESHOT,
+	TT_TMR_PERIODIC,
+};
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+extern enum time_travel_mode time_travel_mode;
+extern unsigned long long time_travel_time;
+extern enum time_travel_timer_mode time_travel_timer_mode;
+extern unsigned long long time_travel_timer_expiry;
+extern unsigned long long time_travel_timer_interval;
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+	time_travel_time = ns;
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+	time_travel_timer_mode = mode;
+	time_travel_timer_expiry = expiry;
+}
+#else
+#define time_travel_mode TT_MODE_OFF
+#define time_travel_time 0
+#define time_travel_timer_expiry 0
+#define time_travel_timer_interval 0
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+}
+
+#define time_travel_timer_mode TT_TMR_DISABLED
+#endif
+
 #endif
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 598d7b3d9355..efde1f16c603 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -21,6 +21,8 @@
 #include <irq_user.h>
 
 
+extern void free_irqs(void);
+
 /* When epoll triggers we do not know why it did so
  * we can also have different IRQs for read and write.
  * This is why we keep a small irq_fd array for each fd -
@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 			}
 		}
 	}
+
+	free_irqs();
 }
 
 static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
@@ -380,10 +384,8 @@ EXPORT_SYMBOL(deactivate_fd);
  */
 int deactivate_all_fds(void)
 {
-	unsigned long flags;
 	struct irq_entry *to_free;
 
-	spin_lock_irqsave(&irq_lock, flags);
 	/* Stop IO. The IRQ loop has no lock so this is our
 	 * only way of making sure we are safe to dispose
 	 * of all IRQ handlers
@@ -399,8 +401,7 @@ int deactivate_all_fds(void)
 		);
 		to_free = to_free->next;
 	}
-	garbage_collect_irq_entries();
-	spin_unlock_irqrestore(&irq_lock, flags);
+	/* don't garbage collect - we can no longer call kfree() here */
 	os_close_epoll_fd();
 	return 0;
 }
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 691b83b10649..67c0d1a860e9 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -203,10 +203,50 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 	kmalloc_ok = save_kmalloc_ok;
 }
 
+static void time_travel_sleep(unsigned long long duration)
+{
+	unsigned long long next = time_travel_time + duration;
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
+	if (time_travel_timer_mode != TT_TMR_DISABLED ||
+	    time_travel_timer_expiry < next) {
+		if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			time_travel_set_timer(TT_TMR_DISABLED, 0);
+		/*
+		 * time_travel_time will be adjusted in the timer
+		 * IRQ handler so it works even when the signal
+		 * comes from the OS timer
+		 */
+		deliver_alarm();
+	} else {
+		time_travel_set_time(next);
+	}
+
+	if (time_travel_mode != TT_MODE_INFCPU) {
+		if (time_travel_timer_mode == TT_TMR_PERIODIC)
+			os_timer_set_interval(time_travel_timer_interval);
+		else if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			os_timer_one_shot(time_travel_timer_expiry - next);
+	}
+}
+
+static void um_idle_sleep(void)
+{
+	unsigned long long duration = UM_NSEC_PER_SEC;
+
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_sleep(duration);
+	} else {
+		os_idle_sleep(duration);
+	}
+}
+
 void arch_cpu_idle(void)
 {
 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
-	os_idle_sleep(UM_NSEC_PER_SEC);
+	um_idle_sleep();
 	local_irq_enable();
 }
 
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index 0b76d8869c94..5bd3edfcfedf 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -12,4 +12,6 @@ obj-y := clone.o mmu.o process.o syscall.o uaccess.o
 CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
 UNPROFILE_OBJS := clone.o
 
+KCOV_INSTRUMENT := n
+
 include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b783ac87d98a..44bb10785075 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,12 +10,23 @@
 #include <sysdep/ptrace.h>
 #include <sysdep/ptrace_user.h>
 #include <sysdep/syscalls.h>
+#include <shared/timer-internal.h>
 
 void handle_syscall(struct uml_pt_regs *r)
 {
 	struct pt_regs *regs = container_of(r, struct pt_regs, regs);
 	int syscall;
 
+	/*
+	 * If we have infinite CPU resources, then make every syscall also a
+	 * preemption point, since we don't have any other preemption in this
+	 * case, and kernel threads would basically never run until userspace
+	 * went to sleep, even if said userspace interacts with the kernel in
+	 * various ways.
+	 */
+	if (time_travel_mode == TT_MODE_INFCPU)
+		schedule();
+
 	/* Initialize the syscall number and default return value. */
 	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 0c572a48158e..6a051b078359 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -19,11 +19,29 @@
 #include <kern_util.h>
 #include <os.h>
 #include <timer-internal.h>
+#include <shared/init.h>
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+enum time_travel_mode time_travel_mode;
+unsigned long long time_travel_time;
+enum time_travel_timer_mode time_travel_timer_mode;
+unsigned long long time_travel_timer_expiry;
+unsigned long long time_travel_timer_interval;
+
+static bool time_travel_start_set;
+static unsigned long long time_travel_start;
+#else
+#define time_travel_start_set 0
+#define time_travel_start 0
+#endif
 
 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 {
 	unsigned long flags;
 
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_time(time_travel_timer_expiry);
+
 	local_irq_save(flags);
 	do_IRQ(TIMER_IRQ, regs);
 	local_irq_restore(flags);
@@ -31,26 +49,47 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 
 static int itimer_shutdown(struct clock_event_device *evt)
 {
-	os_timer_disable();
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_DISABLED, 0);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
 	return 0;
 }
 
 static int itimer_set_periodic(struct clock_event_device *evt)
 {
-	os_timer_set_interval(NULL, NULL);
+	unsigned long long interval = NSEC_PER_SEC / HZ;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_PERIODIC,
+				      time_travel_time + interval);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_set_interval(interval);
+
 	return 0;
 }
 
 static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
 {
-	return os_timer_one_shot(delta);
+	delta += 1;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_ONESHOT,
+				      time_travel_time + delta);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		return os_timer_one_shot(delta);
+
+	return 0;
 }
 
 static int itimer_one_shot(struct clock_event_device *evt)
 {
-	os_timer_one_shot(1);
-	return 0;
+	return itimer_next_event(0, evt);
 }
 
 static struct clock_event_device timer_clockevent = {
@@ -87,6 +126,17 @@ static irqreturn_t um_timer(int irq, void *dev)
 
 static u64 timer_read(struct clocksource *cs)
 {
+	if (time_travel_mode != TT_MODE_OFF) {
+		/*
+		 * We make reading the timer cost a bit so that we don't get
+		 * stuck in loops that expect time to move more than the
+		 * exact requested sleep amount, e.g. python's socket server,
+		 * see https://bugs.python.org/issue37026.
+		 */
+		time_travel_set_time(time_travel_time + TIMER_MULTIPLIER);
+		return time_travel_time / TIMER_MULTIPLIER;
+	}
+
 	return os_nsecs() / TIMER_MULTIPLIER;
 }
 
@@ -107,7 +157,7 @@ static void __init um_timer_setup(void)
 		printk(KERN_ERR "register_timer : request_irq failed - "
 		       "errno = %d\n", -err);
 
-	err = os_timer_create(NULL);
+	err = os_timer_create();
 	if (err != 0) {
 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
 		return;
@@ -123,7 +173,12 @@ static void __init um_timer_setup(void)
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
-	long long nsecs = os_persistent_clock_emulation();
+	long long nsecs;
+
+	if (time_travel_start_set)
+		nsecs = time_travel_start + time_travel_time;
+	else
+		nsecs = os_persistent_clock_emulation();
 
 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
 				  nsecs % NSEC_PER_SEC);
@@ -134,3 +189,65 @@ void __init time_init(void)
 	timer_set_signal_handler();
 	late_time_init = um_timer_setup;
 }
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+unsigned long calibrate_delay_is_known(void)
+{
+	if (time_travel_mode == TT_MODE_INFCPU)
+		return 1;
+	return 0;
+}
+
+int setup_time_travel(char *str)
+{
+	if (strcmp(str, "=inf-cpu") == 0) {
+		time_travel_mode = TT_MODE_INFCPU;
+		timer_clockevent.name = "time-travel-timer-infcpu";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	if (!*str) {
+		time_travel_mode = TT_MODE_BASIC;
+		timer_clockevent.name = "time-travel-timer";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	return -EINVAL;
+}
+
+__setup("time-travel", setup_time_travel);
+__uml_help(setup_time_travel,
+"time-travel\n"
+"This option just enables basic time travel mode, in which the clock/timers\n"
+"inside the UML instance skip forward when there's nothing to do, rather than\n"
+"waiting for real time to elapse. However, instance CPU speed is limited by\n"
+"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
+"clock (but quicker when there's nothing to do).\n"
+"\n"
+"time-travel=inf-cpu\n"
+"This enables time travel mode with infinite processing power, in which there\n"
+"are no wall clock timers, and any CPU processing happens - as seen from the\n"
+"guest - instantly. This can be useful for accurate simulation regardless of\n"
+"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
+"easily lead to getting stuck (e.g. if anything in the system busy loops).\n");
+
+int setup_time_travel_start(char *str)
+{
+	int err;
+
+	err = kstrtoull(str, 0, &time_travel_start);
+	if (err)
+		return err;
+
+	time_travel_start_set = 1;
+	return 1;
+}
+
+__setup("time-travel-start", setup_time_travel_start);
+__uml_help(setup_time_travel_start,
+"time-travel-start=<seconds>\n"
+"Configure the UML instance's wall clock to start at this value rather than\n"
+"the host's wall clock at the time of UML boot.\n");
+#endif
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 0e39b9978729..6d94ff52362c 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -26,11 +26,11 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
-	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) +
-		ts->tv_nsec;
+	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
 }
 
-long long os_persistent_clock_emulation (void) {
+long long os_persistent_clock_emulation(void)
+{
 	struct timespec realtime_tp;
 
 	clock_gettime(CLOCK_REALTIME, &realtime_tp);
@@ -40,94 +40,41 @@ long long os_persistent_clock_emulation (void) {
 /**
  * os_timer_create() - create an new posix (interval) timer
  */
-int os_timer_create(void* timer) {
-
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
+int os_timer_create(void)
+{
+	timer_t *t = &event_high_res_timer;
 
-	if (timer_create(
-		CLOCK_MONOTONIC,
-		NULL,
-		t) == -1) {
+	if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1)
 		return -1;
-	}
+
 	return 0;
 }
 
-int os_timer_set_interval(void* timer, void* i)
+int os_timer_set_interval(unsigned long long nsecs)
 {
 	struct itimerspec its;
-	unsigned long long nsec;
-	timer_t* t = timer;
-	struct itimerspec* its_in = i;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
 
-	nsec = UM_NSEC_PER_SEC / UM_HZ;
+	its.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	if(its_in != NULL) {
-		its.it_value.tv_sec = its_in->it_value.tv_sec;
-		its.it_value.tv_nsec = its_in->it_value.tv_nsec;
-	} else {
-		its.it_value.tv_sec = 0;
-		its.it_value.tv_nsec = nsec;
-	}
+	its.it_interval.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_interval.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = nsec;
-
-	if(timer_settime(*t, 0, &its, NULL) == -1) {
+	if (timer_settime(event_high_res_timer, 0, &its, NULL) == -1)
 		return -errno;
-	}
 
 	return 0;
 }
 
-/**
- * os_timer_remain() - returns the remaining nano seconds of the given interval
- * timer
- * Because this is the remaining time of an interval timer, which correspondends
- * to HZ, this value can never be bigger than one second. Just
- * the nanosecond part of the timer is returned.
- * The returned time is relative to the start time of the interval timer.
- * Return an negative value in an error case.
- */
-long os_timer_remain(void* timer)
+int os_timer_one_shot(unsigned long long nsecs)
 {
-	struct itimerspec its;
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
-
-	if(timer_gettime(t, &its) == -1) {
-		return -errno;
-	}
+	struct itimerspec its = {
+		.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC,
 
-	return its.it_value.tv_nsec;
-}
-
-int os_timer_one_shot(int ticks)
-{
-	struct itimerspec its;
-	unsigned long long nsec;
-	unsigned long sec;
-
-	nsec = (ticks + 1);
-	sec = nsec / UM_NSEC_PER_SEC;
-	nsec = nsec % UM_NSEC_PER_SEC;
-
-	its.it_value.tv_sec = nsec / UM_NSEC_PER_SEC;
-	its.it_value.tv_nsec = nsec;
-
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = 0; // we cheat here
+		.it_interval.tv_sec = 0,
+		.it_interval.tv_nsec = 0, // we cheat here
+	};
 
 	timer_settime(event_high_res_timer, 0, &its, NULL);
 	return 0;
@@ -135,24 +82,13 @@ int os_timer_one_shot(int ticks)
 
 /**
  * os_timer_disable() - disable the posix (interval) timer
- * Returns the remaining interval timer time in nanoseconds
 */
-long long os_timer_disable(void)
+void os_timer_disable(void)
 {
 	struct itimerspec its;
 
 	memset(&its, 0, sizeof(struct itimerspec));
-	timer_settime(event_high_res_timer, 0, &its, &its);
-
-	return its.it_value.tv_sec * UM_NSEC_PER_SEC + its.it_value.tv_nsec;
-}
-
-long long os_vnsecs(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&ts);
-	return timespec_to_ns(&ts);
+	timer_settime(event_high_res_timer, 0, &its, NULL);
 }
 
 long long os_nsecs(void)
@@ -169,21 +105,14 @@ long long os_nsecs(void)
  */
 void os_idle_sleep(unsigned long long nsecs)
 {
-	struct timespec ts;
-
-	if (nsecs <= 0) {
-		return;
-	}
-
-	ts = ((struct timespec) {
-		.tv_sec = nsecs / UM_NSEC_PER_SEC,
-		.tv_nsec = nsecs % UM_NSEC_PER_SEC
-	});
+	struct timespec ts = {
+		.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.tv_nsec = nsecs % UM_NSEC_PER_SEC
+	};
 
 	/*
 	 * Relay the signal if clock_nanosleep is interrupted.
 	 */
-	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL)) {
+	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL))
 		deliver_alarm();
-	}
 }
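Usage note (not part of the diff above): with CONFIG_UML_TIME_TRAVEL_SUPPORT enabled, the new parameters documented in the __uml_help text are passed on the UML kernel command line. A sketch of such an invocation, where the binary name, memory size and block-device options are placeholders and not taken from the patch:

    # boot a UML instance in infinite-CPU time-travel mode, with the guest
    # wall clock starting at a fixed Unix timestamp instead of the host clock
    ./linux mem=128M ubd0=./rootfs.img root=/dev/ubda time-travel=inf-cpu time-travel-start=1546300800

Here "time-travel" alone selects the basic mode, "time-travel=inf-cpu" selects the infinite-CPU mode handled by setup_time_travel(), and "time-travel-start=<seconds>" makes read_persistent_clock64() report that start value plus the travelled time rather than os_persistent_clock_emulation().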