author     Ahmed S. Darwish <a.darwish@linutronix.de>    2020-08-27 13:40:40 +0200
committer  Peter Zijlstra <peterz@infradead.org>         2020-09-10 11:19:29 +0200
commit     a690ed07353ec45f056b0a6f87c23a12a59c030d (patch)
tree       2951501cb8a5869d891a28b73a3a86cf0a3b5367 /kernel/time/sched_clock.c
parent     80793c3471d90d4dc2b48deadb6413bdfe39500f (diff)
time/sched_clock: Use seqcount_latch_t
Latch sequence counters have unique read and write APIs, and thus
seqcount_latch_t was recently introduced in seqlock.h. Use that new data
type instead of plain seqcount_t. This adds the necessary type-safety and
ensures that only latching-safe seqcount APIs are used.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200827114044.11173-5-a.darwish@linutronix.de
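For context, the read/write discipline that seqcount_latch_t enforces looks
roughly like the sketch below, modeled on the example documented in
include/linux/seqlock.h. The latch_demo/demo_data types and the
demo_update()/demo_query() helpers are hypothetical names used only for
illustration; they are not part of this patch.

#include <linux/seqlock.h>
#include <linux/types.h>

/* Hypothetical data guarded by a latch: the writer keeps two copies. */
struct demo_data {
	u64	val;
};

struct latch_demo {
	seqcount_latch_t	seq;
	struct demo_data	data[2];
};

/*
 * Writer: flip the latch, then update each copy in turn.
 * Concurrent writers must be serialized externally (e.g. by a lock).
 */
static void demo_update(struct latch_demo *d, u64 val)
{
	raw_write_seqcount_latch(&d->seq);
	d->data[0].val = val;
	raw_write_seqcount_latch(&d->seq);
	d->data[1].val = val;
}

/* Reader: use the stable copy selected by the sequence's low bit. */
static u64 demo_query(struct latch_demo *d)
{
	unsigned int seq;
	u64 ret;

	do {
		seq = raw_read_seqcount_latch(&d->seq);
		ret = d->data[seq & 1].val;
	} while (read_seqcount_latch_retry(&d->seq, seq));

	return ret;
}

Because readers always have one stable copy to fall back on, this scheme is
safe even from NMI context, which is why sched_clock() relies on it.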
Diffstat (limited to 'kernel/time/sched_clock.c')
-rw-r--r--  kernel/time/sched_clock.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 8c6b5febd7a0..0642013dace4 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -35,7 +35,7 @@
  * into a single 64-byte cache line.
  */
 struct clock_data {
-	seqcount_t		seq;
+	seqcount_latch_t	seq;
 	struct clock_read_data read_data[2];
 	ktime_t			wrap_kt;
 	unsigned long		rate;
@@ -76,7 +76,7 @@ struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 
 int sched_clock_read_retry(unsigned int seq)
 {
-	return read_seqcount_retry(&cd.seq, seq);
+	return read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 unsigned long long notrace sched_clock(void)
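For reference, the reader that consumes these two helpers is the retry loop
in sched_clock() further down this file. The following is a simplified
sketch (not the verbatim kernel source) of how that loop pairs
sched_clock_read_begin() with sched_clock_read_retry():

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		/* Pick the currently stable read_data[] copy. */
		rd = sched_clock_read_begin(&seq);

		/* Convert the raw counter delta into nanoseconds. */
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}

With seqcount_latch_t, the retry check must go through
read_seqcount_latch_retry(); the plain read_seqcount_retry() no longer
accepts the latch type, which is exactly the type-safety this patch is after.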