// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN compiler API.
 *
 * This file implements __msan_XXX hooks that Clang inserts into the code
 * compiled with -fsanitize=kernel-memory.
 * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
 * instrumentation works.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include "kmsan.h"
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/kmsan_string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) &&
	    (u64)addr < TASK_SIZE)
		return true;
	if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
		return true;
	return false;
}

static inline struct shadow_origin_ptr
get_shadow_origin_ptr(void *addr, u64 size, bool store)
{
	unsigned long ua_flags = user_access_save();
	struct shadow_origin_ptr ret;

	ret = kmsan_get_shadow_origin_ptr(addr, size, store);
	user_access_restore(ua_flags);
	return ret;
}

/*
 * KMSAN instrumentation functions follow. They are not declared anywhere else
 * in the kernel code, so each definition is preceded by a prototype to
 * silence -Wmissing-prototypes warnings.
 */
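
/*
 * As a rough illustration (a sketch, not literal compiler output), a 4-byte
 * load such as `val = *p;` in instrumented code is lowered to something like
 *
 *	struct shadow_origin_ptr sop = __msan_metadata_ptr_for_load_4(p);
 *	u32 shadow_of_val = *(u32 *)sop.shadow;
 *	u32 origin_of_val = *(u32 *)sop.origin;
 *	val = *p;
 *
 * i.e. the runtime only hands back the metadata pointers, and the generated
 * code performs the actual shadow/origin accesses itself.
 */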

/* Get shadow and origin pointers for a memory load with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
							uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ false);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);

/* Get shadow and origin pointers for a memory store with non-standard size. */
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size);
struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
							 uintptr_t size)
{
	return get_shadow_origin_ptr(addr, size, /*store*/ true);
}
EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);

/*
 * Declare functions that obtain shadow/origin pointers for loads and stores
 * with fixed size.
 */
#define DECLARE_METADATA_PTR_GETTER(size)                                  \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(      \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ false); \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size);                \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr);                                               \
	struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(     \
		void *addr)                                                \
	{                                                                  \
		return get_shadow_origin_ptr(addr, size, /*store*/ true);  \
	}                                                                  \
	EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)

DECLARE_METADATA_PTR_GETTER(1);
DECLARE_METADATA_PTR_GETTER(2);
DECLARE_METADATA_PTR_GETTER(4);
DECLARE_METADATA_PTR_GETTER(8);
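
/*
 * For reference, DECLARE_METADATA_PTR_GETTER(1) above expands to roughly:
 *
 *	struct shadow_origin_ptr __msan_metadata_ptr_for_load_1(void *addr)
 *	{
 *		return get_shadow_origin_ptr(addr, 1, false);
 *	}
 *
 * plus the matching __msan_metadata_ptr_for_store_1() and the EXPORT_SYMBOL()
 * statements, so the compiler can emit calls for 1-, 2-, 4- and 8-byte
 * accesses without passing the access size at runtime.
 */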

/*
 * Handle a memory store performed by inline assembly. KMSAN conservatively
 * attempts to unpoison the outputs of asm() directives to prevent false
 * positives caused by missed stores.
 *
 * __msan_instrument_asm_store() may be called for inline assembly code when
 * entering or leaving IRQ. We omit the check for kmsan_in_runtime() to ensure
 * the memory written to in these cases is also marked as initialized.
 */
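/*
 * For example (a hedged sketch, not literal compiler output), an asm
 * statement with a memory output operand, such as
 *
 *	asm volatile("..." : "=m"(var));
 *
 * gets a __msan_instrument_asm_store(&var, sizeof(var)) call emitted next to
 * it, so that @var is considered initialized from that point on.
 */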
void __msan_instrument_asm_store(void *addr, uintptr_t size);
void __msan_instrument_asm_store(void *addr, uintptr_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/*
	 * Most of the accesses are below 32 bytes. The exceptions so far are
	 * clwb() (64 bytes), FPU state (512 bytes) and chsc() (4096 bytes).
	 */
	if (size > 4096) {
		WARN_ONCE(1, "assembly store size too big: %ld\n", size);
		size = 8;
	}
	if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
		user_access_restore(ua_flags);
		return;
	}
	/* Unpoison the memory on a best-effort basis. */
	kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_instrument_asm_store);

/*
 * KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
 * intrinsics with calls to respective __msan_ functions. We use
 * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
 * values for the destination argument of these functions and use them for the
 * functions' return values.
 */
static inline void get_param0_metadata(u64 *shadow,
				       depot_stack_handle_t *origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*shadow = *(u64 *)(ctx->cstate.param_tls);
	*origin = ctx->cstate.param_origin_tls[0];
}

static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	*(u64 *)(ctx->cstate.retval_tls) = shadow;
	ctx->cstate.retval_origin_tls = origin;
}
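
/*
 * A hedged sketch of that metadata flow for `p = memcpy(dst, src, n);` in
 * instrumented code (not literal compiler output):
 *
 *	shadow/origin of dst  ->  ctx->cstate.param_tls / param_origin_tls[0]
 *	p = __msan_memcpy(dst, src, n);
 *	shadow/origin of p    <-  ctx->cstate.retval_tls / retval_origin_tls
 *
 * get_param0_metadata() reads the slot for @dst, and set_retval_metadata()
 * copies it to the return value slot, so the returned pointer carries the
 * same shadow/origin as the destination argument.
 */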

/* Handle llvm.memmove intrinsic. */
void *__msan_memmove(void *dst, const void *src, uintptr_t n);
void *__msan_memmove(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memmove(dst, src, n);
	if (!n)
		/* Some people call memmove() with zero length. */
		return result;
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memmove);

/* Handle llvm.memcpy intrinsic. */
void *__msan_memcpy(void *dst, const void *src, uintptr_t n);
void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memcpy(dst, src, n);
	if (!n)
		/* Some people call memcpy() with zero length. */
		return result;

	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/* Using memmove instead of memcpy doesn't affect correctness. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memcpy);

/* Handle llvm.memset intrinsic. */
void *__msan_memset(void *dst, int c, uintptr_t n);
void *__msan_memset(void *dst, int c, uintptr_t n)
{
	depot_stack_handle_t origin;
	void *result;
	u64 shadow;

	get_param0_metadata(&shadow, &origin);
	result = __memset(dst, c, n);
	if (!kmsan_enabled || kmsan_in_runtime())
		return result;

	kmsan_enter_runtime();
	/*
	 * Clang doesn't pass parameter metadata here, so it is impossible to
	 * use shadow of @c to set up the shadow for @dst.
	 */
	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
	kmsan_leave_runtime();

	set_retval_metadata(shadow, origin);
	return result;
}
EXPORT_SYMBOL(__msan_memset);

/*
 * Create a new origin from an old one. This is done when storing an
 * uninitialized value to memory. When reporting an error, KMSAN unrolls and
 * prints the whole chain of stores that preceded the use of this value.
 */
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin);
depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
{
	depot_stack_handle_t ret = 0;
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return ret;

	ua_flags = user_access_save();

	/* Creating new origins may allocate memory. */
	kmsan_enter_runtime();
	ret = kmsan_internal_chain_origin(origin);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
	return ret;
}
EXPORT_SYMBOL(__msan_chain_origin);
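
/*
 * A hedged sketch of how the compiler uses this when storing a potentially
 * uninitialized value (not literal compiler output):
 *
 *	if (shadow_of_val)
 *		origin_of_val = __msan_chain_origin(origin_of_val);
 *	*shadow_ptr_for_dst = shadow_of_val;
 *	*origin_ptr_for_dst = origin_of_val;
 *	*dst = val;
 *
 * so each store of uninitialized data can append one link to the chain that
 * kmsan_report() later unrolls.
 */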

/* Poison a local variable when entering a function. */
void __msan_poison_alloca(void *address, uintptr_t size, char *descr);
void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
{
	depot_stack_handle_t handle;
	unsigned long entries[4];
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
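	/*
	 * Assemble a synthetic stack trace: KMSAN_ALLOCA_MAGIC_ORIGIN tells
	 * the reporting code to interpret the remaining entries as a variable
	 * description and caller addresses rather than a real unwind result.
	 */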
	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
	entries[1] = (u64)descr;
	entries[2] = (u64)__builtin_return_address(0);
	/*
	 * With frame pointers enabled, it is possible to quickly fetch the
	 * second frame of the caller stack without calling the unwinder.
	 * Without them, simply do not bother.
	 */
	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
		entries[3] = (u64)__builtin_return_address(1);
	else
		entries[3] = 0;

	/* stack_depot_save() may allocate memory. */
	kmsan_enter_runtime();
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
	kmsan_leave_runtime();

	kmsan_internal_set_shadow_origin(address, size, -1, handle,
					 /*checked*/ true);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__msan_poison_alloca);

/* Unpoison a local variable. */
void __msan_unpoison_alloca(void *address, uintptr_t size);
void __msan_unpoison_alloca(void *address, uintptr_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_unpoison_alloca);

/*
 * Report that an uninitialized value with the given origin was used in a way
 * that constituted undefined behavior.
 */
void __msan_warning(u32 origin);
void __msan_warning(u32 origin)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_report(origin, /*address*/ NULL, /*size*/ 0,
		     /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ NULL,
		     REASON_ANY);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(__msan_warning);

/*
 * At the beginning of an instrumented function, obtain the pointer to
 * `struct kmsan_context_state` holding the metadata for function parameters.
 */
struct kmsan_context_state *__msan_get_context_state(void);
struct kmsan_context_state *__msan_get_context_state(void)
{
	return &kmsan_get_context()->cstate;
}
EXPORT_SYMBOL(__msan_get_context_state);
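
/*
 * A hedged sketch of how instrumented code uses the context state (not
 * literal compiler output): each instrumented function starts with roughly
 *
 *	struct kmsan_context_state *kcs = __msan_get_context_state();
 *
 * then reads the shadow/origin of its parameters from kcs->param_tls /
 * kcs->param_origin_tls, and stores the metadata of its return value into
 * kcs->retval_tls / kcs->retval_origin_tls before returning.
 */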