path: root/fs/xfs/scrub/xfile.c
blob: 9b5d98fe1f8ab3125bce69622d8e63edd887d3fa
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "scrub/scrub.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/trace.h"
#include <linux/shmem_fs.h>

/*
 * Swappable Temporary Memory
 * ==========================
 *
 * Online checking sometimes needs to be able to stage a large amount of data
 * in memory.  This information might not fit in the available memory and it
 * doesn't all need to be accessible at all times.  In other words, we want an
 * indexed data buffer to store data that can be paged out.
 *
 * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
 * requirements.  Therefore, the xfile mechanism uses an unlinked shmem file to
 * store our staging data.  This file is not installed in the file descriptor
 * table so that user programs cannot access the data, which means that the
 * xfile must be freed with xfile_destroy.
 *
 * xfiles assume that the caller will handle all required concurrency
 * management; standard vfs locks (freezer and inode) are not taken.  Reads
 * and writes are satisfied directly from the page cache.
 */
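
/*
 * For illustration, a minimal sketch of the lifecycle described above.
 * This sketch is not part of the original file: "struct xrec" is a
 * hypothetical fixed-size record type and the error handling is an
 * assumed convention, not a requirement.
 *
 *	struct xfile	*xf;
 *	struct xrec	rec;
 *	int		error;
 *
 *	error = xfile_create("example staging data", 0, &xf);
 *	if (error)
 *		return error;
 *
 *	error = xfile_store(xf, &rec, sizeof(rec), 42 * sizeof(rec));
 *	if (!error)
 *		error = xfile_load(xf, &rec, sizeof(rec), 42 * sizeof(rec));
 *
 *	xfile_destroy(xf);
 *	return error;
 */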

/*
 * xfiles must not be exposed to userspace and require upper layers to
 * coordinate access to the one handle returned by the constructor, so
 * establish a separate lock class for xfiles to avoid confusing lockdep.
 */
static struct lock_class_key xfile_i_mutex_key;

/*
 * Create an xfile of the given size.  The description will be used in the
 * trace output.
 */
int
xfile_create(
	const char		*description,
	loff_t			isize,
	struct xfile		**xfilep)
{
	struct inode		*inode;
	struct xfile		*xf;
	int			error;

	xf = kmalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
	if (!xf)
		return -ENOMEM;

	xf->file = shmem_kernel_file_setup(description, isize, VM_NORESERVE);
	if (IS_ERR(xf->file)) {
		error = PTR_ERR(xf->file);
		goto out_xfile;
	}

	inode = file_inode(xf->file);
	lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);

	/*
	 * We don't want to bother with kmapping data during repair, so don't
	 * allow highmem pages to back this mapping.
	 */
	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);

	trace_xfile_create(xf);

	*xfilep = xf;
	return 0;
out_xfile:
	kfree(xf);
	return error;
}

/* Close the file and release all resources. */
void
xfile_destroy(
	struct xfile		*xf)
{
	struct inode		*inode = file_inode(xf->file);

	trace_xfile_destroy(xf);

	lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
	fput(xf->file);
	kfree(xf);
}

/*
 * Load an object.  Since we're treating this file as "memory", any error or
 * short IO is treated as a failure to allocate memory.
 */
int
xfile_load(
	struct xfile		*xf,
	void			*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;

	if (count > MAX_RW_COUNT)
		return -ENOMEM;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -ENOMEM;

	trace_xfile_load(xf, pos, count);

	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct folio	*folio;
		unsigned int	len;
		unsigned int	offset;

		if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
				SGP_READ) < 0)
			break;
		if (!folio) {
			/*
			 * No data stored at this offset, just zero the output
			 * buffer until the next page boundary.
			 */
			len = min_t(ssize_t, count,
				PAGE_SIZE - offset_in_page(pos));
			memset(buf, 0, len);
		} else {
			if (filemap_check_wb_err(inode->i_mapping, 0)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			offset = offset_in_folio(folio, pos);
			len = min_t(ssize_t, count, folio_size(folio) - offset);
			memcpy(buf, folio_address(folio) + offset, len);

			folio_unlock(folio);
			folio_put(folio);
		}
		count -= len;
		pos += len;
		buf += len;
	}
	memalloc_nofs_restore(pflags);

	if (count)
		return -ENOMEM;
	return 0;
}
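
/*
 * Hypothetical wrapper (not from the original file) showing the error
 * contract above: the only failure a caller sees is -ENOMEM, and a load
 * from a never-written (sparse) region simply fills the buffer with
 * zeroes:
 *
 *	static int
 *	example_load_rec(struct xfile *xf, struct xrec *rec, uint64_t idx)
 *	{
 *		return xfile_load(xf, rec, sizeof(*rec),
 *				idx * sizeof(*rec));
 *	}
 */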

/*
 * Store an object.  Since we're treating this file as "memory", any error or
 * short IO is treated as a failure to allocate memory.
 */
int
xfile_store(
	struct xfile		*xf,
	const void		*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;

	if (count > MAX_RW_COUNT)
		return -ENOMEM;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -ENOMEM;

	trace_xfile_store(xf, pos, count);

	/*
	 * Increase the file size first so that shmem_get_folio(..., SGP_CACHE)
	 * actually allocates a folio instead of erroring out.
	 */
	if (pos + count > i_size_read(inode))
		i_size_write(inode, pos + count);

	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct folio	*folio;
		unsigned int	len;
		unsigned int	offset;

		if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
				SGP_CACHE) < 0)
			break;
		if (filemap_check_wb_err(inode->i_mapping, 0)) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}

		offset = offset_in_folio(folio, pos);
		len = min_t(ssize_t, count, folio_size(folio) - offset);
		memcpy(folio_address(folio) + offset, buf, len);

		folio_mark_dirty(folio);
		folio_unlock(folio);
		folio_put(folio);

		count -= len;
		pos += len;
		buf += len;
	}
	memalloc_nofs_restore(pflags);

	if (count)
		return -ENOMEM;
	return 0;
}
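
/*
 * A hedged caller sketch: because xfile_store extends i_size itself, a
 * write at any offset below s_maxbytes needs no separate truncate call,
 * and the untouched range below the write remains a sparse hole:
 *
 *	error = xfile_store(xf, buf, len, 1ULL << 30);
 */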

/* Find the next written area in the xfile data for a given offset. */
loff_t
xfile_seek_data(
	struct xfile		*xf,
	loff_t			pos)
{
	loff_t			ret;

	ret = vfs_llseek(xf->file, pos, SEEK_DATA);
	trace_xfile_seek_data(xf, pos, ret);
	return ret;
}
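
/*
 * Illustrative loop (not from the original file) that visits only the
 * written regions of a sparse xfile.  vfs_llseek returns -ENXIO once no
 * data remains at or beyond pos, which ends the walk; process_region()
 * is a hypothetical helper returning the number of bytes it consumed:
 *
 *	loff_t	pos = 0;
 *
 *	while ((pos = xfile_seek_data(xf, pos)) >= 0)
 *		pos += process_region(xf, pos);
 */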

/*
 * Grab the (locked) folio for a memory object.  The object cannot span a folio
 * boundary.  Returns the locked folio if successful, NULL if there was no
 * folio or it didn't cover the range requested, or an ERR_PTR on failure.
 */
struct folio *
xfile_get_folio(
	struct xfile		*xf,
	loff_t			pos,
	size_t			len,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(xf->file);
	struct folio		*folio = NULL;
	unsigned int		pflags;
	int			error;

	if (inode->i_sb->s_maxbytes - pos < len)
		return ERR_PTR(-ENOMEM);

	trace_xfile_get_folio(xf, pos, len);

	/*
	 * Increase the file size first so that shmem_get_folio(..., SGP_CACHE)
	 * actually allocates a folio instead of erroring out.
	 */
	if ((flags & XFILE_ALLOC) && pos + len > i_size_read(inode))
		i_size_write(inode, pos + len);

	pflags = memalloc_nofs_save();
	error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
			(flags & XFILE_ALLOC) ? SGP_CACHE : SGP_READ);
	memalloc_nofs_restore(pflags);
	if (error)
		return ERR_PTR(error);

	if (!folio)
		return NULL;

	if (len > folio_size(folio) - offset_in_folio(folio, pos)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}

	if (filemap_check_wb_err(inode->i_mapping, 0)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	/*
	 * Mark the folio dirty so that it won't be reclaimed once we drop the
	 * (potentially last) reference in xfile_put_folio.
	 */
	if (flags & XFILE_ALLOC)
		folio_mark_dirty(folio);
	return folio;
}

/*
 * Release the (locked) folio for a memory object.
 */
void
xfile_put_folio(
	struct xfile		*xf,
	struct folio		*folio)
{
	trace_xfile_put_folio(xf, folio_pos(folio), folio_size(folio));

	folio_unlock(folio);
	folio_put(folio);
}
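
/*
 * A minimal sketch of direct folio access, assuming the object fits in a
 * single folio; this avoids the memcpy that xfile_load and xfile_store
 * would otherwise perform.  xfile_get_folio returns NULL when no folio
 * covers the whole range, which the sketch maps to -ENOMEM:
 *
 *	struct folio	*folio;
 *	void		*p;
 *
 *	folio = xfile_get_folio(xf, pos, sizeof(struct xrec), XFILE_ALLOC);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	if (!folio)
 *		return -ENOMEM;
 *
 *	p = folio_address(folio) + offset_in_folio(folio, pos);
 *	... modify the object through p; the folio stays locked ...
 *	xfile_put_folio(xf, folio);
 */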

/* Discard the page cache that's backing a range of the xfile. */
void
xfile_discard(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	trace_xfile_discard(xf, pos, count);

	shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
}
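
/*
 * Hypothetical usage: once a range of staging data has been consumed,
 * discarding it releases the backing pages immediately instead of
 * holding them until xfile_destroy; "processed_bytes" is an assumed
 * caller-side count:
 *
 *	xfile_discard(xf, 0, processed_bytes);
 */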