// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
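
/*
 * Summary of the on-media map entry states, as encoded by btt_map_write()
 * and decoded by btt_map_read() below (see btt.h for the bit definitions):
 *   Z E
 *   0 0	initial state: reads return the identity (premap == postmap)
 *   0 1	'error' flag set
 *   1 0	'zero'/trim flag set
 *   1 1	normal, in-use map entry
 */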
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: this logic feels a bit kludgey; make it better.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
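
/*
 * A worked example of the above: sequence numbers cycle 1 -> 2 -> 3 -> 1
 * (see btt_flog_write()). For the pair (1, 2) the '1' entry is older
 * (old = 0); for the wraparound pair (1, 3) the '1' entry is actually the
 * newer one (old = 1). Equal values, or a sum greater than 5, can never
 * occur for adjacent values in the cycle, and indicate corruption.
 */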

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log.ent[arena->log_index[0]].seq,
				log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
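/*
 * Concretely: lane i's first log slot is seeded so that internal block
 * (external_nlba + i) -- one of the 'nfree' reserved blocks beyond the
 * externally visible range -- starts out as that lane's free block.
 */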
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

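/* translate an internal (post-map) block number to a namespace byte offset */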
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
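/*
 * For example (a sketch of the two layouts this detects):
 *   'new' format: slots { ent0, ent1, pad,  pad  } -> log_index = (0, 1)
 *   'old' format: slots { ent0, pad,  ent1, pad  } -> log_index = (0, 2)
 */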
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
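
	/*
	 * The resulting on-media arena layout is therefore:
	 *   [ info | data | map | log | info2 ]
	 * with each region sized and aligned as computed above.
	 */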

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}
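
/*
 * Bytes of per-sector integrity metadata implied by an external lbasize
 * larger than the sector size (consumed by btt_rw_integrity() below).
 */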

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
						&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			int rc;

			/* Media error - set the e_flag */
			rc = btt_map_write(arena, premap, postmap, 0, 1,
				NVDIMM_IO_ATOMIC);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			unsigned int op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(op_is_write(bio_op(bio))) ? "WRITE" :
					"READ",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, unsigned int op)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
	btt->btt_disk->queue->backing_dev_info->capabilities |=
			BDI_CAP_SYNCHRONOUS_IO;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @maxlane:	maximum number of parallel requests the device can handle
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if