tree-log.c 174 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
3
4
5
6
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
7
#include <linux/slab.h>
8
#include <linux/blkdev.h>
Josef Bacik's avatar
Josef Bacik committed
9
#include <linux/list_sort.h>
10
#include <linux/iversion.h>
11
#include "ctree.h"
12
#include "tree-log.h"
13
14
15
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
Mark Fasheh's avatar
Mark Fasheh committed
16
#include "backref.h"
17
#include "compression.h"
18
#include "qgroup.h"
19
#include "inode-map.h"
20
21
22
23
24
25
26
27
28

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
29
#define LOG_OTHER_INODE 2
30
#define LOG_OTHER_INODE_ALL 3
31

32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 *  2a is actually the more important variant.  With the extra logging
 *  a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

75
76
77
78
79
80
81
82
83
84
85
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
86
87
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
88

89
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
90
			   struct btrfs_root *root, struct btrfs_inode *inode,
91
92
			   int inode_only,
			   const loff_t start,
93
94
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
95
96
97
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
98
99
100
101
102
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
133
134
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
135
{
136
	struct btrfs_fs_info *fs_info = root->fs_info;
137
	int ret = 0;
Yan Zheng's avatar
Yan Zheng committed
138
139

	mutex_lock(&root->log_mutex);
140

Yan Zheng's avatar
Yan Zheng committed
141
	if (root->log_root) {
142
		if (btrfs_need_log_full_commit(trans)) {
143
144
145
			ret = -EAGAIN;
			goto out;
		}
146

147
		if (!root->log_start_pid) {
148
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
149
			root->log_start_pid = current->pid;
150
		} else if (root->log_start_pid != current->pid) {
151
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
152
		}
153
	} else {
154
155
156
157
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
158
159
		if (ret)
			goto out;
160

161
		ret = btrfs_add_log_tree(trans, root);
162
		if (ret)
163
			goto out;
164
165
166

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
167
	}
168

Miao Xie's avatar
Miao Xie committed
169
	atomic_inc(&root->log_batch);
Yan Zheng's avatar
Yan Zheng committed
170
	atomic_inc(&root->log_writers);
171
	if (ctx) {
172
		int index = root->log_transid % 2;
173
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
174
		ctx->log_transid = root->log_transid;
175
	}
176

177
out:
Yan Zheng's avatar
Yan Zheng committed
178
	mutex_unlock(&root->log_mutex);
179
	return ret;
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were not transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

Yan Zheng's avatar
Yan Zheng committed
195
	mutex_lock(&root->log_mutex);
196
197
	if (root->log_root) {
		ret = 0;
Yan Zheng's avatar
Yan Zheng committed
198
		atomic_inc(&root->log_writers);
199
	}
Yan Zheng's avatar
Yan Zheng committed
200
	mutex_unlock(&root->log_mutex);
201
202
203
	return ret;
}

204
205
206
207
208
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
209
void btrfs_pin_log_trans(struct btrfs_root *root)
210
211
212
213
214
215
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

216
217
218
219
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
220
void btrfs_end_log_trans(struct btrfs_root *root)
221
{
Yan Zheng's avatar
Yan Zheng committed
222
	if (atomic_dec_and_test(&root->log_writers)) {
223
224
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
Yan Zheng's avatar
Yan Zheng committed
225
	}
226
227
}

228
229
230
231
232
233
234
235
236
237
238
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
			        buf->start, buf->start + buf->len - 1);
}
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

270
271
272
273
274
275
276
	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

277
278
279
280
281
282
283
284
285
286
287
288
	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
289
			    struct walk_control *wc, u64 gen, int level);
290
291
292
293
294
295
296
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
297
			      struct walk_control *wc, u64 gen, int level)
298
{
299
	struct btrfs_fs_info *fs_info = log->fs_info;
300
301
	int ret = 0;

302
303
304
305
	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
306
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
307
		ret = btrfs_read_buffer(eb, gen, level, NULL);
308
309
310
311
		if (ret)
			return ret;
	}

Josef Bacik's avatar
Josef Bacik committed
312
	if (wc->pin)
313
314
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);
315

316
	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
317
		if (wc->pin && btrfs_header_level(eb) == 0)
318
			ret = btrfs_exclude_logged_extents(eb);
319
320
321
322
323
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
324
	return ret;
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
354
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
355
356
357
358
359
360
361
362
363

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
364
365
366
	if (ret < 0)
		return ret;

367
368
369
370
371
372
373
374
375
	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
376
			btrfs_release_path(path);
377
378
379
380
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
381
		if (!dst_copy || !src_copy) {
382
			btrfs_release_path(path);
383
384
385
386
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
404
			btrfs_release_path(path);
405
406
407
			return 0;
		}

408
409
410
411
412
413
414
		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
415
			u32 mode;
416
417
418
419
420
421
422

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);
423
424
425
426
427
428
429
430
431

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
432
433
434
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
435
		u32 mode;
436
437
438
439
440
441
442

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);
443
444
445
446
447
448
449
450
451

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
452
453
	}
insert:
454
	btrfs_release_path(path);
455
	/* try to insert the key into the destination tree */
456
	path->skip_release_on_error = 1;
457
458
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
459
	path->skip_release_on_error = 0;
460
461

	/* make sure any existing item is the correct size */
462
	if (ret == -EEXIST || ret == -EOVERFLOW) {
463
464
465
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
466
		if (found_size > item_size)
467
			btrfs_truncate_item(path, item_size, 1);
468
		else if (found_size < item_size)
469
			btrfs_extend_item(path, item_size - found_size);
470
	} else if (ret) {
471
		return ret;
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

492
493
		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
494
			const u64 ino_size = btrfs_inode_size(eb, src_item);
495

496
497
498
499
500
501
502
			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
503
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
504
505
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
506
507
508
509
510
511
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
512
			goto no_copy;
513
		}
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
544
	btrfs_release_path(path);
545
546
547
548
549
550
551
552
553
554
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
555
	struct btrfs_key key;
556
557
	struct inode *inode;

558
559
560
	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
561
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
562
	if (IS_ERR(inode))
563
		inode = NULL;
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
585
	struct btrfs_fs_info *fs_info = root->fs_info;
586
587
588
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
589
	u64 nbytes = 0;
590
591
592
593
594
595
596
597
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

Yan Zheng's avatar
Yan Zheng committed
598
	if (found_type == BTRFS_FILE_EXTENT_REG ||
599
600
601
602
603
604
605
606
607
608
609
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
610
		size = btrfs_file_extent_ram_bytes(eb, item);
611
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
612
		extent_end = ALIGN(start + size,
613
				   fs_info->sectorsize);
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
630
631
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);
632

Yan Zheng's avatar
Yan Zheng committed
633
634
635
	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
655
			btrfs_release_path(path);
656
657
658
			goto out;
		}
	}
659
	btrfs_release_path(path);
660
661

	/* drop any overlapping extents */
662
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
663
664
	if (ret)
		goto out;
665

Yan Zheng's avatar
Yan Zheng committed
666
667
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
668
		u64 offset;
Yan Zheng's avatar
Yan Zheng committed
669
670
671
		unsigned long dest_offset;
		struct btrfs_key ins;

672
673
674
675
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

Yan Zheng's avatar
Yan Zheng committed
676
677
		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
678
679
		if (ret)
			goto out;
Yan Zheng's avatar
Yan Zheng committed
680
681
682
683
684
685
686
687
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
688
		offset = key->offset - btrfs_file_extent_offset(eb, item);
Yan Zheng's avatar
Yan Zheng committed
689

690
691
692
693
694
695
696
697
		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree(affects qgroup)
		 */
698
		ret = btrfs_qgroup_trace_extent(trans,
699
700
701
702
703
704
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

Yan Zheng's avatar
Yan Zheng committed
705
		if (ins.objectid > 0) {
706
			struct btrfs_ref ref = { 0 };
Yan Zheng's avatar
Yan Zheng committed
707
708
709
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
710

Yan Zheng's avatar
Yan Zheng committed
711
712
713
714
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
715
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
Yan Zheng's avatar
Yan Zheng committed
716
717
						ins.offset);
			if (ret == 0) {
718
719
720
721
722
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
723
						key->objectid, offset);
724
				ret = btrfs_inc_extent_ref(trans, &ref);
725
726
				if (ret)
					goto out;
Yan Zheng's avatar
Yan Zheng committed
727
728
729
730
731
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
732
				ret = btrfs_alloc_logged_file_extent(trans,
733
						root->root_key.objectid,
734
						key->objectid, offset, &ins);
735
736
				if (ret)
					goto out;
Yan Zheng's avatar
Yan Zheng committed
737
			}
738
			btrfs_release_path(path);
Yan Zheng's avatar
Yan Zheng committed
739
740
741
742
743
744
745
746
747
748
749
750
751

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
Arne Jansen's avatar
Arne Jansen committed
752
						&ordered_sums, 0);
753
754
			if (ret)
				goto out;
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
			/*
			 * Now delete all existing cums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to lookup up for the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
Yan Zheng's avatar
Yan Zheng committed
804
805
806
807
808
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
809
				if (!ret)
810
					ret = btrfs_del_csums(trans, fs_info,
811
812
							      sums->bytenr,
							      sums->len);
813
814
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
815
						fs_info->csum_root, sums);
Yan Zheng's avatar
Yan Zheng committed
816
817
818
				list_del(&sums->list);
				kfree(sums);
			}
819
820
			if (ret)
				goto out;
Yan Zheng's avatar
Yan Zheng committed
821
		} else {
822
			btrfs_release_path(path);
Yan Zheng's avatar
Yan Zheng committed
823
824
825
826
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
827
828
		if (ret)
			goto out;
Yan Zheng's avatar
Yan Zheng committed
829
	}
830

831
	inode_add_bytes(inode, nbytes);
832
update_inode:
833
	ret = btrfs_update_inode(trans, root, inode);
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
851
				      struct btrfs_inode *dir,
852
853
854
855
856
857
858
859
860
861
862
863
864
865
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
866
867
868
	if (!name)
		return -ENOMEM;

869
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
870
	btrfs_release_path(path);
871
872

	inode = read_one_inode(root, location.objectid);
873
	if (!inode) {
874
875
		ret = -EIO;
		goto out;
876
	}
877

878
	ret = link_to_fixup_dir(trans, root, path, location.objectid);
879
880
	if (ret)
		goto out;
881

882
883
	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
884
885
	if (ret)
		goto out;
886
	else
887
		ret = btrfs_run_delayed_items(trans);
888
out:
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	/* First check the dir index item at @index points at @objectid. */
	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (!di || IS_ERR(di))
		goto out;
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
	if (location.objectid != objectid)
		goto out;
	btrfs_release_path(path);

	/* Then check the name-keyed dir item points at @objectid too. */
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (!di || IS_ERR(di))
		goto out;
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
	if (location.objectid != objectid)
		goto out;

	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
Mark Fasheh's avatar
Mark Fasheh committed
943
				   u64 ref_objectid,
944
				   const char *name, int namelen)
945
946
947
948
949
950
951
952
953
954
955
956
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
957
958
959
	if (!path)
		return -ENOMEM;

960
961
962
963
964
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
Mark Fasheh's avatar
Mark Fasheh committed
965
966

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
967
968
969
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
Mark Fasheh's avatar
Mark Fasheh committed
970
971
972
973
974
975
976
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

/*
 * Resolve conflicts between a ref found in the log and what already exists
 * in the subvolume tree for (inode_objectid, parent_objectid, name):
 * unlink any name the subvolume records that the log does not, then drop
 * any dir items that collide on the same index or the same name.
 *
 * Returns 0 on success, 1 when the ref targets the root directory itself
 * (nothing to do), or a negative errno.  *search_done is set once the
 * subvolume refs have been fully checked so callers can skip rechecking.
 */
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/*
		 * Are we trying to overwrite a back ref for the root
		 * directory?  If so, just jump out, we're done.
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/*
		 * Check all the names in this back reference to see if they
		 * are in the log.  If so we allow them to stay, otherwise
		 * they must be unlinked as a conflict.
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				/*
				 * Bump the link count so the unlink below
				 * can't free the inode out from under us.
				 */
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				/* The item changed, rescan from the start. */
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			/* Extref items can mix parents; skip other parents. */
			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				/* iput() tolerates a NULL victim_parent. */
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
/*
 * Decode one extended ref at @ref_ptr inside @eb: allocate and copy out the
 * name (caller frees), and optionally return the dir index and parent dir
 * objectid.  Returns 0 or -ENOMEM.
 */
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (!*name)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

/*
 * Decode one old-style ref at @ref_ptr inside @eb: allocate and copy out the
 * name (caller frees), and optionally return the dir index.
 * Returns 0 or -ENOMEM.
 */
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (!*name)
		return -ENOMEM;

	/* The name is stored immediately after the fixed-size ref struct. */
	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		/* No matching ref item in the subvolume tree, nothing to do. */
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	/* Walk every name packed into the subvolume's ref item. */
	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			/* For plain refs the parent dir is the key offset. */
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		/* Is this (parent, name) also present in the log's item? */
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
							     parent_id, name,
							     namelen, NULL);
		else
			ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
							 namelen, NULL);

		if (!ret) {
			struct inode *dir;

			/*
			 * Name exists in the subvolume but not in the log:
			 * do a proper unlink from its parent directory.
			 */
			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			/* The unlink modified the item; restart the scan. */
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
 out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Check whether the subvolume tree already records a ref of type @ref_type
 * from @dir to @inode under @name.  Returns non-zero when the ref exists,
 * 0 when it does not, or a negative errno on search failure.
 */
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	key.offset = (ref_type == BTRFS_INODE_REF_KEY) ?
		parent_id : btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret != 0) {
		/* ret > 0 means no such key, so the ref can not exist. */
		if (ret > 0)
			ret = 0;
		goto out;
	}

	/* The item exists; verify it really contains this parent/name. */
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = btrfs_find_name_in_ext_backref(path->nodes[0],
						     path->slots[0], parent_id,
						     name, namelen, NULL);
	else
		ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name, namelen, NULL);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Replay helper: link @inode into @dir with @name at dir index @ref_index.
 * If another inode already holds a dentry with the same name (a collision
 * with a log inode not yet processed), that dentry is unlinked first.
 * Returns 0 on success or a negative errno.
 */
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		/* No name collision, we can link right away. */
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	/* iput() is a no-op while other_inode is still NULL. */
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{