tree-log.c 175 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
3
4
5
6
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
7
#include <linux/slab.h>
8
#include <linux/blkdev.h>
Josef Bacik's avatar
Josef Bacik committed
9
#include <linux/list_sort.h>
10
#include <linux/iversion.h>
11
#include "misc.h"
12
#include "ctree.h"
13
#include "tree-log.h"
14
15
16
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
Mark Fasheh's avatar
Mark Fasheh committed
17
#include "backref.h"
18
#include "compression.h"
19
#include "qgroup.h"
20
#include "inode-map.h"
21
22
23
24
25
26
27

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
28
29
30
31
32
33
enum {
	LOG_INODE_ALL,		/* log everything for the inode */
	LOG_INODE_EXISTS,	/* log just enough to recreate the inode at replay */
	/*
	 * NOTE(review): the two modes below are not described by the comment
	 * above this enum; presumably they control how inodes encountered
	 * while logging another inode are logged -- confirm against users.
	 */
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};
34

35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 *  2a is actually the more important variant.  With the extra logging
 *  a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

78
79
80
81
82
83
84
85
86
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
87
88
89
90
91
92
enum {
	LOG_WALK_PIN_ONLY,	/* stage 0: only pin down the blocks we find */
	LOG_WALK_REPLAY_INODES,	/* stage 1: create all logged inodes in the subvolume */
	/*
	 * NOTE(review): dir-index replay stage; not covered by the comment
	 * above -- replays directory index items before the final stage.
	 */
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,	/* last stage: directories, links, extents, the rest */
};
93

94
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
95
			   struct btrfs_root *root, struct btrfs_inode *inode,
96
97
			   int inode_only,
			   const loff_t start,
98
99
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
100
101
102
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
103
104
105
106
107
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree an 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times, once to pin down all the extents it is
 * using in ram and once, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		/*
		 * A log tree already exists for this root.  If a full
		 * transaction commit was requested, the log cannot be used:
		 * tell the caller to fall back with -EAGAIN.
		 */
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Track whether more than one task has used this log tree in
		 * the current log transaction via BTRFS_ROOT_MULTI_LOG_TASKS.
		 * NOTE(review): the consumer of this bit is outside this
		 * chunk -- confirm its exact effect there.
		 */
		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * No log tree yet: create the fs-wide log root tree if needed
		 * (under tree_log_mutex, taken inside log_mutex), then add a
		 * log tree for this root.
		 */
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	/* count ourselves in: syncers wait for log_writers to drain */
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		/* register the context on the current log transaction's list */
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were not transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

Yan Zheng's avatar
Yan Zheng committed
196
	mutex_lock(&root->log_mutex);
197
198
	if (root->log_root) {
		ret = 0;
Yan Zheng's avatar
Yan Zheng committed
199
		atomic_inc(&root->log_writers);
200
	}
Yan Zheng's avatar
Yan Zheng committed
201
	mutex_unlock(&root->log_mutex);
202
203
204
	return ret;
}

205
206
207
208
209
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
210
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	/*
	 * Bump the writer count under log_mutex; tasks syncing the log wait
	 * for log_writers to drop to zero (see btrfs_end_log_trans(), which
	 * wakes log_writer_wait), so this holds the log transaction open.
	 */
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

217
218
219
220
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
221
void btrfs_end_log_trans(struct btrfs_root *root)
222
{
Yan Zheng's avatar
Yan Zheng committed
223
	if (atomic_dec_and_test(&root->log_writers)) {
224
225
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
Yan Zheng's avatar
Yan Zheng committed
226
	}
227
228
}

229
230
231
232
233
234
235
236
237
238
239
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
			        buf->start, buf->start + buf->len - 1);
}
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in (LOG_WALK_*) */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	/* pin walk: reserve this tree block's range for log replay */
	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		/* leaves (level 0) may reference logged data extents too */
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	/* destination is a fs/subvol tree, not the log tree itself */
	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		/* sizes differ: fall through to reinsert with the new size */
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token, dst_eb);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	/* restore the directory i_size we saved before the copy above */
	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_INODE_ITEM_KEY,
		.offset = 0,
	};

	/* lookup failures are collapsed to NULL for the callers */
	inode = btrfs_iget(root->fs_info->sb, &key, root);
	return IS_ERR(inode) ? NULL : inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		/* unknown extent type: nothing to replay */
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		/* a hole on a NO_HOLES fs needs no file extent item at all */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree(affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing cums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to lookup up for the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			/* hole extent (disk_bytenr 0): no backrefs or csums */
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
853
				      struct btrfs_inode *dir,
854
855
856
857
858
859
860
861
862
863
864
865
866
867
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
868
869
870
	if (!name)
		return -ENOMEM;

871
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
872
	btrfs_release_path(path);
873
874

	inode = read_one_inode(root, location.objectid);
875
	if (!inode) {
876
877
		ret = -EIO;
		goto out;
878
	}
879

880
	ret = link_to_fixup_dir(trans, root, path, location.objectid);
881
882
	if (ret)
		goto out;
883

884
885
	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
886
887
	if (ret)
		goto out;
888
	else
889
		ret = btrfs_run_delayed_items(trans);
890
out:
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key found_key;
	int match = 0;

	/* The dir index item must exist and point at our inode ... */
	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (!di || IS_ERR(di))
		goto out;
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &found_key);
	if (found_key.objectid != objectid)
		goto out;
	btrfs_release_path(path);

	/* ... and so must the dir item keyed by the name hash. */
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (!di || IS_ERR(di))
		goto out;
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &found_key);
	if (found_key.objectid != objectid)
		goto out;

	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
Mark Fasheh's avatar
Mark Fasheh committed
945
				   u64 ref_objectid,
946
				   const char *name, int namelen)
947
948
949
950
951
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
952
953
954
	if (!path)
		return -ENOMEM;

955
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
956
957
958
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
959
		ret = 0;
Mark Fasheh's avatar
Mark Fasheh committed
960
961
962
		goto out;
	}

963
964
965
966
967
968
969
970
971
	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
972
973
out:
	btrfs_free_path(path);
974
	return ret;
975
976
}

977
/*
 * Resolve conflicts before (re)adding an inode ref during log replay:
 * any name for (inode_objectid, parent_objectid) that exists in the
 * subvolume but not in the log must be unlinked, and any dir item in
 * @dir that clashes with @name or @ref_index must be dropped.
 *
 * Returns 0 on success, 1 if the ref is for the root directory (nothing
 * to do), or a negative errno.  *search_done is set once the subvolume
 * has been scanned so callers can skip redundant passes.
 */
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				/* Name not in the log: unlink it. */
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				/* The item changed under us, restart the scan. */
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				/*
				 * Fix: free victim_name on the error path too,
				 * matching the old style ref loop above;
				 * previously this leaked the allocation.
				 */
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				/* iput(NULL) is a no-op if the read failed. */
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
1159

1160
1161
1162
/*
 * Decode one extended inode ref at @ref_ptr: allocates and returns the
 * name (caller must kfree), and optionally the dir index and parent
 * objectid.  Returns 0 or -ENOMEM.
 */
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (!*name)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name, *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

1184
1185
/*
 * Decode one old style inode ref at @ref_ptr: allocates and returns the
 * name (caller must kfree) and optionally the dir index.  The parent is
 * implied by the item's key offset.  Returns 0 or -ENOMEM.
 */
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (!*name)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
1254
1255
1256
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
1257
		else
1258
1259
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
 out:
	btrfs_release_path(path);
	return ret;
}

1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
1321
1322
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
1323
	else
1324
1325
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);
1326
1327
1328
1329
1330
1331

out:
	btrfs_free_path(path);
	return ret;
}

1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path =