// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

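/* allocate a zeroed btrfs_path from the path slab cache */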
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

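/*
 * Drop a blocker previously registered with btrfs_get_tree_mod_seq.  Once the
 * lowest remaining sequence number is known, tree mod log elements older than
 * it are no longer needed and are pruned from the rb tree.
 */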
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

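/*
 * Allocate and fill a tree mod log element for @slot of @eb.  The existing
 * key and block pointer in that slot are recorded for every operation except
 * MOD_LOG_KEY_ADD, since an add does not overwrite anything.
 */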
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

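/*
 * Record a single key modification (add, remove or replace) in @slot of @eb.
 * Returns 0 when logging is not needed or on success, -ENOMEM if the log
 * element cannot be allocated.
 */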
static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

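/*
 * Record a move of @nr_items key pointers from @src_slot to @dst_slot within
 * @eb.  When the move overwrites slots (dst_slot < src_slot), the overwritten
 * entries are logged as removals first so they can be rewound later.
 */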
static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

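/*
 * Insert the per-slot removal elements in @tm_list into the tree mod log,
 * highest slot first.  On failure, any elements already inserted are erased
 * again.  The caller must hold tree_mod_log_lock for writing.
 */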
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

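/*
 * Record the replacement of the tree root @old_root by @new_root.  If
 * @log_removal is set and the old root is not a leaf, removal elements are
 * logged for all of its slots so its contents can be rewound later.
 */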
static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

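/*
 * Search the tree mod log for an element with the given start address and a
 * sequence number of at least @min_seq.  If @smallest is set, the matching
 * element with the lowest sequence number is returned, otherwise the one
 * with the highest.
 */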
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

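/*
 * Record the copy of @nr_items key pointers from @src (starting at
 * @src_offset) into @dst (starting at @dst_offset).  Each copied slot is
 * logged as a removal from the source and an addition to the destination.
 */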
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

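/*
 * Record that node @eb is about to be freed: every slot is logged as a
 * MOD_LOG_KEY_REMOVE_WHILE_FREEING removal.  Leaves are never logged.
 */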
static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

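/*
 * Update backrefs when a tree block is COWed: take the needed references for
 * the new copy, drop references held by the old block where required, and
 * set *last_ref when the old block loses its last reference.
 */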
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
1194 1195
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
1196 1197 1198 1199 1200 1201 1202 1203 1204
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
1232
			/* if a move operation is needed it's in the log */
1233 1234 1235
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
1236 1237 1238
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */