@barcharcraz
Created April 12, 2018 16:07
1,4c1,18
< #ifndef MY_ABC_HERE
< #define MY_ABC_HERE
< #endif
<
---
> /*
> * Copyright (C) 2007 Oracle. All rights reserved.
> *
> * This program is free software; you can redistribute it and/or
> * modify it under the terms of the GNU General Public
> * License v2 as published by the Free Software Foundation.
> *
> * This program is distributed in the hope that it will be useful,
> * but WITHOUT ANY WARRANTY; without even the implied warranty of
> * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> * General Public License for more details.
> *
> * You should have received a copy of the GNU General Public
> * License along with this program; if not, write to the
> * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
> * Boston, MA 02111-1307, USA.
> */
>
14d27
< #include <linux/freezer.h>
38a52
> #include "compression.h"
44c58,64
< static struct extent_io_ops btree_extent_io_ops;
---
> #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
> BTRFS_HEADER_FLAG_RELOC |\
> BTRFS_SUPER_FLAG_ERROR |\
> BTRFS_SUPER_FLAG_SEEDING |\
> BTRFS_SUPER_FLAG_METADUMP)
>
> static const struct extent_io_ops btree_extent_io_ops;
61c81,86
< struct end_io_wq {
---
> /*
> * btrfs_end_io_wq structs are used to do processing in task context when an IO
> * is complete. This is used during reads to verify checksums, and it is used
> * by writes to insert metadata for new file extents after IO is complete.
> */
> struct btrfs_end_io_wq {
67c92
< int metadata;
---
> enum btrfs_wq_endio_type metadata;
71a97,120
> static struct kmem_cache *btrfs_end_io_wq_cache;
>
> int __init btrfs_end_io_wq_init(void)
> {
> btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
> sizeof(struct btrfs_end_io_wq),
> 0,
> SLAB_MEM_SPREAD,
> NULL);
> if (!btrfs_end_io_wq_cache)
> return -ENOMEM;
> return 0;
> }
>
> void btrfs_end_io_wq_exit(void)
> {
> kmem_cache_destroy(btrfs_end_io_wq_cache);
> }
>
> /*
> * async submit bios are used to offload expensive checksumming
> * onto the worker threads. They checksum file and metadata bios
> * just before they are sent down the IO stack.
> */
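For context, this hunk replaces bare kmalloc()/kfree() of the end_io struct with a dedicated slab cache. A minimal sketch of that pattern, assuming a hypothetical struct my_item and init/exit hooks (my_cache, my_item, my_init and my_exit are illustrative names, not from the diff):

#include <linux/init.h>
#include <linux/slab.h>

/* hypothetical payload; stands in for struct btrfs_end_io_wq */
struct my_item {
        int data;
};

static struct kmem_cache *my_cache;

static int __init my_init(void)
{
        /* one cache, created once, sized exactly for the object */
        my_cache = kmem_cache_create("my_item", sizeof(struct my_item),
                                     0, SLAB_MEM_SPREAD, NULL);
        if (!my_cache)
                return -ENOMEM;
        return 0;
}

static void my_exit(void)
{
        /* tear the cache down at exit, as btrfs_end_io_wq_exit() does above */
        kmem_cache_destroy(my_cache);
}

Per-object code then calls kmem_cache_alloc(my_cache, GFP_NOFS) and kmem_cache_free(my_cache, obj) where it previously used kmalloc()/kfree(), which is exactly the substitution the later hunks make for the end_io_wq allocations.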
81c130,133
<
---
> /*
> * bio_offset is optional, can be used if the pages in the bio
> * can't tell us where in the file the bio should go
> */
86a139,161
> /*
> * Lockdep class keys for extent_buffer->locks in this root. For a given
> * eb, the lockdep key is determined by the btrfs_root it belongs to and
> * the level the eb occupies in the tree.
> *
> * Different roots are used for different purposes and may nest inside each
> * other and they require separate keysets. As lockdep keys should be
> * static, assign keysets according to the purpose of the root as indicated
> * by btrfs_root->objectid. This ensures that all special purpose roots
> * have separate keysets.
> *
> * Lock-nesting across peer nodes is always done with the immediate parent
> * node locked thus preventing deadlock. As lockdep doesn't know this, use
> * subclass to avoid triggering lockdep warning in such cases.
> *
> * The key is set by the readpage_end_io_hook after the buffer has passed
> * csum validation but before the pages are unlocked. It is also set by
> * btrfs_init_new_buffer on freshly allocated blocks.
> *
> * We also add a check to make sure the highest level of the tree is the
> * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
> * needs updating as well.
> */
93,94c168,169
< u64 id;
< const char *name_stem;
---
> u64 id; /* root objectid */
> const char *name_stem; /* lock name stem */
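The comment two hunks up describes per-root, per-level lockdep keysets; a reconstructed sketch of how such a table and its consumer fit together, closely following the upstream btrfs_set_buffer_lockdep_class() (only two representative initializer entries are shown here; the real table covers every special-purpose root):

static struct btrfs_lockdep_keyset {
        u64 id;                         /* root objectid */
        const char *name_stem;          /* lock name stem */
        char names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = 0,                              .name_stem = "tree"     },
};

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}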
110,112d184
< #ifdef MY_DEF_HERE
< { .id = BTRFS_BLOCK_GROUP_HINT_TREE_OBJECTID, .name_stem = "block-group-hint" },
< #endif
119a192
> /* initialize lockdep class names */
135a209
> /* find the matching keyset, id 0 is the default entry */
145a220,223
> /*
> * extents on the btree inode are pretty simple, there's one extent
> * that covers the entire device
> */
202c280,285
< static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
---
> /*
> * compute the csum for a btree block, and either verify it or write it
> * into the csum field of the block.
> */
> static int csum_tree_block(struct btrfs_fs_info *fs_info,
> struct extent_buffer *buf,
205c288
< u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
---
> u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
222c305
< return 1;
---
> return err;
230c313
< result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
---
> result = kzalloc(csum_size, GFP_NOFS);
232c315
< return 1;
---
> return -ENOMEM;
246,249c329,331
< printk_ratelimited(KERN_INFO
< "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
< "level %d\n",
< root->fs_info->sb->s_id, buf->start,
---
> btrfs_warn_rl(fs_info,
> "%s checksum verify failed on %llu wanted %X found %X level %d",
> fs_info->sb->s_id, buf->start,
253c335
< return 1;
---
> return -EUCLEAN;
262a345,350
> /*
> * we can't consider a given block up to date unless the transid of the
> * block matches the transid in the parent node's pointer. This is how we
> * detect blocks that either didn't get written at all or got written
> * in the wrong place.
> */
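The rule that comment describes reduces to a generation comparison; a hedged sketch of the core test (illustrative, not the actual body of verify_parent_transid; the btrfs_header_generation() helper is upstream):

static int transid_ok(struct extent_buffer *eb, u64 parent_transid)
{
        /* parent_transid == 0 means the caller has nothing to compare
         * against (e.g. when reading a tree root), so skip the check */
        if (!parent_transid ||
            btrfs_header_generation(eb) == parent_transid)
                return 1;       /* the block is the one the parent pointed at */

        return 0;       /* lost or misplaced write: try another mirror */
}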
269,270c357
< bool need_lock = (current->journal_info ==
< (void *)BTRFS_SEND_TRANS_STUB);
---
> bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
284c371
< 0, &cached_state);
---
> &cached_state);
290,292c377,380
< printk_ratelimited("parent transid verify failed on %llu wanted %llu "
< "found %llu\n",
< eb->start, parent_transid, btrfs_header_generation(eb));
---
> btrfs_err_rl(eb->fs_info,
> "parent transid verify failed on %llu wanted %llu found %llu",
> eb->start,
> parent_transid, btrfs_header_generation(eb));
294a383,390
> /*
> * Things reading via commit roots that don't have normal protection,
> * like send, can have a really old block in cache that may point at a
> * block that has been free'd and re-allocated. So don't clear uptodate
> * if we find an eb that is under IO (dirty/writeback) because we could
> * end up reading in the stale data and then writing it back out and
> * making everybody very sad.
> */
304a401,404
> /*
> * Return 0 if the superblock checksum type matches the checksum value of that
> * algorithm. Pass the raw disk superblock data.
> */
316a417,421
> /*
> * The super_block structure does not span the whole
> * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
> * is filled with zeros and is included in the checksum.
> */
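Concretely, the check those comments describe is a crc32c over everything past the stored checksum; a hedged sketch built on the btrfs_csum_data()/btrfs_csum_final() helpers (the wrapper function and its name are illustrative, not the diff's code):

static int sb_csum_matches(char *raw_disk_sb, u16 csum_size)
{
        char result[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;

        /* the stored csum occupies the first BTRFS_CSUM_SIZE bytes;
         * everything after it, zero-filled tail included, is covered */
        crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, crc,
                              BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
        btrfs_csum_final(crc, result);

        return memcmp(raw_disk_sb, result, csum_size) == 0;
}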
323,328d427
<
< if (ret && btrfs_super_generation(disk_sb) < 10) {
< printk(KERN_WARNING
< "BTRFS: super block crcs don't match, older mkfs detected\n");
< ret = 0;
< }
339a439,442
> /*
> * helper to read a given tree block, doing retries as required when
> * the checksums don't match and we have alternate mirrors to try.
> */
342c445
< u64 start, u64 parent_transid)
---
> u64 parent_transid)
354,355c457
< ret = read_extent_buffer_pages(io_tree, eb, start,
< WAIT_COMPLETE,
---
> ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
364a467,471
> /*
> * This buffer's crc is fine, but its contents are corrupted, so
> * there is no reason to read the other copies, they won't be
> * any less wrong.
> */
388,391d494
< #ifdef MY_DEF_HERE
< if (unlikely(test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) && !ret))
< repair_eb_io_failure(root, eb, 1);
< #endif
396c499,504
< static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
---
> /*
> * checksum a dirty tree block before IO. This has extra checks to make sure
> * we only fill in the checksum field in the first page of a multi-page block
> */
>
> static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
404a513
>
406,409c515,527
< if (WARN_ON(found_start != start || !PageUptodate(page)))
< return 0;
< csum_tree_block(root, eb, 0);
< return 0;
---
> /*
> * Please do not consolidate these warnings into a single if.
> * It is useful to know what went wrong.
> */
> if (WARN_ON(found_start != start))
> return -EUCLEAN;
> if (WARN_ON(!PageUptodate(page)))
> return -EUCLEAN;
>
> ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
> btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
>
> return csum_tree_block(fs_info, eb, 0);
412c530
< static int check_tree_block_fsid(struct btrfs_root *root,
---
> static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
415c533
< struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
---
> struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
436,545d553
< #ifdef MY_DEF_HERE
<
< static int fix_item_offset_size(struct btrfs_root *root, struct extent_buffer *leaf, int slot)
< {
< u32 offset, size;
< struct btrfs_item *item = btrfs_item_nr(slot);
<
< if (slot >= btrfs_header_nritems(leaf) - 1)
< return -EIO;
<
< if (btrfs_item_offset_nr(leaf, slot + 1) > BTRFS_LEAF_DATA_SIZE(root) ||
< btrfs_item_size_nr(leaf, slot + 1) > BTRFS_LEAF_DATA_SIZE(root))
< return -EIO;
<
< if (slot + 2 < btrfs_header_nritems(leaf) &&
< btrfs_item_offset_nr(leaf, slot + 1) != btrfs_item_end_nr(leaf, slot + 2))
< return -EIO;
<
< offset = btrfs_item_end_nr(leaf, slot + 1);
<
< if (slot != 0) {
< if (offset >= btrfs_item_offset_nr(leaf, slot - 1))
< return -EIO;
< size = btrfs_item_offset_nr(leaf, slot - 1) - offset;
< } else {
< if (offset >= BTRFS_LEAF_DATA_SIZE(root))
< return -EIO;
< size = BTRFS_LEAF_DATA_SIZE(root) - offset;
< }
<
< btrfs_set_item_offset(leaf, item, offset);
< btrfs_set_item_size(leaf, item, size);
<
< return 0;
< }
<
< static void fix_item_key(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf, int slot, struct btrfs_key bad_key)
< {
< struct btrfs_extent_item *ei;
< struct btrfs_extent_inline_ref *iref;
< struct btrfs_extent_data_ref *dref;
< struct btrfs_root *root;
< struct btrfs_key key;
< struct btrfs_path *path = NULL;
< struct extent_buffer *buf;
< struct btrfs_file_extent_item *item;
< struct btrfs_disk_key disk_key;
< u64 flags;
< int type;
< int ret;
<
< if (btrfs_header_owner(leaf) != 2)
< return;
<
< if (bad_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
< return;
<
< ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
< flags = btrfs_extent_flags(leaf, ei);
<
< if (flags != BTRFS_EXTENT_FLAG_DATA)
< return;
<
< iref = (struct btrfs_extent_inline_ref *)(ei + 1);
< type = btrfs_extent_inline_ref_type(leaf, iref);
<
< if (type != BTRFS_EXTENT_DATA_REF_KEY)
< return;
<
< if (cmpxchg(&fs_info->can_fix_meta_key, CAN_FIX_META_KEY, DOING_FIX_META_KEY) != CAN_FIX_META_KEY)
< return;
<
< dref = (struct btrfs_extent_data_ref *)(&iref->offset);
< key.objectid = btrfs_extent_data_ref_root(leaf, dref);
< key.type = BTRFS_ROOT_ITEM_KEY;
< key.offset = 0;
< root = btrfs_read_fs_root_no_name(fs_info, &key);
< if (IS_ERR(root))
< goto err;
<
< path = btrfs_alloc_path();
< if (!path)
< goto err;
<
< key.objectid = btrfs_extent_data_ref_objectid(leaf, dref);
< key.offset = btrfs_extent_data_ref_offset(leaf, dref);
< key.type = BTRFS_EXTENT_DATA_KEY;
< ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
< if (ret)
< goto err;
<
< buf = path->nodes[0];
< item = btrfs_item_ptr(buf, path->slots[0], struct btrfs_file_extent_item);
< type = btrfs_file_extent_type(buf, item);
< if (type != BTRFS_FILE_EXTENT_REG && type != BTRFS_FILE_EXTENT_PREALLOC)
< goto err;
<
< key.objectid = btrfs_file_extent_disk_bytenr(buf, item);
< key.type = BTRFS_EXTENT_ITEM_KEY;
< key.offset = btrfs_file_extent_disk_num_bytes(buf, item);
< btrfs_cpu_key_to_disk(&disk_key, &key);
< btrfs_set_item_key(leaf, &disk_key, slot);
<
< err:
< fs_info->can_fix_meta_key = CAN_FIX_META_KEY;
< btrfs_free_path(path);
< return;
< }
< #endif
<
553a562,597
> /*
> * Extent buffers from a relocation tree have an owner field that
> * corresponds to the subvolume tree they are based on. So just from an
> * extent buffer alone we can not find out what is the id of the
> * corresponding subvolume tree, so we can not figure out if the extent
> * buffer corresponds to the root of the relocation tree or not. So skip
> * this check for relocation trees.
> */
> if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
> struct btrfs_root *check_root;
>
> key.objectid = btrfs_header_owner(leaf);
> key.type = BTRFS_ROOT_ITEM_KEY;
> key.offset = (u64)-1;
>
> check_root = btrfs_get_fs_root(root->fs_info, &key, false);
> /*
> * The only reason we also check NULL here is that during
> * open_ctree() some roots have not yet been set up.
> */
> if (!IS_ERR_OR_NULL(check_root)) {
> struct extent_buffer *eb;
>
> eb = btrfs_root_node(check_root);
> /* if leaf is the root, then it's fine */
> if (leaf != eb) {
> CORRUPT("non-root leaf's nritems is 0",
> leaf, check_root, 0);
> free_extent_buffer(eb);
> return -EIO;
> }
> free_extent_buffer(eb);
> }
> return 0;
> }
>
556a601
> /* Check the 0 item */
559,568d603
< #ifdef MY_DEF_HERE
< if (fix_item_offset_size(root, leaf, 0)) {
< btrfs_crit(root->fs_info, "invalid leaf item offset size pair, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), 0);
< return -EIO;
< }
< btrfs_warn(root->fs_info, "corrupt leaf fixed, invalid item offset size pair, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), 0);
< set_bit(EXTENT_BUFFER_CORRUPT, &leaf->bflags);
< #else
571d605
< #endif
573a608,614
> /*
> * Check to make sure each item's keys are in the correct order and their
> * offsets make sense. We only have to loop through nritems-1 because
> * we check the current slot against the next slot, which verifies the
> * next slot's offset+size makes sense and that the current slot's
> * offset is correct.
> */
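For reference, the leaf layout those checks rely on (an illustration, not part of the diff):

        [leaf header][item 0][item 1] ... [item N-1] ...free space... [data N-1] ... [data 1][data 0]

Item headers grow forward from the leaf header while their data is packed backward from the end of the leaf, so for every slot

        btrfs_item_offset_nr(leaf, slot) == btrfs_item_end_nr(leaf, slot + 1)

i.e. a slot's data begins exactly where the next slot's data ends, and keys must be strictly increasing across slots.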
578,581c619
< #ifdef MY_DEF_HERE
<
< #else
<
---
> /* Make sure the keys are in the right order */
586d623
< #endif
587a625,629
> /*
> * Make sure the offset and ends are right; remember that the
> * item data starts at the end of the leaf and grows towards the
> * front.
> */
590,599d631
< #ifdef MY_DEF_HERE
< if (fix_item_offset_size(root, leaf, slot + 1)) {
< btrfs_crit(root->fs_info, "leaf slot offset bad, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), slot + 1);
< return -EIO;
< }
< btrfs_warn(root->fs_info, "corrupt leaf fixed, slot offset bad, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), slot + 1);
< set_bit(EXTENT_BUFFER_CORRUPT, &leaf->bflags);
< #else
602,622d633
< #endif
< }
<
< #ifdef MY_DEF_HERE
<
< if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
<
< fix_item_key(root->fs_info, leaf, slot, leaf_key);
< fix_item_key(root->fs_info, leaf, slot + 1, key);
<
< btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
< btrfs_item_key_to_cpu(leaf, &key, slot + 1);
<
< if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
< btrfs_crit(root->fs_info, "leaf bad key order, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), slot);
< return -EIO;
< }
< btrfs_warn(root->fs_info, "corrupt leaf fixed, bad key order, "
< "block=%llu, root=%llu, slot=%d", btrfs_header_bytenr(leaf), btrfs_header_owner(leaf), slot);
< set_bit(EXTENT_BUFFER_CORRUPT, &leaf->bflags);
625,626c636,640
< #else
<
---
> /*
> * Check to make sure that we don't point outside of the leaf,
> * just in case all the items are consistent with each other, but
> * all point outside of the leaf.
> */
632d645
< #endif
681a695
> struct btrfs_fs_info *fs_info = root->fs_info;
689a704,706
> /* the pending IO might have been the only thing that kept this buffer
> * in memory. Make sure we have a ref for all these other checks
> */
697d713
<
705,707c721,722
< printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
< "%llu %llu\n",
< found_start, eb->start);
---
> btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
> found_start, eb->start);
711,713c726,728
< if (check_tree_block_fsid(root, eb)) {
< printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
< eb->start);
---
> if (check_tree_block_fsid(fs_info, eb)) {
> btrfs_err_rl(fs_info, "bad fsid on block %llu",
> eb->start);
719,720c734,735
< btrfs_info(root->fs_info, "bad tree block level %d",
< (int)btrfs_header_level(eb));
---
> btrfs_err(fs_info, "bad tree block level %d",
> (int)btrfs_header_level(eb));
728,730c743,744
< ret = csum_tree_block(root, eb, 1);
< if (ret) {
< ret = -EIO;
---
> ret = csum_tree_block(fs_info, eb, 1);
> if (ret)
732d745
< }
733a747,751
> /*
> * If this is a leaf block and it is corrupt, set the corrupt bit so
> * that we don't try and read the other copies of this block, just
> * return -EIO.
> */
747c765
< btree_readahead_hook(root, eb, eb->start, ret);
---
> btree_readahead_hook(fs_info, eb, eb->start, ret);
750c768,772
<
---
> /*
> * our io error hook is going to dec the io pages
> * again, we have to make sure it has something
> * to decrement
> */
762d783
< struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
769,770c790,791
< btree_readahead_hook(root, eb, eb->start, -EIO);
< return -EIO;
---
> btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
> return -EIO; /* we fixed nothing */
775c796
< struct end_io_wq *end_io_wq = bio->bi_private;
---
> struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
798c819,823
< if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
---
> if (unlikely(end_io_wq->metadata ==
> BTRFS_WQ_ENDIO_DIO_REPAIR)) {
> wq = fs_info->endio_repair_workers;
> func = btrfs_endio_repair_helper;
> } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
802,810d826
< #ifdef MY_DEF_HERE
< if (unlikely(fs_info->can_fix_meta_key == DOING_FIX_META_KEY)) {
< wq = fs_info->endio_meta_fix_workers;
< func = btrfs_endio_meta_fix_helper;
< } else {
< wq = fs_info->endio_meta_workers;
< func = btrfs_endio_meta_helper;
< }
< #else
813d828
< #endif
825c840
< int metadata)
---
> enum btrfs_wq_endio_type metadata)
827,828c842,844
< struct end_io_wq *end_io_wq;
< end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
---
> struct btrfs_end_io_wq *end_io_wq;
>
> end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
876a893,895
> /*
> * atomic_dec_return implies a barrier for waitqueue_active
> */
880a900
> /* If an error occurred we just want to clean up the bio and move on */
945,946c965
< struct bio_vec *bvec = bio->bi_io_vec;
< int bio_index = 0;
---
> struct bio_vec *bvec;
948c967
< int ret = 0;
---
> int i, ret = 0;
950,951c969
< WARN_ON(bio->bi_vcnt <= 0);
< while (bio_index < bio->bi_vcnt) {
---
> bio_for_each_segment_all(bvec, bio, i) {
953c971
< ret = csum_dirty_buffer(root, bvec->bv_page);
---
> ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
956,957d973
< bio_index++;
< bvec++;
958a975
>
967c984,987
<
---
> /*
> * when we're called for a write, we're already in the async
> * submission context. Just jump into btrfs_map_bio
> */
976a997,1000
> /*
> * when we're called for a write, we're already in the async
> * submission context. Just jump into btrfs_map_bio
> */
1002c1026,1029
<
---
> /*
> * called for a read, do the setup so that checksum validation
> * can happen in the async kernel threads
> */
1004c1031
< bio, 1);
---
> bio, BTRFS_WQ_ENDIO_METADATA);
1016c1043,1046
<
---
> /*
> * kthread helpers are used to submit writes so that
> * checksumming can happen in parallel across all CPUs
> */
1036c1066,1069
<
---
> /*
> * we can't safely write a btree page from here,
> * we haven't done the locking hook
> */
1039c1072,1075
<
---
> /*
> * Buffers may be managed in a filesystem specific way.
> * We must have no buffers or drop them.
> */
1046a1083
>
1059c1096
<
---
> /* this is a bit racy, but that's ok */
1125,1126c1162
< int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
< u64 parent_transid)
---
> void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
1130d1165
< int ret = 0;
1132,1134c1167,1169
< buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
< if (!buf)
< return 0;
---
> buf = btrfs_find_create_tree_block(root, bytenr);
> if (IS_ERR(buf))
> return;
1136c1171
< buf, 0, WAIT_NONE, btree_get_extent, 0);
---
> buf, WAIT_NONE, btree_get_extent, 0);
1138d1172
< return ret;
1141c1175
< int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
---
> int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
1149,1150c1183,1184
< buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
< if (!buf)
---
> buf = btrfs_find_create_tree_block(root, bytenr);
> if (IS_ERR(buf))
1155c1189
< ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
---
> ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
1173,1174c1207,1208
< struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
< u64 bytenr, u32 blocksize)
---
> struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
> u64 bytenr)
1176c1210
< return find_extent_buffer(root->fs_info, bytenr);
---
> return find_extent_buffer(fs_info, bytenr);
1180c1214
< u64 bytenr, u32 blocksize)
---
> u64 bytenr)
1182,1183c1216,1218
< if (btrfs_test_is_dummy_root(root))
< return alloc_test_extent_buffer(root->fs_info, bytenr);
---
> if (btrfs_is_testing(root->fs_info))
> return alloc_test_extent_buffer(root->fs_info, bytenr,
> root->nodesize);
1186a1222
>
1200c1236
< u32 blocksize, u64 parent_transid)
---
> u64 parent_transid)
1205,1207c1241,1243
< buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
< if (!buf)
< return NULL;
---
> buf = btrfs_find_create_tree_block(root, bytenr);
> if (IS_ERR(buf))
> return buf;
1209c1245
< ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
---
> ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
1212c1248
< return NULL;
---
> return ERR_PTR(ret);
1218c1254,1255
< void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
---
> void clean_tree_block(struct btrfs_trans_handle *trans,
> struct btrfs_fs_info *fs_info,
1221,1222d1257
< struct btrfs_fs_info *fs_info = root->fs_info;
<
1231c1266
<
---
> /* ugh, clear_extent_buffer_dirty needs to lock the page */
1247c1282
< ret = percpu_counter_init(&writers->counter, 0);
---
> ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
1264,1266c1299,1300
< static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
< u32 stripesize, struct btrfs_root *root,
< struct btrfs_fs_info *fs_info,
---
> static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
> struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1268a1303
> bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
1273d1307
< root->leafsize = leafsize;
1307,1309d1340
< #ifdef MY_DEF_HERE
< mutex_init(&root->ordered_extent_worker_mutex);
< #endif
1322a1354
> atomic_set(&root->qgroup_meta_rsv, 0);
1324,1325d1355
< #ifdef MY_DEF_HERE
< #else
1327d1356
< #endif
1329c1358
< if (fs_info)
---
> if (!dummy)
1336,1337c1365
< memset(&root->root_kobj, 0, sizeof(root->root_kobj));
< if (fs_info)
---
> if (!dummy)
1341d1368
< init_completion(&root->kobj_unregister);
1348c1375,1376
< static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
---
> static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
> gfp_t flags)
1350c1378
< struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
---
> struct btrfs_root *root = kzalloc(sizeof(*root), flags);
1357,1358c1385,1387
<
< struct btrfs_root *btrfs_alloc_dummy_root(void)
---
> /* Should only be used by the testing infrastructure */
> struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
> u32 sectorsize, u32 nodesize)
1362c1391,1394
< root = btrfs_alloc_root(NULL);
---
> if (!fs_info)
> return ERR_PTR(-EINVAL);
>
> root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1365,1366c1397,1399
< __setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
< set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
---
> /* We don't use the stripesize in selftests, so set it to the sectorsize */
> __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
> BTRFS_ROOT_TREE_OBJECTID);
1384c1417
< root = btrfs_alloc_root(fs_info);
---
> root = btrfs_alloc_root(fs_info, GFP_KERNEL);
1388,1390c1421,1422
< __setup_root(tree_root->nodesize, tree_root->leafsize,
< tree_root->sectorsize, tree_root->stripesize,
< root, fs_info, objectid);
---
> __setup_root(tree_root->nodesize, tree_root->sectorsize,
> tree_root->stripesize, root, fs_info, objectid);
1395,1396c1427
< leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
< 0, objectid, NULL, 0, 0, 0);
---
> leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
1462c1493
< root = btrfs_alloc_root(fs_info);
---
> root = btrfs_alloc_root(fs_info, GFP_NOFS);
1466,1468c1497,1499
< __setup_root(tree_root->nodesize, tree_root->leafsize,
< tree_root->sectorsize, tree_root->stripesize,
< root, fs_info, BTRFS_TREE_LOG_OBJECTID);
---
> __setup_root(tree_root->nodesize, tree_root->sectorsize,
> tree_root->stripesize, root, fs_info,
> BTRFS_TREE_LOG_OBJECTID);
1474,1476c1505,1515
< leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
< BTRFS_TREE_LOG_OBJECTID, NULL,
< 0, 0, 0);
---
> /*
> * DON'T set REF_COWS for log trees
> *
> * log trees do not get reference counted because they go away
> * before a real commit is actually done. They do store pointers
> * to file data extents, and those reference counts still get
> * updated (along with back refs to the log tree).
> */
>
> leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
> NULL, 0, 0, 0);
1526c1565
< btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
---
> btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
1534,1535d1572
< #ifdef MY_DEF_HERE
< #else
1537d1573
< #endif
1549d1584
< u32 blocksize;
1556c1591
< root = btrfs_alloc_root(fs_info);
---
> root = btrfs_alloc_root(fs_info, GFP_NOFS);
1562,1564c1597,1598
< __setup_root(tree_root->nodesize, tree_root->leafsize,
< tree_root->sectorsize, tree_root->stripesize,
< root, fs_info, key->objectid);
---
> __setup_root(tree_root->nodesize, tree_root->sectorsize,
> tree_root->stripesize, root, fs_info, key->objectid);
1575d1608
< blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1577,1579c1610,1612
< blocksize, generation);
< if (!root->node) {
< ret = -ENOMEM;
---
> generation);
> if (IS_ERR(root->node)) {
> ret = PTR_ERR(root->node);
1583c1616,1617
< goto read_fail;
---
> free_extent_buffer(root->node);
> goto find_fail;
1590,1591d1623
< read_fail:
< free_extent_buffer(root->node);
1637,1638c1669,1670
< spin_lock_init(&root->cache_lock);
< init_waitqueue_head(&root->cache_wait);
---
> spin_lock_init(&root->ino_cache_lock);
> init_waitqueue_head(&root->ino_cache_wait);
1642,1643c1674,1682
< goto free_writers;
< return 0;
---
> goto fail;
>
> mutex_lock(&root->objectid_mutex);
> ret = btrfs_find_highest_objectid(root,
> &root->highest_objectid);
> if (ret) {
> mutex_unlock(&root->objectid_mutex);
> goto fail;
> }
1645,1646c1684,1688
< free_writers:
< btrfs_free_subvolume_writers(root->subv_writers);
---
> ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
>
> mutex_unlock(&root->objectid_mutex);
>
> return 0;
1648,1649c1690
< kfree(root->free_ino_ctl);
< kfree(root->free_ino_pinned);
---
> /* the caller is responsible for calling free_fs_root */
1653,1654c1694,1695
< static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
< u64 root_id)
---
> struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
> u64 root_id)
1670c1711
< ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
---
> ret = radix_tree_preload(GFP_NOFS);
1690a1732,1733
> struct btrfs_path *path;
> struct btrfs_key key;
1712,1717d1754
< #ifdef MY_DEF_HERE
< if (location->objectid == BTRFS_BLOCK_GROUP_HINT_TREE_OBJECTID)
< return fs_info->block_group_hint_root ? fs_info->block_group_hint_root :
< ERR_PTR(-ENOENT);
< #endif
<
1739,1740c1776,1786
< ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
< location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
---
> path = btrfs_alloc_path();
> if (!path) {
> ret = -ENOMEM;
> goto fail;
> }
> key.objectid = BTRFS_ORPHAN_OBJECTID;
> key.type = BTRFS_ORPHAN_ITEM_KEY;
> key.offset = location->objectid;
>
> ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
> btrfs_free_path(path);
1772c1818
< if (bdi && bdi_congested(bdi, bdi_bits)) {
---
> if (bdi_congested(bdi, bdi_bits)) {
1795a1842,1845
> /*
> * called by the kthread helper functions to finally call the bio end_io
> * functions. This is where read checksum verification actually happens
> */
1799c1849
< struct end_io_wq *end_io_wq;
---
> struct btrfs_end_io_wq *end_io_wq;
1802c1852
< end_io_wq = container_of(work, struct end_io_wq, work);
---
> end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
1808c1858
< kfree(end_io_wq);
---
> kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
1820a1871
> /* Make the cleaner go to sleep early. */
1823a1875,1881
> /*
> * Do not do anything if we might cause open_ctree() to block
> * before we have finished mounting the filesystem.
> */
> if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
> goto sleep;
>
1826a1885,1888
> /*
> * Avoid the problem that we change the status of the fs
> * during the above check and trylock.
> */
1831a1894
> mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
1833c1896,1897
< btrfs_delete_unused_bgs(root->fs_info);
---
> mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
>
1836a1901,1904
> /*
> * The defragger has dealt with the R/O remount and umount,
> * so we needn't do anything special here.
> */
1837a1906,1915
>
> /*
> * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
> * with relocation (btrfs_relocate_chunk) and relocation
> * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
> * after acquiring fs_info->delete_unused_bgs_mutex. So we
> * can't hold, nor need to, fs_info->cleaner_mutex when deleting
> * unused block groups.
> */
> btrfs_delete_unused_bgs(root->fs_info);
1839c1917
< if (!try_to_freeze() && !again) {
---
> if (!again) {
1846a1925,1935
> /*
> * Transaction kthread is stopped before us and wakes us up.
> * However we might have started a new transaction and COWed some
> * tree blocks when deleting unused block groups for example. So
> * make sure we commit the transaction we started to have a clean
> * shutdown when evicting the btree inode - if it has dirty pages
> * when we do the final iput() on it, eviction will trigger a
> * writeback for it which will fail with null pointer dereferences
> * since work queues and other resources were already released and
> * destroyed by the time the iput/eviction/writeback is made.
> */
1893,1898d1981
< #ifdef MY_DEF_HERE
<
< if (root->fs_info->commit_interval <= 5)
< delay = HZ * 1;
< else
< #endif
1904a1988
> /* If the file system is aborted, this will always fail. */
1923,1930c2007,2012
< if (!try_to_freeze()) {
< set_current_state(TASK_INTERRUPTIBLE);
< if (!kthread_should_stop() &&
< (!btrfs_transaction_blocked(root->fs_info) ||
< cannot_commit))
< schedule_timeout(delay);
< __set_current_state(TASK_RUNNING);
< }
---
> set_current_state(TASK_INTERRUPTIBLE);
> if (!kthread_should_stop() &&
> (!btrfs_transaction_blocked(root->fs_info) ||
> cannot_commit))
> schedule_timeout(delay);
> __set_current_state(TASK_RUNNING);
1934a2017,2025
> /*
> * this will find the highest generation in the array of
> * root backups. The index of the newest backup is returned,
> * or -1 if we can't find anything.
> *
> * We check to make sure the array is valid by comparing the
> * generation of the latest root in the array with the generation
> * in the super block. If they don't match we pitch it.
> */
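A reconstructed sketch of that scan (illustrative, not the diff's code; btrfs_super_generation(), the super_roots array and btrfs_backup_tree_root_gen() are the upstream helpers):

static int newest_backup_index(struct btrfs_fs_info *info)
{
        u64 newest_gen = btrfs_super_generation(info->super_copy);
        struct btrfs_root_backup *root_backup;
        int newest = -1;
        int i;

        /* remember the last slot whose tree root generation matches
         * the superblock generation */
        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
                root_backup = info->super_copy->super_roots + i;
                if (btrfs_backup_tree_root_gen(root_backup) == newest_gen)
                        newest = i;
        }
        return newest;
}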
1948a2040
> /* check to see if we actually wrapped around */
1957a2050,2055
>
> /*
> * find the oldest backup so we know where to store new entries
> * in the backup array. This will set the backup_root_index
> * field in the fs_info struct
> */
1964c2062
<
---
> /* if there was garbage in there, just move along */
1971a2070,2074
> /*
> * copy all the root pointers into the super backup array.
> * this will bump the backup pointer by one when it is
> * done
> */
1981a2085,2088
> /*
> * just overwrite the last backup if we're at the same generation
> * this happens only at umount
> */
1988a2096,2099
> /*
> * make sure all of our padding and empty slots get zero filled
> * regardless of which ones we use today
> */
2011a2123,2126
> /*
> * we might commit during log recovery, which happens before we set
> * the fs_root. Make sure it is valid before we fill it in.
> */
2039a2155,2158
> /*
> * if we don't copy this out to the super_copy, it won't get remembered
> * for the next commit
> */
2044a2164,2171
> /*
> * this copies info out of the root backup array and back into
> * the in-memory super block. It is meant to help iterate through
> * the array, so you send it the number of backups you've already
> * tried and the last backup index you used.
> *
> * this returns -1 when it has tried all the backups
> */
2062c2189
<
---
> /* we've tried all the backups, all done */
2065c2192
<
---
> /* jump to the next oldest backup */
2079a2207,2210
> /*
> * fixme: the total bytes and num_devices need to match, or we
> * need a fsck
> */
2084a2216
> /* helper to clean up workers */
2092,2094d2223
< #ifdef MY_DEF_HERE
< btrfs_destroy_workqueue(fs_info->endio_meta_fix_workers);
< #endif
2095a2225
> btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2104,2106d2233
< #ifdef MY_DEF_HERE
< btrfs_destroy_workqueue(fs_info->reada_path_workers);
< #endif
2121a2249
> /* helper to clean up tree roots */
2131,2133d2258
< #ifdef MY_DEF_HERE
< free_root_extent_buffers(info->block_group_hint_root);
< #endif
2175a2301,2583
> static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
> {
> mutex_init(&fs_info->scrub_lock);
> atomic_set(&fs_info->scrubs_running, 0);
> atomic_set(&fs_info->scrub_pause_req, 0);
> atomic_set(&fs_info->scrubs_paused, 0);
> atomic_set(&fs_info->scrub_cancel_req, 0);
> init_waitqueue_head(&fs_info->scrub_pause_wait);
> fs_info->scrub_workers_refcnt = 0;
> }
>
> static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
> {
> spin_lock_init(&fs_info->balance_lock);
> mutex_init(&fs_info->balance_mutex);
> atomic_set(&fs_info->balance_running, 0);
> atomic_set(&fs_info->balance_pause_req, 0);
> atomic_set(&fs_info->balance_cancel_req, 0);
> fs_info->balance_ctl = NULL;
> init_waitqueue_head(&fs_info->balance_wait_q);
> }
>
> static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
> struct btrfs_root *tree_root)
> {
> fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
> set_nlink(fs_info->btree_inode, 1);
> /*
> * we set the i_size on the btree inode to the max possible int.
> * the real end of the address space is determined by all of
> * the devices in the system
> */
> fs_info->btree_inode->i_size = OFFSET_MAX;
> fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
> fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
>
> RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
> extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
> fs_info->btree_inode->i_mapping);
> BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
> extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
>
> BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
>
> BTRFS_I(fs_info->btree_inode)->root = tree_root;
> memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
> sizeof(struct btrfs_key));
> set_bit(BTRFS_INODE_DUMMY,
> &BTRFS_I(fs_info->btree_inode)->runtime_flags);
> btrfs_insert_inode_hash(fs_info->btree_inode);
> }
>
> static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
> {
> fs_info->dev_replace.lock_owner = 0;
> atomic_set(&fs_info->dev_replace.nesting_level, 0);
> mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
> rwlock_init(&fs_info->dev_replace.lock);
> atomic_set(&fs_info->dev_replace.read_locks, 0);
> atomic_set(&fs_info->dev_replace.blocking_readers, 0);
> init_waitqueue_head(&fs_info->replace_wait);
> init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
> }
>
> static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
> {
> spin_lock_init(&fs_info->qgroup_lock);
> mutex_init(&fs_info->qgroup_ioctl_lock);
> fs_info->qgroup_tree = RB_ROOT;
> fs_info->qgroup_op_tree = RB_ROOT;
> INIT_LIST_HEAD(&fs_info->dirty_qgroups);
> fs_info->qgroup_seq = 1;
> fs_info->qgroup_ulist = NULL;
> fs_info->qgroup_rescan_running = false;
> mutex_init(&fs_info->qgroup_rescan_lock);
> }
>
> static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
> struct btrfs_fs_devices *fs_devices)
> {
> int max_active = fs_info->thread_pool_size;
> unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
>
> fs_info->workers =
> btrfs_alloc_workqueue(fs_info, "worker",
> flags | WQ_HIGHPRI, max_active, 16);
>
> fs_info->delalloc_workers =
> btrfs_alloc_workqueue(fs_info, "delalloc",
> flags, max_active, 2);
>
> fs_info->flush_workers =
> btrfs_alloc_workqueue(fs_info, "flush_delalloc",
> flags, max_active, 0);
>
> fs_info->caching_workers =
> btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
>
> /*
> * a higher idle thresh on the submit workers makes it much more
> * likely that bios will be sent down in a sane order to the
> * devices
> */
> fs_info->submit_workers =
> btrfs_alloc_workqueue(fs_info, "submit", flags,
> min_t(u64, fs_devices->num_devices,
> max_active), 64);
>
> fs_info->fixup_workers =
> btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
>
> /*
> * endios are largely parallel and should have a very
> * low idle thresh
> */
> fs_info->endio_workers =
> btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
> fs_info->endio_meta_workers =
> btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
> max_active, 4);
> fs_info->endio_meta_write_workers =
> btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
> max_active, 2);
> fs_info->endio_raid56_workers =
> btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
> max_active, 4);
> fs_info->endio_repair_workers =
> btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
> fs_info->rmw_workers =
> btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
> fs_info->endio_write_workers =
> btrfs_alloc_workqueue(fs_info, "endio-write", flags,
> max_active, 2);
> fs_info->endio_freespace_worker =
> btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
> max_active, 0);
> fs_info->delayed_workers =
> btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
> max_active, 0);
> fs_info->readahead_workers =
> btrfs_alloc_workqueue(fs_info, "readahead", flags,
> max_active, 2);
> fs_info->qgroup_rescan_workers =
> btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
> fs_info->extent_workers =
> btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
> min_t(u64, fs_devices->num_devices,
> max_active), 8);
>
> if (!(fs_info->workers && fs_info->delalloc_workers &&
> fs_info->submit_workers && fs_info->flush_workers &&
> fs_info->endio_workers && fs_info->endio_meta_workers &&
> fs_info->endio_meta_write_workers &&
> fs_info->endio_repair_workers &&
> fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
> fs_info->endio_freespace_worker && fs_info->rmw_workers &&
> fs_info->caching_workers && fs_info->readahead_workers &&
> fs_info->fixup_workers && fs_info->delayed_workers &&
> fs_info->extent_workers &&
> fs_info->qgroup_rescan_workers)) {
> return -ENOMEM;
> }
>
> return 0;
> }
>
> static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
> struct btrfs_fs_devices *fs_devices)
> {
> int ret;
> struct btrfs_root *tree_root = fs_info->tree_root;
> struct btrfs_root *log_tree_root;
> struct btrfs_super_block *disk_super = fs_info->super_copy;
> u64 bytenr = btrfs_super_log_root(disk_super);
>
> if (fs_devices->rw_devices == 0) {
> btrfs_warn(fs_info, "log replay required on RO media");
> return -EIO;
> }
>
> log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
> if (!log_tree_root)
> return -ENOMEM;
>
> __setup_root(tree_root->nodesize, tree_root->sectorsize,
> tree_root->stripesize, log_tree_root, fs_info,
> BTRFS_TREE_LOG_OBJECTID);
>
> log_tree_root->node = read_tree_block(tree_root, bytenr,
> fs_info->generation + 1);
> if (IS_ERR(log_tree_root->node)) {
> btrfs_warn(fs_info, "failed to read log tree");
> ret = PTR_ERR(log_tree_root->node);
> kfree(log_tree_root);
> return ret;
> } else if (!extent_buffer_uptodate(log_tree_root->node)) {
> btrfs_err(fs_info, "failed to read log tree");
> free_extent_buffer(log_tree_root->node);
> kfree(log_tree_root);
> return -EIO;
> }
> /* returns with log_tree_root freed on success */
> ret = btrfs_recover_log_trees(log_tree_root);
> if (ret) {
> btrfs_handle_fs_error(tree_root->fs_info, ret,
> "Failed to recover log tree");
> free_extent_buffer(log_tree_root->node);
> kfree(log_tree_root);
> return ret;
> }
>
> if (fs_info->sb->s_flags & MS_RDONLY) {
> ret = btrfs_commit_super(tree_root);
> if (ret)
> return ret;
> }
>
> return 0;
> }
>
> static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
> struct btrfs_root *tree_root)
> {
> struct btrfs_root *root;
> struct btrfs_key location;
> int ret;
>
> location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
> location.type = BTRFS_ROOT_ITEM_KEY;
> location.offset = 0;
>
> root = btrfs_read_tree_root(tree_root, &location);
> if (IS_ERR(root))
> return PTR_ERR(root);
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> fs_info->extent_root = root;
>
> location.objectid = BTRFS_DEV_TREE_OBJECTID;
> root = btrfs_read_tree_root(tree_root, &location);
> if (IS_ERR(root))
> return PTR_ERR(root);
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> fs_info->dev_root = root;
> btrfs_init_devices_late(fs_info);
>
> location.objectid = BTRFS_CSUM_TREE_OBJECTID;
> root = btrfs_read_tree_root(tree_root, &location);
> if (IS_ERR(root))
> return PTR_ERR(root);
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> fs_info->csum_root = root;
>
> location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
> root = btrfs_read_tree_root(tree_root, &location);
> if (!IS_ERR(root)) {
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
> fs_info->quota_root = root;
> }
>
> location.objectid = BTRFS_UUID_TREE_OBJECTID;
> root = btrfs_read_tree_root(tree_root, &location);
> if (IS_ERR(root)) {
> ret = PTR_ERR(root);
> if (ret != -ENOENT)
> return ret;
> } else {
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> fs_info->uuid_root = root;
> }
>
> if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
> location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
> root = btrfs_read_tree_root(tree_root, &location);
> if (IS_ERR(root))
> return PTR_ERR(root);
> set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
> fs_info->free_space_root = root;
> }
>
> return 0;
> }
>
2182,2183d2589
< u32 leafsize;
< u32 blocksize;
2192,2193d2597
< struct btrfs_root *extent_root;
< struct btrfs_root *csum_root;
2195,2203d2598
< struct btrfs_root *dev_root;
< struct btrfs_root *quota_root;
< struct btrfs_root *uuid_root;
< struct btrfs_root *log_tree_root;
< struct btrfs_root *free_space_tree_root;
< #ifdef MY_DEF_HERE
< struct btrfs_root *block_group_hint_root;
< #endif
<
2209,2211d2603
< int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
< bool create_uuid_tree;
< bool check_uuid_tree;
2214,2215c2606,2607
< tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
< chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
---
> tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
> chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2233c2625
< ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
---
> ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2241c2633
< ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
---
> ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2247,2250c2639
< #ifdef MY_DEF_HERE
< #else
< ret = percpu_counter_init(&fs_info->bio_counter, 0);
<
---
> ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2255d2643
< #endif
2260,2262d2647
< #ifdef MY_DEF_HERE
< goto fail_delalloc_bytes;
< #else
2264d2648
< #endif
2288a2673
> mutex_init(&fs_info->delete_unused_bgs_mutex);
2290a2676
> mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2292d2677
< init_rwsem(&fs_info->delayed_iput_sem);
2294d2678
< init_completion(&fs_info->kobj_unregister);
2300,2304d2683
< #ifdef MY_DEF_HERE
< atomic_set(&fs_info->nr_extent_maps, 0);
< INIT_LIST_HEAD(&fs_info->extent_map_inode_list);
< spin_lock_init(&fs_info->extent_map_inode_list_lock);
< #endif
2319a2699
> atomic_set(&fs_info->reada_works_cnt, 0);
2320a2701
> fs_info->fs_frozen = 0;
2322c2703
< fs_info->max_inline = 8192 * 1024;
---
> fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2328,2329c2709,2710
< fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
<
---
> fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
> /* readahead state */
2339c2720
< GFP_NOFS);
---
> GFP_KERNEL);
2346,2356c2727
< mutex_init(&fs_info->scrub_lock);
< atomic_set(&fs_info->scrubs_running, 0);
< atomic_set(&fs_info->scrub_pause_req, 0);
< atomic_set(&fs_info->scrubs_paused, 0);
< atomic_set(&fs_info->scrub_cancel_req, 0);
< #ifdef MY_DEF_HERE
< #else
< init_waitqueue_head(&fs_info->replace_wait);
< #endif
< init_waitqueue_head(&fs_info->scrub_pause_wait);
< fs_info->scrub_workers_refcnt = 0;
---
> btrfs_init_scrub(fs_info);
2360,2367c2731
<
< spin_lock_init(&fs_info->balance_lock);
< mutex_init(&fs_info->balance_mutex);
< atomic_set(&fs_info->balance_running, 0);
< atomic_set(&fs_info->balance_pause_req, 0);
< atomic_set(&fs_info->balance_cancel_req, 0);
< fs_info->balance_ctl = NULL;
< init_waitqueue_head(&fs_info->balance_wait_q);
---
> btrfs_init_balance(fs_info);
2374,2394c2738
< fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
< set_nlink(fs_info->btree_inode, 1);
<
< fs_info->btree_inode->i_size = OFFSET_MAX;
< fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
< fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
<
< RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
< extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
< fs_info->btree_inode->i_mapping);
< BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
< extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
<
< BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
<
< BTRFS_I(fs_info->btree_inode)->root = tree_root;
< memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
< sizeof(struct btrfs_key));
< set_bit(BTRFS_INODE_DUMMY,
< &BTRFS_I(fs_info->btree_inode)->runtime_flags);
< btrfs_insert_inode_hash(fs_info->btree_inode);
---
> btrfs_init_btree_inode(fs_info, tree_root);
2405c2749
< fs_info->do_barriers = 1;
---
> set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2408d2751
< mutex_init(&fs_info->ordered_extent_flush_mutex);
2419,2423d2761
< fs_info->dev_replace.lock_owner = 0;
< atomic_set(&fs_info->dev_replace.nesting_level, 0);
< mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
< mutex_init(&fs_info->dev_replace.lock_management_lock);
< mutex_init(&fs_info->dev_replace.lock);
2425,2446c2763,2764
< #ifdef MY_DEF_HERE
< fs_info->metadata_ratio = 50;
< #endif
< #ifdef MY_DEF_HERE
< fs_info->ordered_extent_nr = 0;
< fs_info->delalloc_inodes_nr = 0;
< if (totalram_pages > ((2ULL*1024*1024*1024)/PAGE_SIZE))
< fs_info->flushoncommit_threshold = 0;
< else
< fs_info->flushoncommit_threshold = 1000;
< #endif
<
< spin_lock_init(&fs_info->qgroup_lock);
< mutex_init(&fs_info->qgroup_ioctl_lock);
< fs_info->qgroup_tree = RB_ROOT;
< fs_info->qgroup_op_tree = RB_ROOT;
< INIT_LIST_HEAD(&fs_info->dirty_qgroups);
< fs_info->qgroup_seq = 1;
< fs_info->quota_enabled = 0;
< fs_info->pending_quota_state = 0;
< fs_info->qgroup_ulist = NULL;
< mutex_init(&fs_info->qgroup_rescan_lock);
---
> btrfs_init_dev_replace_locks(fs_info);
> btrfs_init_qgroup(fs_info);
2464c2782
< __setup_root(4096, 4096, 4096, 4096, tree_root,
---
> __setup_root(4096, 4096, 4096, tree_root,
2468a2787,2789
> /*
> * Read super block and check the signature bytes only
> */
2470,2471c2791,2792
< if (!bh) {
< err = -EINVAL;
---
> if (IS_ERR(bh)) {
> err = PTR_ERR(bh);
2474a2796,2799
> /*
> * We want to check the superblock checksum; the type is stored inside.
> * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
> */
2476c2801
< printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
---
> btrfs_err(fs_info, "superblock checksum mismatch");
2481a2807,2811
> /*
> * super_copy is zeroed at allocation time and we never touch the
> * following bytes up to INFO_SIZE; the checksum is calculated from
> * the whole block of INFO_SIZE
> */
2491c2821
< printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
---
> btrfs_err(fs_info, "superblock contains fatal errors");
2499a2830
> /* check FS state, whether FS is broken. */
2502a2834,2837
> /*
> * run through our array of backup supers and set up
> * our ring pointer to the oldest one
> */
2506,2508c2841,2844
< #ifdef MY_DEF_HERE
< fs_info->compress_type = BTRFS_COMPRESS_DEFAULT;
< #else
---
> /*
> * In the long term, we'll store the compression type in the super
> * block, and it'll be used for per file compression control.
> */
2510d2845
< #endif
2512c2847
< ret = btrfs_parse_options(tree_root, options);
---
> ret = btrfs_parse_options(tree_root, options, sb->s_flags);
2521,2540c2856,2858
< printk(KERN_ERR "BTRFS: couldn't mount because of "
< "unsupported optional features (%Lx).\n",
< features);
< err = -EINVAL;
< goto fail_alloc;
< }
<
< if (btrfs_super_leafsize(disk_super) !=
< btrfs_super_nodesize(disk_super)) {
< printk(KERN_ERR "BTRFS: couldn't mount because metadata "
< "blocksizes don't match. node %d leaf %d\n",
< btrfs_super_nodesize(disk_super),
< btrfs_super_leafsize(disk_super));
< err = -EINVAL;
< goto fail_alloc;
< }
< if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
< printk(KERN_ERR "BTRFS: couldn't mount because metadata "
< "blocksize (%d) was too large\n",
< btrfs_super_leafsize(disk_super));
---
> btrfs_err(fs_info,
> "cannot mount because of unsupported optional features (%llx)",
> features);
2551c2869
< printk(KERN_ERR "BTRFS: has skinny extents\n");
---
> btrfs_info(fs_info, "has skinny extents");
2553c2871,2875
< if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
---
> /*
> * flag our filesystem as having big metadata blocks if
> * they are bigger than the page size
> */
> if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
2555c2877,2878
< printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
---
> btrfs_info(fs_info,
> "flagging fs with big metadata feature");
2560d2882
< leafsize = btrfs_super_leafsize(disk_super);
2562,2563c2884,2885
< stripesize = btrfs_super_stripesize(disk_super);
< fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
---
> stripesize = sectorsize;
> fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2565a2888,2891
> /*
> * mixed block groups end up with duplicate but slightly offset
> * extent buffers for the same range. This leads to corruption
> */
2567,2570c2893,2896
< (sectorsize != leafsize)) {
< printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
< "are not allowed for mixed block groups on %s\n",
< sb->s_id);
---
> (sectorsize != nodesize)) {
> btrfs_err(fs_info,
> "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
> nodesize, sectorsize);
2573a2900,2903
> /*
> * We needn't take the lock because there is no other task that will
> * update the flag.
> */
2579,2580c2909,2910
< printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
< "unsupported option features (%Lx).\n",
---
> btrfs_err(fs_info,
> "cannot mount read-write because of unsupported optional features (%llx)",
2588,2656c2918,2920
< fs_info->workers =
< btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
< max_active, 16);
<
< fs_info->delalloc_workers =
< btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
<
< fs_info->flush_workers =
< btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
<
< fs_info->caching_workers =
< btrfs_alloc_workqueue("cache", flags, max_active, 0);
<
< fs_info->submit_workers =
< btrfs_alloc_workqueue("submit", flags,
< min_t(u64, fs_devices->num_devices,
< max_active), 64);
<
< fs_info->fixup_workers =
< btrfs_alloc_workqueue("fixup", flags, 1, 0);
<
< fs_info->endio_workers =
< btrfs_alloc_workqueue("endio", flags, max_active, 4);
< fs_info->endio_meta_workers =
< btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
< #ifdef MY_DEF_HERE
< fs_info->endio_meta_fix_workers =
< btrfs_alloc_workqueue("endio-meta-fix", flags, max_active, 4);
< #endif
< fs_info->endio_meta_write_workers =
< btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
< fs_info->endio_raid56_workers =
< btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
< fs_info->rmw_workers =
< btrfs_alloc_workqueue("rmw", flags, max_active, 2);
< fs_info->endio_write_workers =
< #ifdef MY_DEF_HERE
< btrfs_alloc_workqueue("endio-write", flags, min_t(unsigned long, 4, max_active), 2);
< #else
< btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
< #endif
< fs_info->endio_freespace_worker =
< btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
< fs_info->delayed_workers =
< btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
< fs_info->readahead_workers =
< btrfs_alloc_workqueue("readahead", flags, max_active, 2);
< #ifdef MY_DEF_HERE
< fs_info->reada_path_workers =
< btrfs_alloc_workqueue("reada-path", flags, max_active, 2);
< #endif
< fs_info->qgroup_rescan_workers =
< btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
< fs_info->extent_workers =
< btrfs_alloc_workqueue("extent-refs", flags,
< min_t(u64, fs_devices->num_devices,
< max_active), 8);
<
< if (!(fs_info->workers && fs_info->delalloc_workers &&
< fs_info->submit_workers && fs_info->flush_workers &&
< fs_info->endio_workers && fs_info->endio_meta_workers &&
< fs_info->endio_meta_write_workers &&
< fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
< fs_info->endio_freespace_worker && fs_info->rmw_workers &&
< fs_info->caching_workers && fs_info->readahead_workers &&
< fs_info->fixup_workers && fs_info->delayed_workers &&
< fs_info->fixup_workers && fs_info->extent_workers &&
< fs_info->qgroup_rescan_workers)) {
< err = -ENOMEM;
---
> ret = btrfs_init_workqueues(fs_info, fs_devices);
> if (ret) {
> err = ret;
2662c2926
< 4 * 1024 * 1024 / PAGE_CACHE_SIZE);
---
> SZ_4M / PAGE_CACHE_SIZE);
2665d2928
< tree_root->leafsize = leafsize;
2672,2682d2934
< if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
< printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
< goto fail_sb_buffer;
< }
<
< if (sectorsize != PAGE_SIZE) {
< printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) "
< "found on %s\n", (unsigned long)sectorsize, sb->s_id);
< goto fail_sb_buffer;
< }
<
2687,2688c2939
< printk(KERN_WARNING "BTRFS: failed to read the system "
< "array on %s\n", sb->s_id);
---
> btrfs_err(fs_info, "failed to read the system array: %d", ret);
2692,2693d2942
< blocksize = btrfs_level_size(tree_root,
< btrfs_super_chunk_root_level(disk_super));
2696,2697c2945,2946
< __setup_root(nodesize, leafsize, sectorsize, stripesize,
< chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
---
> __setup_root(nodesize, sectorsize, stripesize, chunk_root,
> fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2701,2705c2950,2956
< blocksize, generation);
< if (!chunk_root->node ||
< !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
< printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
< sb->s_id);
---
> generation);
> if (IS_ERR(chunk_root->node) ||
> !extent_buffer_uptodate(chunk_root->node)) {
> btrfs_err(fs_info, "failed to read chunk root");
> if (!IS_ERR(chunk_root->node))
> free_extent_buffer(chunk_root->node);
> chunk_root->node = NULL;
2716,2717c2967
< printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
< sb->s_id);
---
> btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2721c2971,2975
< btrfs_close_extra_devices(fs_info, fs_devices, 0);
---
> /*
> * keep the device that is marked to be the target device for the
> * dev_replace procedure
> */
> btrfs_close_extra_devices(fs_devices, 0);
2724,2725c2978
< printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
< sb->s_id);
---
> btrfs_err(fs_info, "failed to read devices");
2730,2731d2982
< blocksize = btrfs_level_size(tree_root,
< btrfs_super_root_level(disk_super));
2736,2741c2987,2993
< blocksize, generation);
< if (!tree_root->node ||
< !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
< printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
< sb->s_id);
<
---
> generation);
> if (IS_ERR(tree_root->node) ||
> !extent_buffer_uptodate(tree_root->node)) {
> btrfs_warn(fs_info, "failed to read tree root");
> if (!IS_ERR(tree_root->node))
> free_extent_buffer(tree_root->node);
> tree_root->node = NULL;
2749,2774c3001,3005
< location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
< location.type = BTRFS_ROOT_ITEM_KEY;
< location.offset = 0;
<
< extent_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR(extent_root)) {
< ret = PTR_ERR(extent_root);
< goto recovery_tree_root;
< }
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &extent_root->state);
< fs_info->extent_root = extent_root;
<
< location.objectid = BTRFS_DEV_TREE_OBJECTID;
< dev_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR(dev_root)) {
< ret = PTR_ERR(dev_root);
< goto recovery_tree_root;
< }
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &dev_root->state);
< fs_info->dev_root = dev_root;
< btrfs_init_devices_late(fs_info);
<
< location.objectid = BTRFS_CSUM_TREE_OBJECTID;
< csum_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR(csum_root)) {
< ret = PTR_ERR(csum_root);
---
> mutex_lock(&tree_root->objectid_mutex);
> ret = btrfs_find_highest_objectid(tree_root,
> &tree_root->highest_objectid);
> if (ret) {
> mutex_unlock(&tree_root->objectid_mutex);
2777,2778d3007
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &csum_root->state);
< fs_info->csum_root = csum_root;
2780,2787c3009
< location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
< quota_root = btrfs_read_tree_root(tree_root, &location);
< if (!IS_ERR(quota_root)) {
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &quota_root->state);
< fs_info->quota_enabled = 1;
< fs_info->pending_quota_state = 1;
< fs_info->quota_root = quota_root;
< }
---
> ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2789,2817c3011
< location.objectid = BTRFS_UUID_TREE_OBJECTID;
< uuid_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR(uuid_root)) {
< ret = PTR_ERR(uuid_root);
< if (ret != -ENOENT)
< goto recovery_tree_root;
< create_uuid_tree = true;
< check_uuid_tree = false;
< } else {
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &uuid_root->state);
< fs_info->uuid_root = uuid_root;
< create_uuid_tree = false;
< check_uuid_tree =
< generation != btrfs_super_uuid_tree_generation(disk_super);
< }
<
< #ifdef MY_DEF_HERE
< if (!fs_info->no_block_group_hint) {
< spin_lock_init(&fs_info->block_group_hint_tree_lock);
< location.objectid = BTRFS_BLOCK_GROUP_HINT_TREE_OBJECTID;
< block_group_hint_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR_OR_NULL(block_group_hint_root))
< fs_info->block_group_hint_root = NULL;
< else {
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &block_group_hint_root->state);
< fs_info->block_group_hint_root = block_group_hint_root;
< }
< }
< #endif
---
> mutex_unlock(&tree_root->objectid_mutex);
2819,2828c3013,3015
< if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
< location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
< free_space_tree_root = btrfs_read_tree_root(tree_root, &location);
< if (IS_ERR(free_space_tree_root)) {
< ret = PTR_ERR(free_space_tree_root);
< goto recovery_tree_root;
< }
< set_bit(BTRFS_ROOT_TRACK_DIRTY, &free_space_tree_root->state);
< fs_info->free_space_root = free_space_tree_root;
< }
---
> ret = btrfs_read_roots(fs_info, tree_root);
> if (ret)
> goto recovery_tree_root;
2835c3022
< printk(KERN_WARNING "BTRFS: failed to recover balance\n");
---
> btrfs_err(fs_info, "failed to recover balance: %d", ret);
2841,2842c3028
< printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
< ret);
---
> btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2848c3034
< pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
---
> btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
2852c3038
< btrfs_close_extra_devices(fs_info, fs_devices, 1);
---
> btrfs_close_extra_devices(fs_devices, 1);
2854c3040
< ret = btrfs_sysfs_add_one(fs_info);
---
> ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2856c3042,3043
< pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
---
> btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
> ret);
2859a3047,3059
> ret = btrfs_sysfs_add_device(fs_devices);
> if (ret) {
> btrfs_err(fs_info, "failed to init sysfs device interface: %d",
> ret);
> goto fail_fsdev_sysfs;
> }
>
> ret = btrfs_sysfs_add_mounted(fs_info);
> if (ret) {
> btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
> goto fail_fsdev_sysfs;
> }
>
2862c3062
< printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret);
---
> btrfs_err(fs_info, "failed to initialize space info: %d", ret);
2866,2881c3066
< #ifdef MY_DEF_HERE
< fs_info->can_fix_meta_key = CAN_FIX_META_KEY;
< #endif
<
< ret = btrfs_read_block_groups(extent_root);
<
< #ifdef MY_DEF_HERE
< btrfs_destroy_workqueue(fs_info->reada_path_workers);
< fs_info->reada_path_workers = NULL;
< #endif
< #ifdef MY_DEF_HERE
< fs_info->can_fix_meta_key = STOP_FIX_META_KEY;
< btrfs_destroy_workqueue(fs_info->endio_meta_fix_workers);
< fs_info->endio_meta_fix_workers = NULL;
< #endif
<
---
> ret = btrfs_read_block_groups(fs_info->extent_root);
2883c3068
< printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
---
> btrfs_err(fs_info, "failed to read block groups: %d", ret);
2891,2892c3076,3079
< printk(KERN_WARNING "BTRFS: "
< "too many missing devices, writeable mount is not allowed\n");
---
> btrfs_warn(fs_info,
> "missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
> fs_info->fs_devices->missing_devices,
> fs_info->num_tolerated_disk_barrier_failures);
2907,2908c3094,3095
< if (!btrfs_test_opt(tree_root, SSD) &&
< !btrfs_test_opt(tree_root, NOSSD) &&
---
> if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
> !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
2910,2911c3097
< printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
< "mode\n");
---
> btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
2915,2916c3101,3105
< if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
< btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
---
> /*
> * Mount does not set all options immediately; we can do it now and do
> * not have to wait for transaction commit
> */
> btrfs_apply_pending_changes(fs_info);
2919c3108
< if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
---
> if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
2921c3110
< btrfs_test_opt(tree_root,
---
> btrfs_test_opt(tree_root->fs_info,
2926,2927c3115,3117
< printk(KERN_WARNING "BTRFS: failed to initialize"
< " integrity check module %s\n", sb->s_id);
---
> btrfs_warn(fs_info,
> "failed to initialize integrity check module: %d",
> ret);
2934,2967c3124,3127
< if (btrfs_super_log_root(disk_super) != 0) {
< u64 bytenr = btrfs_super_log_root(disk_super);
<
< if (fs_devices->rw_devices == 0) {
< printk(KERN_WARNING "BTRFS: log replay required "
< "on RO media\n");
< err = -EIO;
< goto fail_qgroup;
< }
< blocksize =
< btrfs_level_size(tree_root,
< btrfs_super_log_root_level(disk_super));
<
< log_tree_root = btrfs_alloc_root(fs_info);
< if (!log_tree_root) {
< err = -ENOMEM;
< goto fail_qgroup;
< }
<
< __setup_root(nodesize, leafsize, sectorsize, stripesize,
< log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
<
< log_tree_root->node = read_tree_block(tree_root, bytenr,
< blocksize,
< generation + 1);
< if (!log_tree_root->node ||
< !extent_buffer_uptodate(log_tree_root->node)) {
< printk(KERN_ERR "BTRFS: failed to read log tree\n");
< free_extent_buffer(log_tree_root->node);
< kfree(log_tree_root);
< goto fail_qgroup;
< }
<
< ret = btrfs_recover_log_trees(log_tree_root);
---
> /* do not make disk changes in a broken FS or when nologreplay is given */
> if (btrfs_super_log_root(disk_super) != 0 &&
> !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
> ret = btrfs_replay_log(fs_info, fs_devices);
2969,2972c3129
< btrfs_error(tree_root->fs_info, ret,
< "Failed to recover log tree");
< free_extent_buffer(log_tree_root->node);
< kfree(log_tree_root);
---
> err = ret;
2975,2980d3131
<
< if (sb->s_flags & MS_RDONLY) {
< ret = btrfs_commit_super(tree_root);
< if (ret)
< goto fail_qgroup;
< }
2996,2997c3147,3148
< printk(KERN_WARNING
< "BTRFS: failed to recover relocation\n");
---
> btrfs_warn(fs_info, "failed to recover relocation: %d",
> ret);
3016c3167
< if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
---
> if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3036c3187
< if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
---
> if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
3038c3189
< pr_info("BTRFS: creating free space tree\n");
---
> btrfs_info(fs_info, "creating free space tree");
3041,3042c3192,3193
< pr_warn("BTRFS: failed to create free space tree %d\n",
< ret);
---
> btrfs_warn(fs_info,
> "failed to create free space tree: %d", ret);
3059c3210
< printk(KERN_WARNING "BTRFS: failed to resume balance\n");
---
> btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3066c3217
< pr_warn("BTRFS: failed to resume dev_replace\n");
---
> btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3073,3074c3224,3225
< if (create_uuid_tree) {
< pr_info("BTRFS: creating UUID tree\n");
---
> if (!fs_info->uuid_root) {
> btrfs_info(fs_info, "creating UUID tree");
3077,3078c3228,3229
< pr_warn("BTRFS: failed to create the UUID tree %d\n",
< ret);
---
> btrfs_warn(fs_info,
> "failed to create the UUID tree: %d", ret);
3082,3084c3233,3236
< } else if (check_uuid_tree ||
< btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
< pr_info("BTRFS: checking UUID tree\n");
---
> } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
> fs_info->generation !=
> btrfs_super_uuid_tree_generation(disk_super)) {
> btrfs_info(fs_info, "checking UUID tree");
3087,3088c3239,3240
< pr_warn("BTRFS: failed to check the UUID tree %d\n",
< ret);
---
> btrfs_warn(fs_info,
> "failed to check the UUID tree: %d", ret);
3093c3245
< fs_info->update_uuid_tree_gen = 1;
---
> set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3094a3247
> set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3096c3249,3253
< fs_info->open = 1;
---
> /*
> * backuproot only affect mount behavior, and if open_ctree succeeded,
> * no need to keep the flag
> */
> btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3108a3266,3269
> /*
> * make sure we're done with the btree inode before we stop our
> * kthreads
> */
3112c3273,3276
< btrfs_sysfs_remove_one(fs_info);
---
> btrfs_sysfs_remove_mounted(fs_info);
>
> fail_fsdev_sysfs:
> btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3129,3130d3292
< #ifdef MY_DEF_HERE
< #else
3133d3294
< #endif
3145d3305
<
3149c3309
< if (!btrfs_test_opt(tree_root, RECOVERY))
---
> if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
3153a3314
> /* don't use the log in recovery mode; it won't be valid */
3155a3317
> /* we can't trust the free space cache either */
3173,3174c3335,3336
< printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
< "I/O error on %s\n",
---
> btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
> "lost page write due to IO error on %s",
3176c3338,3340
<
---
> /* note, we don't set_buffer_write_io_error because we have
> * our own ways of dealing with the IO errors
> */
3183a3348,3378
> int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
> struct buffer_head **bh_ret)
> {
> struct buffer_head *bh;
> struct btrfs_super_block *super;
> u64 bytenr;
>
> bytenr = btrfs_sb_offset(copy_num);
> if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
> return -EINVAL;
>
> bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
> /*
> * If we fail to read from the underlying devices, as of now
> * the best option we have is to mark it EIO.
> */
> if (!bh)
> return -EIO;
>
> super = (struct btrfs_super_block *)bh->b_data;
> if (btrfs_super_bytenr(super) != bytenr ||
> btrfs_super_magic(super) != BTRFS_MAGIC) {
> brelse(bh);
> return -EINVAL;
> }
>
> *bh_ret = bh;
> return 0;
> }
>
>
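For context on the btrfs_read_dev_one_super() helper added above: copy_num
selects one of the fixed superblock mirror locations on the device. A minimal
standalone sketch of the offset scheme (an editorial illustration assuming the
usual BTRFS_SUPER_INFO_OFFSET and BTRFS_SUPER_MIRROR_SHIFT constants, not part
of the kernel diff):

    #include <stdio.h>

    #define BTRFS_SUPER_INFO_OFFSET  (64 * 1024ULL)  /* primary copy at 64KiB */
    #define BTRFS_SUPER_MIRROR_MAX   3
    #define BTRFS_SUPER_MIRROR_SHIFT 12

    /* mirror copies land at 64KiB, 64MiB and 256GiB */
    static unsigned long long sb_offset(int mirror)
    {
        unsigned long long start = 16 * 1024ULL;

        if (mirror)
            return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
        return BTRFS_SUPER_INFO_OFFSET;
    }

    int main(void)
    {
        for (int i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++)
            printf("super copy %d at byte %llu\n", i, sb_offset(i));
        return 0;
    }

This is why the helper bails out with -EINVAL when the requested copy would
sit at or past the end of the device.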
3191c3386
< u64 bytenr;
---
> int ret = -EINVAL;
3192a3388,3392
> /* we would like to check all the supers, but that would make
> * a btrfs mount succeed after a mkfs from a different FS.
> * So, we need to add a special mount option to scan for
> * later supers, using BTRFS_SUPER_MIRROR_MAX instead
> */
3194,3200c3394,3395
< bytenr = btrfs_sb_offset(i);
< if (bytenr + BTRFS_SUPER_INFO_SIZE >=
< i_size_read(bdev->bd_inode))
< break;
< bh = __bread(bdev, bytenr / 4096,
< BTRFS_SUPER_INFO_SIZE);
< if (!bh)
---
> ret = btrfs_read_dev_one_super(bdev, i, &bh);
> if (ret)
3204,3208d3398
< if (btrfs_super_bytenr(super) != bytenr ||
< btrfs_super_magic(super) != BTRFS_MAGIC) {
< brelse(bh);
< continue;
< }
3217a3408,3411
>
> if (!latest)
> return ERR_PTR(ret);
>
3220a3415,3425
> /*
> * this should be called twice, once with wait == 0 and
> * once with wait == 1. When wait == 0 is done, all the buffer heads
> * we write are pinned.
> *
> * They are released when wait == 1 is done.
> * max_mirrors must be the same for both runs, and it indicates how
> * many supers on this one device should be written.
> *
> * max_mirrors == 0 means to write them all.
> */
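The wait == 0 / wait == 1 contract described above is easiest to read from the
caller's side. A simplified sketch of the two-pass pattern (reduced signature
and error handling assumed here, not write_all_supers()'s exact code):

    struct btrfs_device *dev;
    int total_errors = 0;

    /* pass 1: submit and pin every super block buffer head */
    list_for_each_entry(dev, head, dev_list)
        if (write_dev_supers(dev, sb, 0, max_mirrors))  /* wait == 0 */
            total_errors++;

    /* pass 2: wait for completion and drop the pinned references */
    list_for_each_entry(dev, head, dev_list)
        if (write_dev_supers(dev, sb, 1, max_mirrors))  /* wait == 1 */
            total_errors++;

Splitting submit and wait keeps the super writes to all devices in flight
concurrently instead of serializing them one device at a time.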
3237c3442,3443
< if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
---
> if (bytenr + BTRFS_SUPER_INFO_SIZE >=
> device->commit_total_bytes)
3250a3457
> /* drop our reference */
3252a3460
> /* drop the reference from the wait == 0 run */
3264a3473,3476
> /*
> * one reference for us, and we leave it for the
> * caller
> */
3268,3269c3480,3482
< printk(KERN_ERR "BTRFS: couldn't get super "
< "buffer head for bytenr %Lu\n", bytenr);
---
> btrfs_err(device->dev_root->fs_info,
> "couldn't get super buffer head for bytenr %llu",
> bytenr);
3275a3489
> /* one reference for submit_bh */
3283a3498,3501
> /*
> * we fua the first super. The others we allow
> * to go down lazy.
> */
3293a3512,3515
> /*
> * endio for the write_dev_flush, this will wake anyone waiting
> * for the barrier when it is done
> */
3305a3528,3534
> /*
> * trigger flushes for one of the devices. If you pass wait == 0, the flushes are
> * sent down. With wait == 1, it waits for the previous flush.
> *
> * any device where the flush fails with eopnotsupp is flagged as not-barrier
> * capable
> */
3330a3560
> /* drop the reference from the wait == 0 run */
3336a3567,3570
> /*
> * one reference for us, and we leave it for the
> * caller
> */
3353a3588,3591
> /*
> * send an empty flush down to each device in parallel,
> * then wait for them
> */
3361a3600
> /* send down all the barriers */
3377a3617
> /* wait for all the barriers */
3397a3638,3666
> int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
> {
> int raid_type;
> int min_tolerated = INT_MAX;
>
> if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
> (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
> min_tolerated = min(min_tolerated,
> btrfs_raid_array[BTRFS_RAID_SINGLE].
> tolerated_failures);
>
> for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
> if (raid_type == BTRFS_RAID_SINGLE)
> continue;
> if (!(flags & btrfs_raid_group[raid_type]))
> continue;
> min_tolerated = min(min_tolerated,
> btrfs_raid_array[raid_type].
> tolerated_failures);
> }
>
> if (min_tolerated == INT_MAX) {
> pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
> min_tolerated = 0;
> }
>
> return min_tolerated;
> }
>
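A rough userspace restatement of what the helper above computes, with the
per-profile tolerated_failures values assumed from btrfs_raid_array
(single/raid0/dup: 0, raid1/raid10/raid5: 1, raid6: 2); the result for a
mixed flags mask is the minimum across every profile present:

    #include <stdio.h>

    enum { R_SINGLE, R_RAID0, R_RAID1, R_DUP, R_RAID10, R_RAID5, R_RAID6, R_NR };

    static const int tolerated[R_NR] = {
        [R_SINGLE] = 0, [R_RAID0] = 0, [R_RAID1] = 1, [R_DUP] = 0,
        [R_RAID10] = 1, [R_RAID5] = 1, [R_RAID6]  = 2,
    };

    static int min_tolerated(unsigned int mask)
    {
        int min = 1 << 30;

        for (int i = 0; i < R_NR; i++)
            if ((mask & (1U << i)) && tolerated[i] < min)
                min = tolerated[i];
        return min == (1 << 30) ? 0 : min;  /* unknown flags: tolerate none */
    }

    int main(void)
    {
        /* RAID1 metadata mixed with RAID6 data: min(1, 2) == 1 */
        printf("%d\n", min_tolerated((1U << R_RAID1) | (1U << R_RAID6)));
        return 0;
    }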
3407d3675
< int num_types = 4;
3413c3681
< for (i = 0; i < num_types; i++) {
---
> for (i = 0; i < ARRAY_SIZE(types); i++) {
3431,3432c3699
< if (!list_empty(&sinfo->block_groups[c])) {
< u64 flags;
---
> u64 flags;
3434,3457c3701,3713
< btrfs_get_block_group_info(
< &sinfo->block_groups[c], &space);
< if (space.total_bytes == 0 ||
< space.used_bytes == 0)
< continue;
< flags = space.flags;
<
< if (num_tolerated_disk_barrier_failures > 0 &&
< ((flags & (BTRFS_BLOCK_GROUP_DUP |
< BTRFS_BLOCK_GROUP_RAID0)) ||
< ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
< == 0)))
< num_tolerated_disk_barrier_failures = 0;
< else if (num_tolerated_disk_barrier_failures > 1) {
< if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
< BTRFS_BLOCK_GROUP_RAID5 |
< BTRFS_BLOCK_GROUP_RAID10)) {
< num_tolerated_disk_barrier_failures = 1;
< } else if (flags &
< BTRFS_BLOCK_GROUP_RAID6) {
< num_tolerated_disk_barrier_failures = 2;
< }
< }
< }
---
> if (list_empty(&sinfo->block_groups[c]))
> continue;
>
> btrfs_get_block_group_info(&sinfo->block_groups[c],
> &space);
> if (space.total_bytes == 0 || space.used_bytes == 0)
> continue;
> flags = space.flags;
>
> num_tolerated_disk_barrier_failures = min(
> num_tolerated_disk_barrier_failures,
> btrfs_get_num_tolerated_disk_barrier_failures(
> flags));
3477c3733
< do_barriers = !btrfs_test_opt(root, NOBARRIER);
---
> do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
3492c3748
< btrfs_error(root->fs_info, ret,
---
> btrfs_handle_fs_error(root->fs_info, ret,
3509,3510c3765,3768
< btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
< btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
---
> btrfs_set_stack_device_total_bytes(dev_item,
> dev->commit_total_bytes);
> btrfs_set_stack_device_bytes_used(dev_item,
> dev->commit_bytes_used);
3529c3787,3788
< btrfs_error(root->fs_info, -EIO,
---
> /* FUA is masked off if unsupported and can't be the reason */
> btrfs_handle_fs_error(root->fs_info, -EIO,
3547c3806
< btrfs_error(root->fs_info, -EIO,
---
> btrfs_handle_fs_error(root->fs_info, -EIO,
3559a3819
> /* Drop a fs root from the radix tree and free it. */
3571c3831
< if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
---
> if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3572a3833,3839
> if (root->reloc_root) {
> free_extent_buffer(root->reloc_root->node);
> free_extent_buffer(root->reloc_root->commit_root);
> btrfs_put_fs_root(root->reloc_root);
> root->reloc_root = NULL;
> }
> }
3583c3850
< iput(root->cache_inode);
---
> iput(root->ino_cache_inode);
3625c3892
<
---
> /* Avoid grabbing roots in dead_roots */
3630c3897
<
---
> /* grab all the search results for later use */
3646a3914
> /* release the uncleaned roots due to error */
3662a3931
> /* wait until ongoing cleanup work is done */
3672c3941
< int close_ctree(struct btrfs_root *root)
---
> void close_ctree(struct btrfs_root *root)
3677,3678c3946
< fs_info->closing = 1;
< smp_mb();
---
> set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3679a3948,3951
> /* wait for the qgroup rescan worker to stop */
> btrfs_qgroup_wait_for_completion(fs_info, false);
>
> /* wait for the uuid_scan task to finish */
3681c3953
<
---
> /* avoid complaints from lockdep et al., set sem back to initial state */
3683a3956
> /* pause restriper - we want to resume on mount */
3689a3963
> /* wait for any defraggers to finish */
3692a3967
> /* clear out the rbtree of defraggable inodes */
3697a3973,3979
> /*
> * If the cleaner thread is stopped and there are
> * block groups queued for removal, the deletion will be
> * skipped when we quit the cleaner thread.
> */
> btrfs_delete_unused_bgs(root->fs_info);
>
3700c3982
< btrfs_err(root->fs_info, "commit super ret %d", ret);
---
> btrfs_err(fs_info, "commit super ret %d", ret);
3709,3710c3991
< fs_info->closing = 2;
< smp_mb();
---
> set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3712c3993
< btrfs_free_qgroup_config(root->fs_info);
---
> btrfs_free_qgroup_config(fs_info);
3715c3996
< btrfs_info(root->fs_info, "at unmount delalloc count %lld",
---
> btrfs_info(fs_info, "at unmount delalloc count %lld",
3719c4000,4001
< btrfs_sysfs_remove_one(fs_info);
---
> btrfs_sysfs_remove_mounted(fs_info);
> btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3726a4009,4012
> /*
> * we must make sure there is no read request to
> * submit after we stop all workers.
> */
3730c4016
< fs_info->open = 0;
---
> clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3736c4022
< if (btrfs_test_opt(root, CHECK_INTEGRITY))
---
> if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
3745,3746d4030
< #ifdef MY_DEF_HERE
< #else
3748d4031
< #endif
3754c4037
< btrfs_free_block_rsv(root, root->orphan_block_rsv);
---
> __btrfs_free_block_rsv(root->orphan_block_rsv);
3767d4049
< return 0;
3787,3791d4068
< int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
< {
< return set_extent_buffer_uptodate(buf);
< }
<
3799c4076,4080
<
---
> /*
> * This is a fast path so only do this check if we have sanity tests
> * enabled. Normal people shouldn't be marking dummy buffers as dirty
> * outside of the sanity tests.
> */
3806,3807c4087
< WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
< "found %llu running %llu\n",
---
> WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3814d4093
<
3826c4105,4108
<
---
> /*
> * looks as though older kernels can get into trouble with
> * this code; they end up stuck in balance_dirty_pages forever
> */
3841d4122
< return;
3857c4138
< return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
---
> return btree_read_extent_buffer_pages(root, buf, parent_transid);
3863,3864c4144,4283
<
< return 0;
---
> struct btrfs_super_block *sb = fs_info->super_copy;
> u64 nodesize = btrfs_super_nodesize(sb);
> u64 sectorsize = btrfs_super_sectorsize(sb);
> int ret = 0;
>
> if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
> printk(KERN_ERR "BTRFS: no valid FS found\n");
> ret = -EINVAL;
> }
> if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
> printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
> btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
> if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
> printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
> btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
> ret = -EINVAL;
> }
> if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
> printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
> btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
> ret = -EINVAL;
> }
> if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
> printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
> btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
> ret = -EINVAL;
> }
>
> /*
> * Check sectorsize and nodesize first; other checks will need them.
> * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
> */
> if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
> sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
> printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
> ret = -EINVAL;
> }
> /* Only PAGE SIZE is supported yet */
> if (sectorsize != PAGE_CACHE_SIZE) {
> printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
> sectorsize, PAGE_CACHE_SIZE);
> ret = -EINVAL;
> }
> if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
> nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
> printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
> ret = -EINVAL;
> }
> if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
> printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
> le32_to_cpu(sb->__unused_leafsize),
> nodesize);
> ret = -EINVAL;
> }
>
> /* Root alignment check */
> if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
> printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
> btrfs_super_root(sb));
> ret = -EINVAL;
> }
> if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
> printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
> btrfs_super_chunk_root(sb));
> ret = -EINVAL;
> }
> if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
> printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
> btrfs_super_log_root(sb));
> ret = -EINVAL;
> }
>
> if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
> printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
> fs_info->fsid, sb->dev_item.fsid);
> ret = -EINVAL;
> }
>
> /*
> * Hint to catch really bogus numbers, bitflips or so; more exact checks are
> * done later
> */
> if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
> btrfs_err(fs_info, "bytes_used is too small %llu",
> btrfs_super_bytes_used(sb));
> ret = -EINVAL;
> }
> if (!is_power_of_2(btrfs_super_stripesize(sb))) {
> btrfs_err(fs_info, "invalid stripesize %u",
> btrfs_super_stripesize(sb));
> ret = -EINVAL;
> }
> if (btrfs_super_num_devices(sb) > (1UL << 31))
> printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
> btrfs_super_num_devices(sb));
> if (btrfs_super_num_devices(sb) == 0) {
> printk(KERN_ERR "BTRFS: number of devices is 0\n");
> ret = -EINVAL;
> }
>
> if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
> printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
> btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
> ret = -EINVAL;
> }
>
> /*
> * Obvious sys_chunk_array corruptions, it must hold at least one key
> * and one chunk
> */
> if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
> printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
> btrfs_super_sys_array_size(sb),
> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
> ret = -EINVAL;
> }
> if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
> + sizeof(struct btrfs_chunk)) {
> printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
> btrfs_super_sys_array_size(sb),
> sizeof(struct btrfs_disk_key)
> + sizeof(struct btrfs_chunk));
> ret = -EINVAL;
> }
>
> /*
> * The generation is a global counter; we'll trust it more than the others
> * but it's still possible that it's the one that's wrong.
> */
> if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
> printk(KERN_WARNING
> "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
> btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
> if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
> && btrfs_super_cache_generation(sb) != (u64)-1)
> printk(KERN_WARNING
> "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
> btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
>
> return ret;
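The sectorsize/nodesize constraints enforced above reduce to a pair of
power-of-two range checks. A compact restatement (editorial sketch assuming
BTRFS_MAX_METADATA_BLOCKSIZE is 64K):

    #include <stdbool.h>

    static bool sb_sizes_valid(unsigned long long sectorsize,
                               unsigned long long nodesize)
    {
        const unsigned long long max = 64 * 1024;  /* BTRFS_MAX_METADATA_BLOCKSIZE */

        /* sectorsize: power of two (x & (x - 1) == 0) in [4K, 64K] */
        if (sectorsize < 4096 || sectorsize > max ||
            (sectorsize & (sectorsize - 1)))
            return false;
        /* nodesize: power of two in [sectorsize, 64K] */
        if (nodesize < sectorsize || nodesize > max ||
            (nodesize & (nodesize - 1)))
            return false;
        return true;
    }

Note the stricter runtime condition in the hunk itself: even a valid
sectorsize is rejected unless it equals PAGE_CACHE_SIZE, since subpage block
sizes are not supported yet.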
3875a4295
> /* cleanup FS via transaction */
3884c4304,4307
<
---
> /*
> * This will just short-circuit the ordered completion stuff, which will
> * make sure the ordered extent gets properly cleaned up.
> */
3933a4357
> struct btrfs_delayed_ref_node *tmp;
3949,3951c4373,4374
< while ((node = rb_first(&head->ref_root)) != NULL) {
< ref = rb_entry(node, struct btrfs_delayed_ref_node,
< rb_node);
---
> list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
> list) {
3953c4376,4378
< rb_erase(&ref->rb_node, &head->ref_root);
---
> list_del(&ref->list);
> if (!list_empty(&ref->add_list))
> list_del(&ref->add_list);
4050c4475
< clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
---
> clear_extent_bits(dirty_pages, start, end, mark);
4052,4054c4477,4478
< eb = btrfs_find_tree_block(root, start,
< root->leafsize);
< start += root->leafsize;
---
> eb = btrfs_find_tree_block(root->fs_info, start);
> start += root->nodesize;
4086c4510
< clear_extent_dirty(unpin, start, end, GFP_NOFS);
---
> clear_extent_dirty(unpin, start, end);
4102a4527,4593
> static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
> {
> struct inode *inode;
>
> inode = cache->io_ctl.inode;
> if (inode) {
> invalidate_inode_pages2(inode->i_mapping);
> BTRFS_I(inode)->generation = 0;
> cache->io_ctl.inode = NULL;
> iput(inode);
> }
> btrfs_put_block_group(cache);
> }
>
> void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
> struct btrfs_root *root)
> {
> struct btrfs_block_group_cache *cache;
>
> spin_lock(&cur_trans->dirty_bgs_lock);
> while (!list_empty(&cur_trans->dirty_bgs)) {
> cache = list_first_entry(&cur_trans->dirty_bgs,
> struct btrfs_block_group_cache,
> dirty_list);
> if (!cache) {
> btrfs_err(root->fs_info,
> "orphan block group dirty_bgs list");
> spin_unlock(&cur_trans->dirty_bgs_lock);
> return;
> }
>
> if (!list_empty(&cache->io_list)) {
> spin_unlock(&cur_trans->dirty_bgs_lock);
> list_del_init(&cache->io_list);
> btrfs_cleanup_bg_io(cache);
> spin_lock(&cur_trans->dirty_bgs_lock);
> }
>
> list_del_init(&cache->dirty_list);
> spin_lock(&cache->lock);
> cache->disk_cache_state = BTRFS_DC_ERROR;
> spin_unlock(&cache->lock);
>
> spin_unlock(&cur_trans->dirty_bgs_lock);
> btrfs_put_block_group(cache);
> spin_lock(&cur_trans->dirty_bgs_lock);
> }
> spin_unlock(&cur_trans->dirty_bgs_lock);
>
> while (!list_empty(&cur_trans->io_bgs)) {
> cache = list_first_entry(&cur_trans->io_bgs,
> struct btrfs_block_group_cache,
> io_list);
> if (!cache) {
> btrfs_err(root->fs_info,
> "orphan block group on io_bgs list");
> return;
> }
>
> list_del_init(&cache->io_list);
> spin_lock(&cache->lock);
> cache->disk_cache_state = BTRFS_DC_ERROR;
> spin_unlock(&cache->lock);
> btrfs_cleanup_bg_io(cache);
> }
> }
>
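btrfs_cleanup_dirty_bgs() above follows the usual drain pattern for a
spinlock-protected list whose teardown work can sleep: pop entries under the
lock, drop the lock around the sleeping cleanup (iput() and friends), then
re-take it before touching the list again. In outline (hypothetical item type
and release_item() helper, not the kernel's):

    struct item {
        struct list_head link;
    };

    static void drain(spinlock_t *lock, struct list_head *head)
    {
        struct item *it;

        spin_lock(lock);
        while (!list_empty(head)) {
            it = list_first_entry(head, struct item, link);
            list_del_init(&it->link);
            spin_unlock(lock);  /* the cleanup below may sleep */
            release_item(it);   /* hypothetical sleeping cleanup */
            spin_lock(lock);
        }
        spin_unlock(lock);
    }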
4105a4597,4600
> btrfs_cleanup_dirty_bgs(cur_trans, root);
> ASSERT(list_empty(&cur_trans->dirty_bgs));
> ASSERT(list_empty(&cur_trans->io_bgs));
>
4124a4620,4623
> /*
> memset(cur_trans, 0, sizeof(*cur_trans));
> kmem_cache_free(btrfs_transaction_cachep, cur_trans);
> */
4148c4647,4650
<
---
> /*
> * We wait for 0 num_writers since we don't hold a trans
> * handle open currently for this transaction.
> */
4177c4679
< static struct extent_io_ops btree_extent_io_ops = {
---
> static const struct extent_io_ops btree_extent_io_ops = {
4181c4683
<
---
> /* note we're sharing with inode.c for the merge bio hook */
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment