* [patch 0/9] btrfs: More error handling patches
@ 2011-08-10 23:20 Jeff Mahoney
2011-08-10 23:20 ` [patch 1/9] btrfs: Add btrfs_panic() Jeff Mahoney
` (8 more replies)
0 siblings, 9 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
Hi all -
The following 9 patches add more error handling to the btrfs code:
- Add btrfs_panic
- Catch locking failures in {set,clear}_extent_bit
- Push up set_extent_bit errors to callers
- Push up lock_extent errors to callers
- Push up clear_extent_bit errors to callers
- Push up unlock_extent errors to callers
- Make pin_down_extent return void
- Push up btrfs_pin_extent errors to callers
- Push up non-looped btrfs_transaction_start errors to callers
-Jeff
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 1/9] btrfs: Add btrfs_panic()
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 2/9] btrfs: Catch locking failures in {set,clear}_extent_bit Jeff Mahoney
` (7 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
As part of the effort to eliminate BUG_ON as an error handling
technique, we need to determine which errors are actual logic errors,
which are on-disk corruption, and which are normal runtime errors
e.g. -ENOMEM.
Annotating these error cases is helpful to understand and report them.
This patch adds a btrfs_panic() routine that will either panic
or BUG depending on the new -ofatal_errors={panic,bug} mount option.
Since there are still so many BUG_ONs, it defaults to BUG for now but I
expect that to change once the error handling effort has made
significant progress.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/ctree.h | 11 +++++++++++
fs/btrfs/super.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 57 insertions(+), 1 deletion(-)
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1363,6 +1363,7 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
+#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 18)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2650,6 +2651,16 @@ do { \
__btrfs_std_error((fs_info), __func__, __LINE__, (errno));\
} while (0)
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int errno, const char *fmt, ...);
+
+#define btrfs_panic(fs_info, errno, fmt, args...) \
+do { \
+ struct btrfs_fs_info *_i = (fs_info); \
+ __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
+ BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
+} while (0)
+
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -74,6 +74,9 @@ static const char *btrfs_decode_error(st
case -EROFS:
errstr = "Readonly filesystem";
break;
+ case -EEXIST:
+ errstr = "Object already exists";
+ break;
default:
if (nbuf) {
if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
@@ -143,6 +146,33 @@ void __btrfs_std_error(struct btrfs_fs_i
btrfs_handle_error(fs_info);
}
+/*
+ * __btrfs_panic decodes unexpected, fatal errors from the caller,
+ * issues an alert, and either panics or BUGs, depending on mount options.
+ */
+void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int errno, const char *fmt, ...)
+{
+ struct super_block *sb = fs_info->sb;
+ char nbuf[16];
+ const char *errstr;
+ struct va_format vaf = { .fmt = fmt };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ errstr = btrfs_decode_error(fs_info, errno, nbuf);
+ if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
+ panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+ sb->s_id, function, line, &vaf, errstr);
+
+ printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
+ sb->s_id, function, line, &vaf, errstr);
+ va_end(args);
+ /* Caller calls BUG() */
+}
+
static void btrfs_put_super(struct super_block *sb)
{
struct btrfs_root *root = btrfs_sb(sb);
@@ -162,7 +192,7 @@ enum {
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
- Opt_inode_cache, Opt_err,
+ Opt_inode_cache, Opt_fatal_errors, Opt_err,
};
static match_table_t tokens = {
@@ -195,6 +225,7 @@ static match_table_t tokens = {
{Opt_subvolrootid, "subvolrootid=%d"},
{Opt_defrag, "autodefrag"},
{Opt_inode_cache, "inode_cache"},
+ {Opt_fatal_errors, "fatal_errors=%s"},
{Opt_err, NULL},
};
@@ -381,6 +412,18 @@ int btrfs_parse_options(struct btrfs_roo
printk(KERN_INFO "btrfs: enabling auto defrag");
btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
break;
+ case Opt_fatal_errors:
+ if (strcmp(args[0].from, "panic") == 0)
+ btrfs_set_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+ else if (strcmp(args[0].from, "bug") == 0)
+ btrfs_clear_opt(info->mount_opt,
+ PANIC_ON_FATAL_ERROR);
+ else {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);
@@ -729,6 +772,8 @@ static int btrfs_show_options(struct seq
seq_puts(seq, ",autodefrag");
if (btrfs_test_opt(root, INODE_MAP_CACHE))
seq_puts(seq, ",inode_cache");
+ if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
+ seq_puts(seq, ",fatal_errors=panic");
return 0;
}
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 2/9] btrfs: Catch locking failures in {set,clear}_extent_bit
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
2011-08-10 23:20 ` [patch 1/9] btrfs: Add btrfs_panic() Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 3/9] btrfs: Push up set_extent_bit errors to callers Jeff Mahoney
` (6 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
The *_state functions can only return 0 or -EEXIST. This patch addresses
the cases where those functions return -EEXIST, representing a locking
failure. It handles them by panicking with an appropriate error message.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/extent_io.c | 42 +++++++++++++++++++++++++++++++-----------
1 file changed, 31 insertions(+), 11 deletions(-)
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -455,6 +455,7 @@ int clear_extent_bit(struct extent_io_tr
struct extent_state **cached_state,
gfp_t mask)
{
+ struct btrfs_fs_info *fs_info;
struct extent_state *state;
struct extent_state *cached;
struct extent_state *prealloc = NULL;
@@ -465,6 +466,8 @@ int clear_extent_bit(struct extent_io_tr
int set = 0;
int clear = 0;
+ fs_info = btrfs_sb(tree->mapping->host->i_sb)->fs_info;
+
if (delete)
bits |= ~EXTENT_CTLBITS;
bits |= EXTENT_FIRST_DELALLOC;
@@ -531,7 +534,10 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
prealloc = NULL;
if (err)
goto out;
@@ -553,7 +559,10 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
if (wake)
wake_up(&state->wq);
@@ -704,6 +713,7 @@ int set_extent_bit(struct extent_io_tree
int bits, int exclusive_bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask)
{
+ struct btrfs_fs_info *fs_info;
struct extent_state *state;
struct extent_state *prealloc = NULL;
struct rb_node *node;
@@ -711,6 +721,8 @@ int set_extent_bit(struct extent_io_tree
u64 last_start;
u64 last_end;
+ fs_info = btrfs_sb(tree->mapping->host->i_sb)->fs_info;
+
bits |= EXTENT_FIRST_DELALLOC;
again:
if (!prealloc && (mask & __GFP_WAIT)) {
@@ -736,8 +748,11 @@ again:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = insert_state(tree, prealloc, start, end, &bits);
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
prealloc = NULL;
- BUG_ON(err == -EEXIST);
goto out;
}
state = rb_entry(node, struct extent_state, rb_node);
@@ -803,7 +818,10 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
prealloc = NULL;
if (err)
goto out;
@@ -840,12 +858,11 @@ hit_next:
*/
err = insert_state(tree, prealloc, start, this_end,
&bits);
- BUG_ON(err == -EEXIST);
- if (err) {
- free_extent_state(prealloc);
- prealloc = NULL;
- goto out;
- }
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
+
cache_state(prealloc, cached_state);
prealloc = NULL;
start = this_end + 1;
@@ -867,7 +884,10 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
+ if (err)
+ btrfs_panic(fs_info, err, "Locking error: "
+ "Extent tree was modified by another "
+ "thread while locked.");
set_state_bits(tree, prealloc, &bits);
cache_state(prealloc, cached_state);
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 3/9] btrfs: Push up set_extent_bit errors to callers
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
2011-08-10 23:20 ` [patch 1/9] btrfs: Add btrfs_panic() Jeff Mahoney
2011-08-10 23:20 ` [patch 2/9] btrfs: Catch locking failures in {set,clear}_extent_bit Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-11 0:08 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 4/9] btrfs: Push up lock_extent " Jeff Mahoney
` (5 subsequent siblings)
8 siblings, 1 reply; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
In the locking case, set_extent_bit can return -EEXIST but callers
already handle that.
In the non-locking case, it can't fail. Memory allocation failures are
handled by BUG_ON.
This patch pushes up the BUG_ONs from set_extent_bit to callers, except
where -ENOMEM can't occur (e.g. __GFP_WAIT && __GFP_NOFAIL).
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/extent-tree.c | 37 +++++++++++++++++++++++++------------
fs/btrfs/extent_io.c | 46 +++++++++++++++++++++++++++++++++++-----------
fs/btrfs/file-item.c | 3 ++-
fs/btrfs/inode.c | 25 +++++++++++++++++--------
fs/btrfs/ioctl.c | 5 +++--
fs/btrfs/relocation.c | 21 +++++++++++++--------
6 files changed, 95 insertions(+), 42 deletions(-)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -192,11 +192,14 @@ block_group_cache_tree_search(struct btr
static int add_excluded_extent(struct btrfs_root *root,
u64 start, u64 num_bytes)
{
+ int ret;
u64 end = start + num_bytes - 1;
- set_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
- set_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ ret = set_extent_bits(&root->fs_info->freed_extents[0],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ BUG_ON(ret);
+ ret = set_extent_bits(&root->fs_info->freed_extents[1],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ BUG_ON(ret);
return 0;
}
@@ -4142,8 +4145,9 @@ static int update_block_group(struct btr
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- set_extent_dirty(info->pinned_extents,
- bytenr, bytenr + num_bytes - 1,
+ /* Can't return -ENOMEM */
+ set_extent_dirty(info->pinned_extents, bytenr,
+ bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
}
btrfs_put_block_group(cache);
@@ -4184,6 +4188,7 @@ static int pin_down_extent(struct btrfs_
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ /* __GFP_NOFAIL means it can't return -ENOMEM */
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
@@ -5636,6 +5641,7 @@ struct extent_buffer *btrfs_init_new_buf
int level)
{
struct extent_buffer *buf;
+ int ret;
buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
if (!buf)
@@ -5654,14 +5660,21 @@ struct extent_buffer *btrfs_init_new_buf
* EXENT bit to differentiate dirty pages.
*/
if (root->log_transid % 2 == 0)
- set_extent_dirty(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ ret = set_extent_dirty(&root->dirty_log_pages,
+ buf->start,
+ buf->start + buf->len - 1,
+ GFP_NOFS);
else
- set_extent_new(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ ret = set_extent_new(&root->dirty_log_pages,
+ buf->start,
+ buf->start + buf->len - 1,
+ GFP_NOFS);
+ BUG_ON(ret);
} else {
- set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ ret = set_extent_dirty(&trans->transaction->dirty_pages,
+ buf->start, buf->start + buf->len - 1,
+ GFP_NOFS);
+ BUG_ON(ret);
}
trans->blocks_used++;
/* this returns a buffer locked for blocking */
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -706,6 +706,9 @@ static void uncache_state(struct extent_
* part of the range already has the desired bits set. The start of the
* existing range is returned in failed_start in this case.
*
+ * It may also fail with -ENOMEM if memory cannot be obtained for extent_state
+ * structures.
+ *
* [start, end] is inclusive This takes the tree lock.
*/
@@ -727,7 +730,8 @@ int set_extent_bit(struct extent_io_tree
again:
if (!prealloc && (mask & __GFP_WAIT)) {
prealloc = alloc_extent_state(mask);
- BUG_ON(!prealloc);
+ if (!prealloc)
+ return -ENOMEM;
}
spin_lock(&tree->lock);
@@ -746,7 +750,11 @@ again:
node = tree_search(tree, start);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
err = insert_state(tree, prealloc, start, end, &bits);
if (err)
btrfs_panic(fs_info, err, "Locking error: "
@@ -816,7 +824,11 @@ hit_next:
}
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
err = split_state(tree, state, prealloc, start);
if (err)
btrfs_panic(fs_info, err, "Locking error: "
@@ -850,7 +862,10 @@ hit_next:
this_end = last_start - 1;
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
/*
* Avoid to free 'prealloc' if it can be merged with
@@ -882,7 +897,11 @@ hit_next:
}
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
err = split_state(tree, state, prealloc, end + 1);
if (err)
btrfs_panic(fs_info, err, "Locking error: "
@@ -994,6 +1013,7 @@ int lock_extent_bits(struct extent_io_tr
}
WARN_ON(start > end);
}
+ BUG_ON(err);
return err;
}
@@ -1016,6 +1036,7 @@ int try_lock_extent(struct extent_io_tre
EXTENT_LOCKED, 1, 0, NULL, mask);
return 0;
}
+ BUG_ON(err);
return 1;
}
@@ -1763,8 +1784,9 @@ static void end_bio_extent_readpage(stru
}
if (uptodate) {
- set_extent_uptodate(tree, start, end, &cached,
- GFP_ATOMIC);
+ ret = set_extent_uptodate(tree, start, end, &cached,
+ GFP_ATOMIC);
+ BUG_ON(ret);
}
unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
@@ -1986,8 +2008,9 @@ static int __extent_read_full_page(struc
memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
+ ret = set_extent_uptodate(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
+ BUG_ON(ret);
unlock_extent_cached(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
break;
@@ -2036,8 +2059,9 @@ static int __extent_read_full_page(struc
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
+ ret = set_extent_uptodate(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
+ BUG_ON(ret);
unlock_extent_cached(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
cur = cur + iosize;
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -212,9 +212,10 @@ static int __btrfs_lookup_bio_sums(struc
sum = 0;
if (BTRFS_I(inode)->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
- set_extent_bits(io_tree, offset,
+ ret = set_extent_bits(io_tree, offset,
offset + bvec->bv_len - 1,
EXTENT_NODATASUM, GFP_NOFS);
+ BUG_ON(ret);
} else {
printk(KERN_INFO "btrfs no csum found "
"for inode %llu start %llu\n",
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1548,6 +1548,7 @@ static void btrfs_writepage_fixup_worker
struct inode *inode;
u64 page_start;
u64 page_end;
+ int ret;
fixup = container_of(work, struct btrfs_writepage_fixup, work);
page = fixup->page;
@@ -1579,7 +1580,9 @@ again:
}
BUG();
- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
+ ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+ &cached_state);
+ BUG_ON(ret);
ClearPageChecked(page);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
@@ -1883,8 +1886,11 @@ static int btrfs_io_failed_hook(struct b
}
failrec->logical = logical;
free_extent_map(em);
- set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
- EXTENT_DIRTY, GFP_NOFS);
+
+ /* Doesn't this ignore locking failures? */
+ ret = set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
+ EXTENT_DIRTY, GFP_NOFS);
+ BUG_ON(ret);
set_state_private(failure_tree, start,
(u64)(unsigned long)failrec);
} else {
@@ -5167,8 +5173,10 @@ again:
kunmap(page);
btrfs_mark_buffer_dirty(leaf);
}
- set_extent_uptodate(io_tree, em->start,
- extent_map_end(em) - 1, NULL, GFP_NOFS);
+ ret = set_extent_uptodate(io_tree, em->start,
+ extent_map_end(em) - 1,
+ NULL, GFP_NOFS);
+ BUG_ON(ret);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -6230,9 +6238,10 @@ static ssize_t btrfs_direct_IO(int rw, s
*/
if (writing) {
write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
- ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- EXTENT_DELALLOC, 0, NULL, &cached_state,
- GFP_NOFS);
+ ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, EXTENT_DELALLOC, 0, NULL,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret);
if (ret) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_LOCKED | write_bits,
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -938,8 +938,9 @@ again:
}
- btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
- &cached_state);
+ ret = btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
+ &cached_state);
+ BUG_ON(ret);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state,
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2623,11 +2623,11 @@ static int finish_pending_nodes(struct b
return err;
}
-static void mark_block_processed(struct reloc_control *rc,
+static int mark_block_processed(struct reloc_control *rc,
u64 bytenr, u32 blocksize)
{
- set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
- EXTENT_DIRTY, GFP_NOFS);
+ return set_extent_bits(&rc->processed_blocks, bytenr,
+ bytenr + blocksize - 1, EXTENT_DIRTY, GFP_NOFS);
}
static void __mark_block_processed(struct reloc_control *rc,
@@ -2636,8 +2636,10 @@ static void __mark_block_processed(struc
u32 blocksize;
if (node->level == 0 ||
in_block_group(node->bytenr, rc->block_group)) {
+ int ret;
blocksize = btrfs_level_size(rc->extent_root, node->level);
- mark_block_processed(rc, node->bytenr, blocksize);
+ ret = mark_block_processed(rc, node->bytenr, blocksize);
+ BUG_ON(ret);
}
node->processed = 1;
}
@@ -2994,13 +2996,16 @@ static int relocate_file_extent_cluster(
if (nr < cluster->nr &&
page_start + offset == cluster->boundary[nr]) {
- set_extent_bits(&BTRFS_I(inode)->io_tree,
- page_start, page_end,
- EXTENT_BOUNDARY, GFP_NOFS);
+ ret = set_extent_bits(&BTRFS_I(inode)->io_tree,
+ page_start, page_end,
+ EXTENT_BOUNDARY, GFP_NOFS);
+ BUG_ON(ret);
nr++;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
+ ret = btrfs_set_extent_delalloc(inode, page_start,
+ page_end, NULL);
+ BUG_ON(ret);
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 4/9] btrfs: Push up lock_extent errors to callers
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (2 preceding siblings ...)
2011-08-10 23:20 ` [patch 3/9] btrfs: Push up set_extent_bit errors to callers Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 5/9] btrfs: Push up clear_extent_bit " Jeff Mahoney
` (4 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
lock_extent, try_lock_extent, and lock_extent_bits can't currently fail
because errors are caught via BUG_ON.
This patch pushes the error handling up to callers, which currently
only handle them via BUG_ON themselves.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/compression.c | 3 +-
fs/btrfs/disk-io.c | 5 ++-
fs/btrfs/extent_io.c | 20 ++++++++-----
fs/btrfs/file.c | 17 ++++++-----
fs/btrfs/free-space-cache.c | 6 ++--
fs/btrfs/inode.c | 66 ++++++++++++++++++++++++++------------------
fs/btrfs/ioctl.c | 16 ++++++----
fs/btrfs/relocation.c | 18 ++++++++----
8 files changed, 94 insertions(+), 57 deletions(-)
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -496,7 +496,8 @@ static noinline int add_ra_bio_pages(str
* sure they map to this compressed extent on disk.
*/
set_page_extent_mapped(page);
- lock_extent(tree, last_offset, end, GFP_NOFS);
+ ret = lock_extent(tree, last_offset, end, GFP_NOFS);
+ BUG_ON(ret < 0);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
PAGE_CACHE_SIZE);
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -331,8 +331,9 @@ static int verify_parent_transid(struct
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
return 0;
- lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
- 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
+ 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
btrfs_header_generation(eb) == parent_transid) {
ret = 0;
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1013,7 +1013,6 @@ int lock_extent_bits(struct extent_io_tr
}
WARN_ON(start > end);
}
- BUG_ON(err);
return err;
}
@@ -1035,8 +1034,8 @@ int try_lock_extent(struct extent_io_tre
clear_extent_bit(tree, start, failed_start - 1,
EXTENT_LOCKED, 1, 0, NULL, mask);
return 0;
- }
- BUG_ON(err);
+ } else if (err < 0)
+ return err;
return 1;
}
@@ -1347,8 +1346,9 @@ again:
BUG_ON(ret);
/* step three, lock the state bits for the whole range */
- lock_extent_bits(tree, delalloc_start, delalloc_end,
- 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(tree, delalloc_start, delalloc_end,
+ 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1977,7 +1977,8 @@ static int __extent_read_full_page(struc
end = page_end;
while (1) {
- lock_extent(tree, start, end, GFP_NOFS);
+ ret = lock_extent(tree, start, end, GFP_NOFS);
+ BUG_ON(ret < 0);
ordered = btrfs_lookup_ordered_extent(inode, start);
if (!ordered)
break;
@@ -2663,12 +2664,14 @@ int extent_invalidatepage(struct extent_
u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
u64 end = start + PAGE_CACHE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
+ int ret;
start += (offset + blocksize - 1) & ~(blocksize - 1);
if (start > end)
return 0;
- lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
wait_on_page_writeback(page);
clear_extent_bit(tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -2878,8 +2881,9 @@ int extent_fiemap(struct inode *inode, s
last_for_get_extent = isize;
}
- lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
em = get_extent_skip_holes(inode, off, last_for_get_extent,
get_extent);
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1104,9 +1104,10 @@ again:
err = 0;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
- lock_extent_bits(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos - 1, 0, &cached_state,
- GFP_NOFS);
+ err = lock_extent_bits(&BTRFS_I(inode)->io_tree,
+ start_pos, last_pos - 1, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
ordered = btrfs_lookup_first_ordered_extent(inode,
last_pos - 1);
if (ordered &&
@@ -1612,8 +1613,9 @@ static long btrfs_fallocate(struct file
/* the extent lock is ordered inside the running
* transaction
*/
- lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
- locked_end, 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+ locked_end, 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
ordered = btrfs_lookup_first_ordered_extent(inode,
alloc_end - 1);
if (ordered &&
@@ -1696,8 +1698,9 @@ static int find_desired_extent(struct in
if (inode->i_size == 0)
return -ENXIO;
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
- &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/*
* Delalloc is such a pain. If we have a hole and we have pending
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -609,8 +609,10 @@ int __btrfs_write_out_cache(struct btrfs
}
index = 0;
- lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
/*
* When searching for pinned extents, we need to start at our start
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -588,9 +588,11 @@ retry:
int page_started = 0;
unsigned long nr_written = 0;
- lock_extent(io_tree, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1, GFP_NOFS);
+ ret = lock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
/* allocate blocks */
ret = cow_file_range(inode, async_cow->locked_page,
@@ -617,9 +619,10 @@ retry:
continue;
}
- lock_extent(io_tree, async_extent->start,
- async_extent->start + async_extent->ram_size - 1,
- GFP_NOFS);
+ ret = lock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, GFP_NOFS);
+ BUG_ON(ret < 0);
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
@@ -1563,8 +1566,9 @@ again:
page_start = page_offset(page);
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
- lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
- &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/* already ordered? We're done */
if (PagePrivate2(page))
@@ -1746,9 +1750,11 @@ static int btrfs_finish_ordered_io(struc
goto out;
}
- lock_extent_bits(io_tree, ordered_extent->file_offset,
- ordered_extent->file_offset + ordered_extent->len - 1,
- 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len - 1,
+ 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
@@ -3410,8 +3416,9 @@ again:
}
wait_on_page_writeback(page);
- lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3486,8 +3493,9 @@ int btrfs_cont_expand(struct inode *inod
struct btrfs_ordered_extent *ordered;
btrfs_wait_ordered_range(inode, hole_start,
block_end - hole_start);
- lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
- &cached_state, GFP_NOFS);
+ err = lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
ordered = btrfs_lookup_ordered_extent(inode, hole_start);
if (!ordered)
break;
@@ -5798,9 +5806,10 @@ again:
goto out;
}
- lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
- ordered->file_offset + ordered->len - 1, 0,
- &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
+ ordered->file_offset + ordered->len - 1, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
ret = btrfs_mark_extent_written(trans, inode,
@@ -6214,8 +6223,9 @@ static ssize_t btrfs_direct_IO(int rw, s
}
while (1) {
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- 0, &cached_state, GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/*
* We're concerned with the entire range that we're going to be
* doing DIO to, so we need to make sure theres no ordered
@@ -6354,6 +6364,7 @@ static void btrfs_invalidatepage(struct
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+ int ret;
/*
@@ -6370,8 +6381,9 @@ static void btrfs_invalidatepage(struct
btrfs_releasepage(page, GFP_NOFS);
return;
}
- lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ ret = lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
ordered = btrfs_lookup_ordered_extent(page->mapping->host,
page_offset(page));
if (ordered) {
@@ -6393,8 +6405,9 @@ static void btrfs_invalidatepage(struct
}
btrfs_put_ordered_extent(ordered);
cached_state = NULL;
- lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ ret = lock_extent_bits(tree, page_start, page_end,
+ 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
}
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6462,8 +6475,9 @@ again:
}
wait_on_page_writeback(page);
- lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
- GFP_NOFS);
+ ret = lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
set_page_extent_mapped(page);
/*
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -757,7 +757,7 @@ static int should_defrag_range(struct in
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- int ret = 1;
+ int ret = 1, err;
/*
* make sure that once we start defragging and extent, we keep on
@@ -778,7 +778,8 @@ static int should_defrag_range(struct in
if (!em) {
/* get the big lock and read metadata off disk */
- lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ BUG_ON(err < 0);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
@@ -902,9 +903,10 @@ again:
page_start = page_offset(pages[0]);
page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
- lock_extent_bits(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1, 0, &cached_state,
- GFP_NOFS);
+ ret = lock_extent_bits(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1);
if (ordered &&
ordered->file_offset + ordered->len > page_start &&
@@ -2225,7 +2227,9 @@ static noinline long btrfs_ioctl_clone(s
another, and lock file content */
while (1) {
struct btrfs_ordered_extent *ordered;
- lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ ret = lock_extent(&BTRFS_I(src)->io_tree, off, off+len,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
ordered = btrfs_lookup_first_ordered_extent(src, off+len);
if (!ordered &&
!test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1577,6 +1577,7 @@ int replace_file_extents(struct btrfs_tr
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
key.offset, end,
GFP_NOFS);
+ BUG_ON(ret < 0);
if (!ret)
continue;
@@ -1899,6 +1900,7 @@ static int invalidate_extent_cache(struc
u64 objectid;
u64 start, end;
u64 ino;
+ int ret;
objectid = min_key->objectid;
while (1) {
@@ -1952,7 +1954,9 @@ static int invalidate_extent_cache(struc
}
/* the lock_extent waits for readpage to complete */
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
btrfs_drop_extent_cache(inode, start, end, 1);
unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
}
@@ -2862,7 +2866,9 @@ int prealloc_file_extent_cluster(struct
else
end = cluster->end - offset;
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ ret = lock_extent(&BTRFS_I(inode)->io_tree, start,
+ end, GFP_NOFS);
+ BUG_ON(ret < 0);
num_bytes = end + 1 - start;
ret = btrfs_prealloc_file_range(inode, 0, start,
num_bytes, num_bytes,
@@ -2899,7 +2905,8 @@ int setup_extent_mapping(struct inode *i
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ ret = lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ BUG_ON(ret < 0);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
@@ -2989,8 +2996,9 @@ static int relocate_file_extent_cluster(
page_start = (u64)page->index << PAGE_CACHE_SHIFT;
page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(&BTRFS_I(inode)->io_tree,
- page_start, page_end, GFP_NOFS);
+ ret = lock_extent(&BTRFS_I(inode)->io_tree,
+ page_start, page_end, GFP_NOFS);
+ BUG_ON(ret < 0);
set_page_extent_mapped(page);
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 5/9] btrfs: Push up clear_extent_bit errors to callers
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (3 preceding siblings ...)
2011-08-10 23:20 ` [patch 4/9] btrfs: Push up lock_extent " Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 6/9] btrfs: Push up unlock_extent " Jeff Mahoney
` (3 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
clear_extent_bit can fail with -ENOMEM for a specific case but will BUG
on other memory allocation failures.
This patch makes clear_extent_bit return -ENOMEM on memory allocation
failure, and adds BUG_ON handling in those callers that do not already
handle the error.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/disk-io.c | 7 ++-
fs/btrfs/extent-tree.c | 14 ++++--
fs/btrfs/extent_io.c | 56 ++++++++++++++++++--------
fs/btrfs/file.c | 10 ++--
fs/btrfs/free-space-cache.c | 20 +++++----
fs/btrfs/inode.c | 92 +++++++++++++++++++++++++++-----------------
fs/btrfs/ioctl.c | 9 ++--
fs/btrfs/relocation.c | 5 +-
fs/btrfs/transaction.c | 4 +
fs/btrfs/tree-log.c | 5 +-
10 files changed, 142 insertions(+), 80 deletions(-)
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2981,7 +2981,9 @@ static int btrfs_destroy_marked_extents(
if (ret)
break;
- clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+ ret = clear_extent_bits(dirty_pages, start, end, mark,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
while (start <= end) {
index = start >> PAGE_CACHE_SHIFT;
start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
@@ -3042,7 +3044,8 @@ static int btrfs_destroy_pinned_extent(s
end + 1 - start,
NULL);
- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ ret = clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ BUG_ON(ret < 0);
btrfs_error_unpin_extent_range(root, start, end);
cond_resched();
}
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -207,14 +207,17 @@ static void free_excluded_extents(struct
struct btrfs_block_group_cache *cache)
{
u64 start, end;
+ int ret;
start = cache->key.objectid;
end = start + cache->key.offset - 1;
- clear_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
- clear_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ ret = clear_extent_bits(&root->fs_info->freed_extents[0],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ BUG_ON(ret < 0);
+ ret = clear_extent_bits(&root->fs_info->freed_extents[1],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ BUG_ON(ret < 0);
}
static int exclude_super_stripes(struct btrfs_root *root,
@@ -4359,7 +4362,8 @@ int btrfs_finish_extent_commit(struct bt
ret = btrfs_discard_extent(root, start,
end + 1 - start, NULL);
- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ ret = clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ BUG_ON(ret < 0);
unpin_extent_range(root, start, end);
cond_resched();
}
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -532,7 +532,11 @@ hit_next:
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
err = split_state(tree, state, prealloc, start);
if (err)
btrfs_panic(fs_info, err, "Locking error: "
@@ -557,7 +561,11 @@ hit_next:
*/
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
- BUG_ON(!prealloc);
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
err = split_state(tree, state, prealloc, end + 1);
if (err)
btrfs_panic(fs_info, err, "Locking error: "
@@ -1030,9 +1038,12 @@ int try_lock_extent(struct extent_io_tre
err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
&failed_start, NULL, mask);
if (err == -EEXIST) {
- if (failed_start > start)
- clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, NULL, mask);
+ if (failed_start > start) {
+ err = clear_extent_bit(tree, start, failed_start - 1,
+ EXTENT_LOCKED, 1, 0, NULL,
+ mask);
+ BUG_ON(err < 0);
+ }
return 0;
} else if (err < 0)
return err;
@@ -1042,14 +1053,18 @@ int try_lock_extent(struct extent_io_tre
int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached, gfp_t mask)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- mask);
+ int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
+ cached, mask);
+ BUG_ON(ret < 0);
+ return ret;
}
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
- mask);
+ int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+ mask);
+ BUG_ON(ret < 0);
+ return ret;
}
/*
@@ -1389,7 +1404,9 @@ int extent_clear_unlock_delalloc(struct
if (op & EXTENT_CLEAR_DELALLOC)
clear_bits |= EXTENT_DELALLOC;
- clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
+ ret = clear_extent_bit(tree, start, end, clear_bits,
+ 1, 0, NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
EXTENT_SET_PRIVATE2)))
@@ -1694,7 +1711,9 @@ static void end_bio_extent_writepage(str
}
if (!uptodate) {
- clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
+ ret = clear_extent_uptodate(tree, start, end,
+ NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
ClearPageUptodate(page);
SetPageError(page);
}
@@ -2673,10 +2692,11 @@ int extent_invalidatepage(struct extent_
ret = lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
BUG_ON(ret < 0);
wait_on_page_writeback(page);
- clear_extent_bit(tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING,
- 1, 1, &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(tree, start, end,
+ EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING,
+ 1, 1, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
return 0;
}
@@ -3299,8 +3319,10 @@ int clear_extent_buffer_uptodate(struct
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
if (eb_straddles_pages(eb)) {
- clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- cached_state, GFP_NOFS);
+ int ret = clear_extent_uptodate(tree, eb->start,
+ eb->start + eb->len - 1,
+ cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
}
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1128,10 +1128,12 @@ again:
if (ordered)
btrfs_put_ordered_extent(ordered);
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
- last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
- GFP_NOFS);
+ err = clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
+ last_pos - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, &cached_state,
GFP_NOFS);
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -800,10 +800,12 @@ int __btrfs_write_out_cache(struct btrfs
ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
if (ret < 0) {
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0, NULL,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
ret = -1;
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
goto out;
}
leaf = path->nodes[0];
@@ -814,12 +816,14 @@ int __btrfs_write_out_cache(struct btrfs
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
found_key.offset != offset) {
- ret = -1;
- clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL,
- GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+ bytes - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0,
+ NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
btrfs_release_path(path);
+ ret = -1;
goto out;
}
}
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -960,9 +960,11 @@ static int cow_file_range_async(struct i
unsigned long nr_pages;
u64 cur_end;
int limit = 10 * 1024 * 1042;
+ int ret;
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
- 1, 0, NULL, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
+ EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow);
@@ -1917,9 +1919,11 @@ static int btrfs_io_failed_hook(struct b
}
if (!state || failrec->last_mirror > num_copies) {
set_state_private(failure_tree, failrec->start, 0);
- clear_extent_bits(failure_tree, failrec->start,
- failrec->start + failrec->len - 1,
- EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+ ret = clear_extent_bits(failure_tree, failrec->start,
+ failrec->start + failrec->len - 1,
+ EXTENT_LOCKED | EXTENT_DIRTY,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
kfree(failrec);
return -EIO;
}
@@ -1963,11 +1967,13 @@ static int btrfs_clean_io_failures(struc
private_failure;
set_state_private(&BTRFS_I(inode)->io_failure_tree,
failure->start, 0);
- clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+ ret = clear_extent_bits(
+ &BTRFS_I(inode)->io_failure_tree,
failure->start,
failure->start + failure->len - 1,
EXTENT_DIRTY | EXTENT_LOCKED,
GFP_NOFS);
+ BUG_ON(ret < 0);
kfree(failure);
}
}
@@ -2001,8 +2007,9 @@ static int btrfs_readpage_end_io_hook(st
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
- clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
- GFP_NOFS);
+ ret = clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
return 0;
}
@@ -3432,9 +3439,11 @@ again:
goto again;
}
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
- EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
- 0, 0, &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
@@ -5584,6 +5593,7 @@ static int btrfs_get_blocks_direct(struc
u64 start = iblock << inode->i_blkbits;
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
+ int ret;
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em))
@@ -5679,9 +5689,11 @@ must_cow:
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
unlock:
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
- EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
- 0, NULL, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, start,
+ start + len - 1,
+ EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY,
+ 1, 0, NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
map:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
@@ -6253,9 +6265,12 @@ static ssize_t btrfs_direct_IO(int rw, s
&cached_state, GFP_NOFS);
BUG_ON(ret);
if (ret) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_LOCKED | write_bits,
- 1, 0, &cached_state, GFP_NOFS);
+ int ret2;
+ ret2 = clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend,
+ EXTENT_LOCKED | write_bits,
+ 1, 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret2 < 0);
goto out;
}
}
@@ -6269,19 +6284,21 @@ static ssize_t btrfs_direct_IO(int rw, s
btrfs_submit_direct, 0);
if (ret < 0 && ret != -EIOCBQUEUED) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
+ offset + iov_length(iov, nr_segs) - 1,
+ EXTENT_LOCKED | write_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
/*
* We're falling back to buffered, unlock the section we didn't
* do IO on.
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
+ offset + iov_length(iov, nr_segs) - 1,
+ EXTENT_LOCKED | write_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
}
out:
free_extent_state(cached_state);
@@ -6391,10 +6408,11 @@ static void btrfs_invalidatepage(struct
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
- clear_extent_bit(tree, page_start, page_end,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
- &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(tree, page_start, page_end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_LOCKED | EXTENT_DO_ACCOUNTING,
+ 1, 0, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
@@ -6409,9 +6427,11 @@ static void btrfs_invalidatepage(struct
0, &cached_state, GFP_NOFS);
BUG_ON(ret < 0);
}
- clear_extent_bit(tree, page_start, page_end,
- EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(tree, page_start, page_end,
+ EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 1, 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
__btrfs_releasepage(page, GFP_NOFS);
ClearPageChecked(page);
@@ -6501,9 +6521,11 @@ again:
* is probably a better way to do this, but for now keep consistent with
* prepare_pages in the normal write path.
*/
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
- EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
- 0, 0, &cached_state, GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -926,10 +926,11 @@ again:
if (ordered)
btrfs_put_ordered_extent(ordered);
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
- page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
- GFP_NOFS);
+ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
+ page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
if (i_done != num_pages) {
spin_lock(&BTRFS_I(inode)->lock);
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3834,8 +3834,9 @@ restart:
}
btrfs_release_path(path);
- clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
- GFP_NOFS);
+ ret = clear_extent_bits(&rc->processed_blocks, 0, (u64)-1,
+ EXTENT_DIRTY, GFP_NOFS);
+ BUG_ON(ret < 0);
if (trans) {
nr = trans->blocks_used;
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -636,7 +636,9 @@ int btrfs_wait_marked_extents(struct btr
if (ret)
break;
- clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+ ret = clear_extent_bits(dirty_pages, start, end,
+ mark, GFP_NOFS);
+ BUG_ON(ret < 0);
while (start <= end) {
index = start >> PAGE_CACHE_SHIFT;
start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2158,8 +2158,9 @@ static void free_log_tree(struct btrfs_t
if (ret)
break;
- clear_extent_bits(&log->dirty_log_pages, start, end,
- EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+ ret = clear_extent_bits(&log->dirty_log_pages, start, end,
+ EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+ BUG_ON(ret < 0);
}
free_extent_buffer(log->node);
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 6/9] btrfs: Push up unlock_extent errors to callers
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (4 preceding siblings ...)
2011-08-10 23:20 ` [patch 5/9] btrfs: Push up clear_extent_bit " Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 7/9] btrfs: Make pin_down_extent return void Jeff Mahoney
` (2 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
The previous patch pushed the clear_extent_bit error handling up a level,
which included unlock_extent and unlock_extent_cached.
This patch pushes the BUG_ON up into the callers of those functions.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/compression.c | 6 +-
fs/btrfs/disk-io.c | 7 +--
fs/btrfs/extent_io.c | 52 +++++++++++++----------
fs/btrfs/file.c | 35 ++++++++-------
fs/btrfs/free-space-cache.c | 15 ++++--
fs/btrfs/inode.c | 98 ++++++++++++++++++++++++++------------------
fs/btrfs/ioctl.c | 26 +++++++----
fs/btrfs/relocation.c | 24 +++++++---
8 files changed, 159 insertions(+), 104 deletions(-)
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -507,7 +507,8 @@ static noinline int add_ra_bio_pages(str
(last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
(em->block_start >> 9) != cb->orig_bio->bi_sector) {
free_extent_map(em);
- unlock_extent(tree, last_offset, end, GFP_NOFS);
+ ret = unlock_extent(tree, last_offset, end, GFP_NOFS);
+ BUG_ON(ret < 0);
unlock_page(page);
page_cache_release(page);
break;
@@ -535,7 +536,8 @@ static noinline int add_ra_bio_pages(str
nr_pages++;
page_cache_release(page);
} else {
- unlock_extent(tree, last_offset, end, GFP_NOFS);
+ ret = unlock_extent(tree, last_offset, end, GFP_NOFS);
+ BUG_ON(ret < 0);
unlock_page(page);
page_cache_release(page);
break;
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -326,7 +326,7 @@ static int verify_parent_transid(struct
struct extent_buffer *eb, u64 parent_transid)
{
struct extent_state *cached_state = NULL;
- int ret;
+ int ret, err;
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
return 0;
@@ -347,8 +347,9 @@ static int verify_parent_transid(struct
ret = 1;
clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
- unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
return ret;
}
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1053,18 +1053,14 @@ int try_lock_extent(struct extent_io_tre
int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached, gfp_t mask)
{
- int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0,
- cached, mask);
- BUG_ON(ret < 0);
- return ret;
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ mask);
}
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
- int ret = clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
- mask);
- BUG_ON(ret < 0);
- return ret;
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+ mask);
}
/*
@@ -1369,8 +1365,9 @@ again:
ret = test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, 1, cached_state);
if (!ret) {
- unlock_extent_cached(tree, delalloc_start, delalloc_end,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(tree, delalloc_start, delalloc_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
cond_resched();
@@ -1807,7 +1804,9 @@ static void end_bio_extent_readpage(stru
GFP_ATOMIC);
BUG_ON(ret);
}
- unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
+ ret = unlock_extent_cached(tree, start, end,
+ &cached, GFP_ATOMIC);
+ BUG_ON(ret < 0);
if (whole_page) {
if (uptodate) {
@@ -2001,7 +2000,8 @@ static int __extent_read_full_page(struc
ordered = btrfs_lookup_ordered_extent(inode, start);
if (!ordered)
break;
- unlock_extent(tree, start, end, GFP_NOFS);
+ ret = unlock_extent(tree, start, end, GFP_NOFS);
+ BUG_ON(ret < 0);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
@@ -2031,15 +2031,17 @@ static int __extent_read_full_page(struc
ret = set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
BUG_ON(ret);
- unlock_extent_cached(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
+ ret = unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
+ BUG_ON(ret < 0);
break;
}
em = get_extent(inode, page, pg_offset, cur,
end - cur + 1, 0);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
- unlock_extent(tree, cur, end, GFP_NOFS);
+ ret = unlock_extent(tree, cur, end, GFP_NOFS);
+ BUG_ON(ret < 0);
break;
}
extent_offset = cur - em->start;
@@ -2082,8 +2084,9 @@ static int __extent_read_full_page(struc
ret = set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
BUG_ON(ret);
- unlock_extent_cached(tree, cur, cur + iosize - 1,
- &cached, GFP_NOFS);
+ ret = unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
+ BUG_ON(ret < 0);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -2092,7 +2095,9 @@ static int __extent_read_full_page(struc
if (test_range_bit(tree, cur, cur_end,
EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ ret = unlock_extent(tree, cur, cur + iosize - 1,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -2102,7 +2107,9 @@ static int __extent_read_full_page(struc
*/
if (block_start == EXTENT_MAP_INLINE) {
SetPageError(page);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ ret = unlock_extent(tree, cur, cur + iosize - 1,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -2829,7 +2836,7 @@ static struct extent_map *get_extent_ski
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
- int ret = 0;
+ int ret = 0, err;
u64 off = start;
u64 max = start + len;
u32 flags = 0;
@@ -2989,8 +2996,9 @@ int extent_fiemap(struct inode *inode, s
out_free:
free_extent_map(em);
out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
return ret;
}
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1114,9 +1114,10 @@ again:
ordered->file_offset + ordered->len > start_pos &&
ordered->file_offset < last_pos) {
btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos - 1,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ start_pos, last_pos - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
for (i = 0; i < num_pages; i++) {
unlock_page(pages[i]);
page_cache_release(pages[i]);
@@ -1134,9 +1135,10 @@ again:
EXTENT_DO_ACCOUNTING, 0, 0,
&cached_state, GFP_NOFS);
BUG_ON(err < 0);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos - 1, &cached_state,
- GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ start_pos, last_pos - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
}
for (i = 0; i < num_pages; i++) {
clear_page_dirty_for_io(pages[i]);
@@ -1577,7 +1579,7 @@ static long btrfs_fallocate(struct file
u64 locked_end;
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
- int ret;
+ int ret, err;
alloc_start = offset & ~mask;
alloc_end = (offset + len + mask) & ~mask;
@@ -1624,9 +1626,10 @@ static long btrfs_fallocate(struct file
ordered->file_offset + ordered->len > alloc_start &&
ordered->file_offset < alloc_end) {
btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- alloc_start, locked_end,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ alloc_start, locked_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
/*
* we can't wait on the range with the transaction
* running or with the extent lock held
@@ -1668,8 +1671,9 @@ static long btrfs_fallocate(struct file
break;
}
}
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start,
+ locked_end, &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
@@ -1688,7 +1692,7 @@ static int find_desired_extent(struct in
u64 orig_start = *offset;
u64 len = i_size_read(inode);
u64 last_end = 0;
- int ret = 0;
+ int ret = 0, err;
lockend = max_t(u64, root->sectorsize, lockend);
if (lockend <= lockstart)
@@ -1784,8 +1788,9 @@ static int find_desired_extent(struct in
if (!ret)
*offset = min(*offset, inode->i_size);
out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
return ret;
}
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -549,7 +549,7 @@ int __btrfs_write_out_cache(struct btrfs
int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
- int ret = -1;
+ int ret = -1, err;
bool next_page = false;
bool out_of_space = false;
@@ -760,9 +760,10 @@ int __btrfs_write_out_cache(struct btrfs
if (out_of_space) {
btrfs_drop_pages(pages, num_pages);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state,
- GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
ret = 0;
goto out;
}
@@ -782,8 +783,10 @@ int __btrfs_write_out_cache(struct btrfs
ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
bytes, &cached_state);
btrfs_drop_pages(pages, num_pages);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
if (ret) {
ret = 0;
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -643,9 +643,11 @@ retry:
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
- unlock_extent(io_tree, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1, GFP_NOFS);
+ ret = unlock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
goto retry;
}
@@ -1578,8 +1580,10 @@ again:
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
- page_end, &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
goto again;
@@ -1591,8 +1595,9 @@ again:
BUG_ON(ret);
ClearPageChecked(page);
out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
+ page_end, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
out_page:
unlock_page(page);
page_cache_release(page);
@@ -1789,9 +1794,11 @@ static int btrfs_finish_ordered_io(struc
ordered_extent->len);
BUG_ON(ret);
}
- unlock_extent_cached(io_tree, ordered_extent->file_offset,
- ordered_extent->file_offset +
- ordered_extent->len - 1, &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len - 1, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
@@ -3387,7 +3394,7 @@ static int btrfs_truncate_page(struct ad
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
struct page *page;
- int ret = 0;
+ int ret = 0, err;
u64 page_start;
u64 page_end;
@@ -3430,8 +3437,9 @@ again:
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
unlock_page(page);
page_cache_release(page);
btrfs_start_ordered_extent(inode, ordered, 1);
@@ -3448,8 +3456,9 @@ again:
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
goto out_unlock;
}
@@ -3462,8 +3471,9 @@ again:
}
ClearPageChecked(page);
set_page_dirty(page);
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
- GFP_NOFS);
+ err = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
out_unlock:
if (ret)
@@ -3493,7 +3503,7 @@ int btrfs_cont_expand(struct inode *inod
u64 last_byte;
u64 cur_offset;
u64 hole_size;
- int err = 0;
+ int err = 0, err2;
if (size <= hole_start)
return 0;
@@ -3508,8 +3518,9 @@ int btrfs_cont_expand(struct inode *inod
ordered = btrfs_lookup_ordered_extent(inode, hole_start);
if (!ordered)
break;
- unlock_extent_cached(io_tree, hole_start, block_end - 1,
- &cached_state, GFP_NOFS);
+ err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err2 < 0);
btrfs_put_ordered_extent(ordered);
}
@@ -3556,8 +3567,9 @@ int btrfs_cont_expand(struct inode *inod
}
free_extent_map(em);
- unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
- GFP_NOFS);
+ err2 = unlock_extent_cached(io_tree, hole_start, block_end - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err2 < 0);
return err;
}
@@ -5624,8 +5636,9 @@ static int btrfs_get_blocks_direct(struc
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
/* DIO will do one hole at a time, so just unlock a sector */
- unlock_extent(&BTRFS_I(inode)->io_tree, start,
- start + root->sectorsize - 1, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(inode)->io_tree, start,
+ start + root->sectorsize - 1, GFP_NOFS);
+ BUG_ON(ret < 0);
return 0;
}
@@ -5727,6 +5740,7 @@ struct btrfs_dio_private {
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
+ int ret;
struct btrfs_dio_private *dip = bio->bi_private;
struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
struct bio_vec *bvec = bio->bi_io_vec;
@@ -5767,8 +5781,9 @@ static void btrfs_endio_direct_read(stru
bvec++;
} while (bvec <= bvec_end);
- unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
- dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
+ dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+ BUG_ON(ret < 0);
bio->bi_private = dip->private;
kfree(dip->csums);
@@ -5790,7 +5805,7 @@ static void btrfs_endio_direct_write(str
struct extent_state *cached_state = NULL;
u64 ordered_offset = dip->logical_offset;
u64 ordered_bytes = dip->bytes;
- int ret;
+ int ret, ret2;
if (err)
goto out_done;
@@ -5856,9 +5871,11 @@ again:
btrfs_update_inode(trans, root, inode);
ret = 0;
out_unlock:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
- ordered->file_offset + ordered->len - 1,
- &cached_state, GFP_NOFS);
+ ret2 = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ ordered->file_offset,
+ ordered->file_offset + ordered->len - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret2 < 0);
out:
btrfs_delalloc_release_metadata(inode, ordered->len);
btrfs_end_transaction(trans, root);
@@ -6247,8 +6264,9 @@ static ssize_t btrfs_direct_IO(int rw, s
lockend - lockstart + 1);
if (!ordered)
break;
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
cond_resched();
@@ -6468,7 +6486,7 @@ int btrfs_page_mkwrite(struct vm_area_st
char *kaddr;
unsigned long zero_start;
loff_t size;
- int ret;
+ int ret, err;
u64 page_start;
u64 page_end;
@@ -6506,8 +6524,9 @@ again:
*/
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -6530,8 +6549,9 @@ again:
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
&cached_state);
if (ret) {
- unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -6556,7 +6576,9 @@ again:
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
+ err = unlock_extent_cached(io_tree, page_start, page_end,
+ &cached_state, GFP_NOFS);
+ BUG_ON(err < 0);
out_unlock:
if (!ret)
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -781,7 +781,8 @@ static int should_defrag_range(struct in
err = lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
BUG_ON(err < 0);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ err = unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+ BUG_ON(err < 0);
if (IS_ERR(em))
return 0;
@@ -912,9 +913,10 @@ again:
ordered->file_offset + ordered->len > page_start &&
ordered->file_offset < page_end) {
btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1,
- &cached_state, GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret < 0);
for (i = 0; i < i_done; i++) {
unlock_page(pages[i]);
page_cache_release(pages[i]);
@@ -945,9 +947,10 @@ again:
&cached_state);
BUG_ON(ret);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1, &cached_state,
- GFP_NOFS);
+ ret = unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1, &cached_state,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
for (i = 0; i < i_done; i++) {
clear_page_dirty_for_io(pages[i]);
@@ -2139,7 +2142,7 @@ static noinline long btrfs_ioctl_clone(s
struct btrfs_key key;
u32 nritems;
int slot;
- int ret;
+ int ret, err;
u64 len = olen;
u64 bs = root->fs_info->sb->s_blocksize;
u64 hint_byte;
@@ -2236,7 +2239,9 @@ static noinline long btrfs_ioctl_clone(s
!test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
EXTENT_DELALLOC, 0, NULL))
break;
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(src)->io_tree, off,
+ off+len, GFP_NOFS);
+ BUG_ON(ret < 0);
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_wait_ordered_range(src, off, len);
@@ -2443,7 +2448,8 @@ next:
ret = 0;
out:
btrfs_release_path(path);
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ err = unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+ BUG_ON(err < 0);
out_unlock:
mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex);
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1583,8 +1583,9 @@ int replace_file_extents(struct btrfs_tr
btrfs_drop_extent_cache(inode, key.offset, end,
1);
- unlock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(inode)->io_tree,
+ key.offset, end, GFP_NOFS);
+ BUG_ON(ret < 0);
}
}
@@ -1958,7 +1959,9 @@ static int invalidate_extent_cache(struc
GFP_NOFS);
BUG_ON(ret < 0);
btrfs_drop_extent_cache(inode, start, end, 1);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(inode)->io_tree, start, end,
+ GFP_NOFS);
+ BUG_ON(ret < 0);
}
return 0;
}
@@ -2860,6 +2863,7 @@ int prealloc_file_extent_cluster(struct
goto out;
while (nr < cluster->nr) {
+ int err;
start = cluster->boundary[nr] - offset;
if (nr + 1 < cluster->nr)
end = cluster->boundary[nr + 1] - 1 - offset;
@@ -2873,7 +2877,9 @@ int prealloc_file_extent_cluster(struct
ret = btrfs_prealloc_file_range(inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end,
+ GFP_NOFS);
+ BUG_ON(err < 0);
if (ret)
break;
nr++;
@@ -2892,7 +2898,7 @@ int setup_extent_mapping(struct inode *i
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
- int ret = 0;
+ int ret = 0, err;
em = alloc_extent_map();
if (!em)
@@ -2917,7 +2923,8 @@ int setup_extent_mapping(struct inode *i
}
btrfs_drop_extent_cache(inode, start, end, 0);
}
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ err = unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ BUG_ON(err < 0);
return ret;
}
@@ -3016,8 +3023,9 @@ static int relocate_file_extent_cluster(
BUG_ON(ret);
set_page_dirty(page);
- unlock_extent(&BTRFS_I(inode)->io_tree,
- page_start, page_end, GFP_NOFS);
+ ret = unlock_extent(&BTRFS_I(inode)->io_tree,
+ page_start, page_end, GFP_NOFS);
+ BUG_ON(ret < 0);
unlock_page(page);
page_cache_release(page);
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 7/9] btrfs: Make pin_down_extent return void
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (5 preceding siblings ...)
2011-08-10 23:20 ` [patch 6/9] btrfs: Push up unlock_extent " Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 8/9] btrfs: Push up btrfs_pin_extent failures Jeff Mahoney
2011-08-10 23:20 ` [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures Jeff Mahoney
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
pin_down_extent performs some operations which can't fail and then calls
set_extent_dirty, which has two failure cases via set_extent_bit:
1) Return -EEXIST if exclusive bits are set
- Since it doesn't use any exclusive bits, this failure case can't
occur.
2) Return -ENOMEM if memory can't be allocated
- Since it's called with gfp_flags & __GFP_NOFAIL, this failure case
can't occur.
With no failure cases, it can return void.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/extent-tree.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4175,9 +4175,9 @@ static u64 first_logical_byte(struct btr
return bytenr;
}
-static int pin_down_extent(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache,
- u64 bytenr, u64 num_bytes, int reserved)
+static void pin_down_extent(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache,
+ u64 bytenr, u64 num_bytes, int reserved)
{
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
@@ -4194,7 +4194,6 @@ static int pin_down_extent(struct btrfs_
/* __GFP_NOFAIL means it can't return -ENOMEM */
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
- return 0;
}
/*
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 8/9] btrfs: Push up btrfs_pin_extent failures
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (6 preceding siblings ...)
2011-08-10 23:20 ` [patch 7/9] btrfs: Make pin_down_extent return void Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures Jeff Mahoney
8 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
btrfs_pin_extent looks up a block group and then calls pin_down_extent
with it. If the lookup fails, it should return -ENOENT to allow callers
to handle the error condition. For the three existing callers, it is
a logic error if the lookup fails and a panic will occur.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/extent-tree.c | 20 +++++++++++++++-----
fs/btrfs/tree-log.c | 10 +++++++---
2 files changed, 22 insertions(+), 8 deletions(-)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2075,8 +2075,14 @@ static int run_one_delayed_ref(struct bt
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
if (insert_reserved) {
- btrfs_pin_extent(root, node->bytenr,
- node->num_bytes, 1);
+ ret = btrfs_pin_extent(root, node->bytenr,
+ node->num_bytes, 1);
+ if (ret)
+ btrfs_panic(root->fs_info, ret,
+ "Cannot pin extent in range "
+ "%llu(%llu)\n",
+ node->bytenr, node->num_bytes);
+
if (head->is_data) {
ret = btrfs_del_csums(trans, root,
node->bytenr,
@@ -4205,7 +4211,8 @@ int btrfs_pin_extent(struct btrfs_root *
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache);
+ if (cache == NULL)
+ return -ENOENT;
pin_down_extent(root, cache, bytenr, num_bytes, reserved);
@@ -4765,8 +4772,11 @@ int btrfs_free_extent(struct btrfs_trans
if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
/* unlocks the pinned mutex */
- btrfs_pin_extent(root, bytenr, num_bytes, 1);
- ret = 0;
+ ret = btrfs_pin_extent(root, bytenr, num_bytes, 1);
+ if (ret)
+ btrfs_panic(root->fs_info, ret, "Cannot pin "
+ "extent in range %llu(%llu)\n",
+ bytenr, num_bytes);
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
parent, root_objectid, (int)owner,
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -275,9 +275,13 @@ static int process_one_buffer(struct btr
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
- if (wc->pin)
- btrfs_pin_extent(log->fs_info->extent_root,
- eb->start, eb->len, 0);
+ if (wc->pin) {
+ int ret = btrfs_pin_extent(log->fs_info->extent_root,
+ eb->start, eb->len, 0);
+ if (ret)
+ btrfs_panic(log->fs_info, ret, "Cannot pin extent in "
+ "range %llu(%llu)\n", eb->start, eb->len);
+ }
if (btrfs_buffer_uptodate(eb, gen)) {
if (wc->write)
^ permalink raw reply [flat|nested] 15+ messages in thread
* [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
` (7 preceding siblings ...)
2011-08-10 23:20 ` [patch 8/9] btrfs: Push up btrfs_pin_extent failures Jeff Mahoney
@ 2011-08-10 23:20 ` Jeff Mahoney
2011-08-11 1:27 ` Tsutomu Itoh
8 siblings, 1 reply; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-10 23:20 UTC (permalink / raw)
To: linux-btrfs
This patch handles btrfs_start_transaction failures that don't occur
in a loop and are obvious to simply push up. In all cases except the
mark_garbage_root case, the error is already handled by BUG_ON in the
caller.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/extent-tree.c | 6 +++++-
fs/btrfs/relocation.c | 6 ++++--
fs/btrfs/tree-log.c | 5 ++++-
fs/btrfs/volumes.c | 3 ++-
4 files changed, 15 insertions(+), 5 deletions(-)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo
}
trans = btrfs_start_transaction(tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ kfree(wc);
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+ }
if (block_rsv)
trans->block_rsv = block_rsv;
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4096,7 +4096,8 @@ static noinline_for_stack int mark_garba
int ret;
trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
memset(&root->root_item.drop_progress, 0,
sizeof(root->root_item.drop_progress));
@@ -4176,7 +4177,8 @@ int btrfs_recover_relocation(struct btrf
err = ret;
goto out;
}
- mark_garbage_root(reloc_root);
+ ret = mark_garbage_root(reloc_root);
+ BUG_ON(ret);
}
}
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs
fs_info->log_root_recovering = 1;
trans = btrfs_start_transaction(fs_info->tree_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ btrfs_free_path(path);
+ return PTR_ERR(trans);
+ }
wc.trans = trans;
wc.pin = 1;
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b
return ret;
trans = btrfs_start_transaction(root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
lock_chunks(root);
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [patch 3/9] btrfs: Push up set_extent_bit errors to callers
2011-08-10 23:20 ` [patch 3/9] btrfs: Push up set_extent_bit errors to callers Jeff Mahoney
@ 2011-08-11 0:08 ` Jeff Mahoney
0 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-11 0:08 UTC (permalink / raw)
To: linux-btrfs
On 08/10/2011 07:20 PM, Jeff Mahoney wrote:
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
[...]
Oops. This is missing one more case. I'll re-send this one as part
of a general post-review re-send.
@@ -3292,8 +3316,10 @@ int set_extent_buffer_uptodate(struct ex
num_pages = num_extent_pages(eb->start, eb->len);
if (eb_straddles_pages(eb)) {
- set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- NULL, GFP_NOFS);
+ int ret = set_extent_uptodate(tree, eb->start,
+ eb->start + eb->len - 1,
+ NULL, GFP_NOFS);
+ BUG_ON(ret < 0);
}
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
-Jeff
--
Jeff Mahoney
SUSE Labs
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
2011-08-10 23:20 ` [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures Jeff Mahoney
@ 2011-08-11 1:27 ` Tsutomu Itoh
2011-08-11 1:58 ` Jeff Mahoney
0 siblings, 1 reply; 15+ messages in thread
From: Tsutomu Itoh @ 2011-08-11 1:27 UTC (permalink / raw)
To: Jeff Mahoney; +Cc: linux-btrfs
Hi, Jeff,
(2011/08/11 8:20), Jeff Mahoney wrote:
> This patch handles btrfs_start_transaction failures that don't occur
> in a loop and are obvious to simply push up. In all cases except the
> mark_garbage_root case, the error is already handled by BUG_ON in the
> caller.
>
> Signed-off-by: Jeff Mahoney <jeffm@suse.com>
> ---
> fs/btrfs/extent-tree.c | 6 +++++-
> fs/btrfs/relocation.c | 6 ++++--
> fs/btrfs/tree-log.c | 5 ++++-
> fs/btrfs/volumes.c | 3 ++-
> 4 files changed, 15 insertions(+), 5 deletions(-)
>
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo
> }
>
> trans = btrfs_start_transaction(tree_root, 0);
> - BUG_ON(IS_ERR(trans));
> + if (IS_ERR(trans)) {
> + kfree(wc);
> + btrfs_free_path(path);
> + return PTR_ERR(trans);
> + }
The caller of btrfs_drop_snapshot() ignores the error. So, I don't think
that it is significant even if the error is returned to the caller.
I think that it should make the filesystem read-only when an error occurs
in btrfs_start_transaction().
Thanks,
Tsutomu
>
> if (block_rsv)
> trans->block_rsv = block_rsv;
> --- a/fs/btrfs/relocation.c
> +++ b/fs/btrfs/relocation.c
> @@ -4096,7 +4096,8 @@ static noinline_for_stack int mark_garba
> int ret;
>
> trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
> - BUG_ON(IS_ERR(trans));
> + if (IS_ERR(trans))
> + return PTR_ERR(trans);
>
> memset(&root->root_item.drop_progress, 0,
> sizeof(root->root_item.drop_progress));
> @@ -4176,7 +4177,8 @@ int btrfs_recover_relocation(struct btrf
> err = ret;
> goto out;
> }
> - mark_garbage_root(reloc_root);
> + ret = mark_garbage_root(reloc_root);
> + BUG_ON(ret);
> }
> }
>
> --- a/fs/btrfs/tree-log.c
> +++ b/fs/btrfs/tree-log.c
> @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs
> fs_info->log_root_recovering = 1;
>
> trans = btrfs_start_transaction(fs_info->tree_root, 0);
> - BUG_ON(IS_ERR(trans));
> + if (IS_ERR(trans)) {
> + btrfs_free_path(path);
> + return PTR_ERR(trans);
> + }
>
> wc.trans = trans;
> wc.pin = 1;
> --- a/fs/btrfs/volumes.c
> +++ b/fs/btrfs/volumes.c
> @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b
> return ret;
>
> trans = btrfs_start_transaction(root, 0);
> - BUG_ON(IS_ERR(trans));
> + if (IS_ERR(trans))
> + return PTR_ERR(trans);
>
> lock_chunks(root);
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
2011-08-11 1:27 ` Tsutomu Itoh
@ 2011-08-11 1:58 ` Jeff Mahoney
2011-08-11 2:18 ` Tsutomu Itoh
0 siblings, 1 reply; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-11 1:58 UTC (permalink / raw)
To: Tsutomu Itoh; +Cc: linux-btrfs
On 08/10/2011 09:27 PM, Tsutomu Itoh wrote:
> Hi, Jeff,
>
> (2011/08/11 8:20), Jeff Mahoney wrote:
>> This patch handles btrfs_start_transaction failures that don't occur
>> in a loop and are obvious to simply push up. In all cases except the
>> mark_garbage_root case, the error is already handled by BUG_ON in the
>> caller.
>>
>> Signed-off-by: Jeff Mahoney<jeffm@suse.com>
>> ---
>> fs/btrfs/extent-tree.c | 6 +++++-
>> fs/btrfs/relocation.c | 6 ++++--
>> fs/btrfs/tree-log.c | 5 ++++-
>> fs/btrfs/volumes.c | 3 ++-
>> 4 files changed, 15 insertions(+), 5 deletions(-)
>>
>> --- a/fs/btrfs/extent-tree.c
>> +++ b/fs/btrfs/extent-tree.c
>> @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo
>> }
>>
>> trans = btrfs_start_transaction(tree_root, 0);
>> - BUG_ON(IS_ERR(trans));
>> + if (IS_ERR(trans)) {
>> + kfree(wc);
>> + btrfs_free_path(path);
>> + return PTR_ERR(trans);
>> + }
>
> The caller of btrfs_drop_snapshot() ignore the error. So, I don't think
> that it is significant even if the error is returned to the caller.
I'd actually consider that a separate issue since btrfs_drop_snapshot
also returns -ENOMEM. The errors should be properly caught or BUG_ON'd
in the caller. My patchset usually catches cases like this but since
btrfs_drop_snapshot already returned an error, I mistakenly assumed it
was handled by the caller.
> I think that it should make the filesystem readonly when becoming an error
> in btrfs_start_transaction().
For -ENOMEM, I don't think that's the way to handle it. Some transaction
start failures can be caught and handled (e.g. just creating a file)
easily by returning errors to the user. Other cases, deep in the code,
may be too complex to unwind and recover from and then a ROFS is the
next-best answer. The callers should be responsible for determining the
correct course of action.
-Jeff
> Thanks,
> Tsutomu
>
>>
>> if (block_rsv)
>> trans->block_rsv = block_rsv;
>> --- a/fs/btrfs/relocation.c
>> +++ b/fs/btrfs/relocation.c
>> @@ -4096,7 +4096,8 @@ static noinline_for_stack int mark_garba
>> int ret;
>>
>> trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
>> - BUG_ON(IS_ERR(trans));
>> + if (IS_ERR(trans))
>> + return PTR_ERR(trans);
>>
>> memset(&root->root_item.drop_progress, 0,
>> sizeof(root->root_item.drop_progress));
>> @@ -4176,7 +4177,8 @@ int btrfs_recover_relocation(struct btrf
>> err = ret;
>> goto out;
>> }
>> - mark_garbage_root(reloc_root);
>> + ret = mark_garbage_root(reloc_root);
>> + BUG_ON(ret);
>> }
>> }
>>
>> --- a/fs/btrfs/tree-log.c
>> +++ b/fs/btrfs/tree-log.c
>> @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs
>> fs_info->log_root_recovering = 1;
>>
>> trans = btrfs_start_transaction(fs_info->tree_root, 0);
>> - BUG_ON(IS_ERR(trans));
>> + if (IS_ERR(trans)) {
>> + btrfs_free_path(path);
>> + return PTR_ERR(trans);
>> + }
>>
>> wc.trans = trans;
>> wc.pin = 1;
>> --- a/fs/btrfs/volumes.c
>> +++ b/fs/btrfs/volumes.c
>> @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b
>> return ret;
>>
>> trans = btrfs_start_transaction(root, 0);
>> - BUG_ON(IS_ERR(trans));
>> + if (IS_ERR(trans))
>> + return PTR_ERR(trans);
>>
>> lock_chunks(root);
>>
>
--
Jeff Mahoney
SuSE Labs
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
2011-08-11 1:58 ` Jeff Mahoney
@ 2011-08-11 2:18 ` Tsutomu Itoh
2011-08-11 2:32 ` Jeff Mahoney
0 siblings, 1 reply; 15+ messages in thread
From: Tsutomu Itoh @ 2011-08-11 2:18 UTC (permalink / raw)
To: Jeff Mahoney; +Cc: linux-btrfs
(2011/08/11 10:58), Jeff Mahoney wrote:
> On 08/10/2011 09:27 PM, Tsutomu Itoh wrote:
>> Hi, Jeff,
>>
>> (2011/08/11 8:20), Jeff Mahoney wrote:
>>> This patch handles btrfs_start_transaction failures that don't occur
>>> in a loop and are obvious to simply push up. In all cases except the
>>> mark_garbage_root case, the error is already handled by BUG_ON in the
>>> caller.
>>>
>>> Signed-off-by: Jeff Mahoney<jeffm@suse.com>
>>> ---
>>> fs/btrfs/extent-tree.c | 6 +++++-
>>> fs/btrfs/relocation.c | 6 ++++--
>>> fs/btrfs/tree-log.c | 5 ++++-
>>> fs/btrfs/volumes.c | 3 ++-
>>> 4 files changed, 15 insertions(+), 5 deletions(-)
>>>
>>> --- a/fs/btrfs/extent-tree.c
>>> +++ b/fs/btrfs/extent-tree.c
>>> @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo
>>> }
>>>
>>> trans = btrfs_start_transaction(tree_root, 0);
>>> - BUG_ON(IS_ERR(trans));
>>> + if (IS_ERR(trans)) {
>>> + kfree(wc);
>>> + btrfs_free_path(path);
>>> + return PTR_ERR(trans);
>>> + }
>>
>> The caller of btrfs_drop_snapshot() ignore the error. So, I don't think
>> that it is significant even if the error is returned to the caller.
>
> I'd actually consider that a separate issue since btrfs_drop_snapshot
> also returns -ENOMEM. The errors should be properly caught or BUG_ON'd
> in the caller. My patchset usually catches cases like this but since
> btrfs_drop_snapshot already returned an error, I mistakenly assumed it
> was handled by the caller.
>
>> I think that it should make the filesystem readonly when becoming an error
>> in btrfs_start_transaction().
>
> For -ENOMEM, I don't think that's the way to handle it. Some transaction
> start failures can be caught and handled (e.g. just creating a file)
> easily by returning errors to the user. Other cases, deep in the code,
> may be too complex to unwind and recover from and then a ROFS is the
> next-best answer. The callers should be responsible for determining the
> correct course of action.
OK.
Could you please add BUG_ON() in the caller or correctly handle the error
returned by btrfs_start_transaction()?
-Tsutomu
>
> -Jeff
>
>> Thanks,
>> Tsutomu
>>
>>>
>>> if (block_rsv)
>>> trans->block_rsv = block_rsv;
>>> --- a/fs/btrfs/relocation.c
>>> +++ b/fs/btrfs/relocation.c
>>> @@ -4096,7 +4096,8 @@ static noinline_for_stack int mark_garba
>>> int ret;
>>>
>>> trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
>>> - BUG_ON(IS_ERR(trans));
>>> + if (IS_ERR(trans))
>>> + return PTR_ERR(trans);
>>>
>>> memset(&root->root_item.drop_progress, 0,
>>> sizeof(root->root_item.drop_progress));
>>> @@ -4176,7 +4177,8 @@ int btrfs_recover_relocation(struct btrf
>>> err = ret;
>>> goto out;
>>> }
>>> - mark_garbage_root(reloc_root);
>>> + ret = mark_garbage_root(reloc_root);
>>> + BUG_ON(ret);
>>> }
>>> }
>>>
>>> --- a/fs/btrfs/tree-log.c
>>> +++ b/fs/btrfs/tree-log.c
>>> @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs
>>> fs_info->log_root_recovering = 1;
>>>
>>> trans = btrfs_start_transaction(fs_info->tree_root, 0);
>>> - BUG_ON(IS_ERR(trans));
>>> + if (IS_ERR(trans)) {
>>> + btrfs_free_path(path);
>>> + return PTR_ERR(trans);
>>> + }
>>>
>>> wc.trans = trans;
>>> wc.pin = 1;
>>> --- a/fs/btrfs/volumes.c
>>> +++ b/fs/btrfs/volumes.c
>>> @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b
>>> return ret;
>>>
>>> trans = btrfs_start_transaction(root, 0);
>>> - BUG_ON(IS_ERR(trans));
>>> + if (IS_ERR(trans))
>>> + return PTR_ERR(trans);
>>>
>>> lock_chunks(root);
>>>
>>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures
2011-08-11 2:18 ` Tsutomu Itoh
@ 2011-08-11 2:32 ` Jeff Mahoney
0 siblings, 0 replies; 15+ messages in thread
From: Jeff Mahoney @ 2011-08-11 2:32 UTC (permalink / raw)
To: Tsutomu Itoh; +Cc: linux-btrfs
On 08/10/2011 10:18 PM, Tsutomu Itoh wrote:
> (2011/08/11 10:58), Jeff Mahoney wrote:
>> On 08/10/2011 09:27 PM, Tsutomu Itoh wrote:
>>> Hi, Jeff,
>>>
>>> (2011/08/11 8:20), Jeff Mahoney wrote:
>>>> This patch handles btrfs_start_transaction failures that don't occur
>>>> in a loop and are obvious to simply push up. In all cases except the
>>>> mark_garbage_root case, the error is already handled by BUG_ON in the
>>>> caller.
>>>>
>>>> Signed-off-by: Jeff Mahoney<jeffm@suse.com>
>>>> ---
>>>> fs/btrfs/extent-tree.c | 6 +++++-
>>>> fs/btrfs/relocation.c | 6 ++++--
>>>> fs/btrfs/tree-log.c | 5 ++++-
>>>> fs/btrfs/volumes.c | 3 ++-
>>>> 4 files changed, 15 insertions(+), 5 deletions(-)
>>>>
>>>> --- a/fs/btrfs/extent-tree.c
>>>> +++ b/fs/btrfs/extent-tree.c
>>>> @@ -6319,7 +6319,11 @@ int btrfs_drop_snapshot(struct btrfs_roo
>>>> }
>>>>
>>>> trans = btrfs_start_transaction(tree_root, 0);
>>>> - BUG_ON(IS_ERR(trans));
>>>> + if (IS_ERR(trans)) {
>>>> + kfree(wc);
>>>> + btrfs_free_path(path);
>>>> + return PTR_ERR(trans);
>>>> + }
>>>
>>> The caller of btrfs_drop_snapshot() ignore the error. So, I don't think
>>> that it is significant even if the error is returned to the caller.
>>
>> I'd actually consider that a separate issue since btrfs_drop_snapshot
>> also returns -ENOMEM. The errors should be properly caught or BUG_ON'd
>> in the caller. My patchset usually catches cases like this but since
>> btrfs_drop_snapshot already returned an error, I mistakenly assumed it
>> was handled by the caller.
>>
>>> I think that it should make the filesystem readonly when becoming an error
>>> in btrfs_start_transaction().
>>
>> For -ENOMEM, I don't think that's the way to handle it. Some transaction
>> start failures can be caught and handled (e.g. just creating a file)
>> easily by returning errors to the user. Other cases, deep in the code,
>> may be too complex to unwind and recover from and then a ROFS is the
>> next-best answer. The callers should be responsible for determining the
>> correct course of action.
>
> OK.
> Could you please append BUG_ON() in the caller or correctly handle the error
> of btrfs_start_transaction()?
Yep. I'll add that in. Thanks for the review.
-Jeff
>> -Jeff
>>
>>> Thanks,
>>> Tsutomu
>>>
>>>>
>>>> if (block_rsv)
>>>> trans->block_rsv = block_rsv;
>>>> --- a/fs/btrfs/relocation.c
>>>> +++ b/fs/btrfs/relocation.c
>>>> @@ -4096,7 +4096,8 @@ static noinline_for_stack int mark_garba
>>>> int ret;
>>>>
>>>> trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
>>>> - BUG_ON(IS_ERR(trans));
>>>> + if (IS_ERR(trans))
>>>> + return PTR_ERR(trans);
>>>>
>>>> memset(&root->root_item.drop_progress, 0,
>>>> sizeof(root->root_item.drop_progress));
>>>> @@ -4176,7 +4177,8 @@ int btrfs_recover_relocation(struct btrf
>>>> err = ret;
>>>> goto out;
>>>> }
>>>> - mark_garbage_root(reloc_root);
>>>> + ret = mark_garbage_root(reloc_root);
>>>> + BUG_ON(ret);
>>>> }
>>>> }
>>>>
>>>> --- a/fs/btrfs/tree-log.c
>>>> +++ b/fs/btrfs/tree-log.c
>>>> @@ -3151,7 +3151,10 @@ int btrfs_recover_log_trees(struct btrfs
>>>> fs_info->log_root_recovering = 1;
>>>>
>>>> trans = btrfs_start_transaction(fs_info->tree_root, 0);
>>>> - BUG_ON(IS_ERR(trans));
>>>> + if (IS_ERR(trans)) {
>>>> + btrfs_free_path(path);
>>>> + return PTR_ERR(trans);
>>>> + }
>>>>
>>>> wc.trans = trans;
>>>> wc.pin = 1;
>>>> --- a/fs/btrfs/volumes.c
>>>> +++ b/fs/btrfs/volumes.c
>>>> @@ -1876,7 +1876,8 @@ static int btrfs_relocate_chunk(struct b
>>>> return ret;
>>>>
>>>> trans = btrfs_start_transaction(root, 0);
>>>> - BUG_ON(IS_ERR(trans));
>>>> + if (IS_ERR(trans))
>>>> + return PTR_ERR(trans);
>>>>
>>>> lock_chunks(root);
>>>>
>>>
>
--
Jeff Mahoney
SuSE Labs
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2011-08-11 2:32 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-08-10 23:20 [patch 0/9] btrfs: More error handling patches Jeff Mahoney
2011-08-10 23:20 ` [patch 1/9] btrfs: Add btrfs_panic() Jeff Mahoney
2011-08-10 23:20 ` [patch 2/9] btrfs: Catch locking failures in {set,clear}_extent_bit Jeff Mahoney
2011-08-10 23:20 ` [patch 3/9] btrfs: Push up set_extent_bit errors to callers Jeff Mahoney
2011-08-11 0:08 ` Jeff Mahoney
2011-08-10 23:20 ` [patch 4/9] btrfs: Push up lock_extent " Jeff Mahoney
2011-08-10 23:20 ` [patch 5/9] btrfs: Push up clear_extent_bit " Jeff Mahoney
2011-08-10 23:20 ` [patch 6/9] btrfs: Push up unlock_extent " Jeff Mahoney
2011-08-10 23:20 ` [patch 7/9] btrfs: Make pin_down_extent return void Jeff Mahoney
2011-08-10 23:20 ` [patch 8/9] btrfs: Push up btrfs_pin_extent failures Jeff Mahoney
2011-08-10 23:20 ` [patch 9/9] btrfs: Push up non-looped btrfs_start_transaction failures Jeff Mahoney
2011-08-11 1:27 ` Tsutomu Itoh
2011-08-11 1:58 ` Jeff Mahoney
2011-08-11 2:18 ` Tsutomu Itoh
2011-08-11 2:32 ` Jeff Mahoney
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.