Btrfs: Fix hole insertion corner cases

There were a few places that could cause duplicate extent insertion; this
adjusts the code that creates holes to avoid inserting the same extent twice.
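
One of the hole-creating spots is the write path in file.c, where the hole
between the rounded-up i_size and the new data is now sized against the end
of the write instead of its start.  A minimal standalone sketch of the
corrected calculation (the helper name and free-standing form are
illustrative, not part of the patch):

	/*
	 * Illustrative only: size of the hole that has to be inserted between
	 * the on-disk i_size and a write ending at end_pos.  All values are in
	 * bytes and sectorsize is a power of two, as in file.c.
	 */
	static u64 hole_size_for_write(u64 isize, u64 end_pos, u64 sectorsize)
	{
		u64 mask = sectorsize - 1;
		u64 last_pos_in_file = (isize + mask) & ~mask;

		/* cover everything up to the end of the write, not just its start */
		return (end_pos - last_pos_in_file + mask) & ~mask;
	}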

lookup_extent_map is changed to correctly return the extents in a range,
even when none of them match the start of the range.
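
For example, with a single extent mapped at byte range [4096, 8191] and a
hole in front of it, a lookup whose range starts inside the hole now finds
that extent (illustrative fragment; em_tree stands in for the inode's
extent_map_tree):

	struct extent_map *em;

	/*
	 * The range starts in a hole, but the [4096, 8191] extent overlaps
	 * [0, 8191], so lookup_extent_map returns it with a reference held.
	 */
	em = lookup_extent_map(em_tree, 0, 8191);
	if (em)
		free_extent_map(em);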

Signed-off-by: Chris Mason <chris.mason@oracle.com>
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7a588ba..b9f2975 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1170,6 +1170,7 @@
 			      struct inode *inode);
 /* file.c */
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end);
+int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
 extern struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 485cf07..010a287 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -204,10 +204,12 @@
 }
 
 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
-				   struct rb_node **prev_ret)
+				     struct rb_node **prev_ret,
+				     struct rb_node **next_ret)
 {
 	struct rb_node * n = root->rb_node;
 	struct rb_node *prev = NULL;
+	struct rb_node *orig_prev = NULL;
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
 
@@ -223,13 +225,25 @@
 		else
 			return n;
 	}
-	if (!prev_ret)
-		return NULL;
-	while(prev && offset > prev_entry->end) {
-		prev = rb_next(prev);
-		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+
+	if (prev_ret) {
+		orig_prev = prev;
+		while(prev && offset > prev_entry->end) {
+			prev = rb_next(prev);
+			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		}
+		*prev_ret = prev;
+		prev = orig_prev;
 	}
-	*prev_ret = prev;
+
+	if (next_ret) {
+		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		while(prev && offset < prev_entry->start) {
+			prev = rb_prev(prev);
+			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		}
+		*next_ret = prev;
+	}
 	return NULL;
 }
 
@@ -237,7 +251,7 @@
 {
 	struct rb_node *prev;
 	struct rb_node *ret;
-	ret = __tree_search(root, offset, &prev);
+	ret = __tree_search(root, offset, &prev, NULL);
 	if (!ret)
 		return prev;
 	return ret;
@@ -248,7 +262,7 @@
 	struct rb_node *node;
 	struct tree_entry *entry;
 
-	node = __tree_search(root, offset, NULL);
+	node = __tree_search(root, offset, NULL, NULL);
 	if (!node)
 		return -ENOENT;
 	entry = rb_entry(node, struct tree_entry, rb_node);
@@ -314,9 +328,21 @@
 {
 	struct extent_map *em;
 	struct rb_node *rb_node;
+	struct rb_node *prev = NULL;
+	struct rb_node *next = NULL;
 
 	read_lock_irq(&tree->lock);
-	rb_node = tree_search(&tree->map, start);
+	rb_node = __tree_search(&tree->map, start, &prev, &next);
+	if (!rb_node && prev) {
+		em = rb_entry(prev, struct extent_map, rb_node);
+		if (em->start <= end && em->end >= start)
+			goto found;
+	}
+	if (!rb_node && next) {
+		em = rb_entry(next, struct extent_map, rb_node);
+		if (em->start <= end && em->end >= start)
+			goto found;
+	}
 	if (!rb_node) {
 		em = NULL;
 		goto out;
@@ -330,6 +356,7 @@
 		em = NULL;
 		goto out;
 	}
+found:
 	atomic_inc(&em->refs);
 out:
 	read_unlock_irq(&tree->lock);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 897242e..1cd8c90 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -278,7 +278,7 @@
 		u64 hole_size;
 		u64 mask = root->sectorsize - 1;
 		last_pos_in_file = (isize + mask) & ~mask;
-		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
+		hole_size = (end_pos - last_pos_in_file + mask) & ~mask;
 
 		if (last_pos_in_file < start_pos) {
 			err = btrfs_drop_extents(trans, root, inode,
@@ -293,6 +293,7 @@
 						       inode->i_ino,
 						       last_pos_in_file,
 						       0, 0, hole_size);
+			btrfs_check_file(root, inode);
 		}
 		if (err)
 			goto failed;
@@ -378,6 +379,80 @@
 	return 0;
 }
 
+int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
+{
+	return 0;
+#if 0
+	struct btrfs_path *path;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+	struct btrfs_file_extent_item *extent;
+	u64 last_offset = 0;
+	int nritems;
+	int slot;
+	int found_type;
+	int ret;
+	int err = 0;
+	u64 extent_end = 0;
+
+	path = btrfs_alloc_path();
+	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
+				       last_offset, 0);
+	while(1) {
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret)
+				goto out;
+			nritems = btrfs_header_nritems(path->nodes[0]);
+		}
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+		if (found_key.objectid != inode->i_ino)
+			break;
+		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
+			goto out;
+
+		if (found_key.offset != last_offset) {
+			WARN_ON(1);
+			btrfs_print_leaf(root, leaf);
+			printk("inode %lu found offset %Lu expected %Lu\n",
+			       inode->i_ino, found_key.offset, last_offset);
+			err = 1;
+			goto out;
+		}
+		extent = btrfs_item_ptr(leaf, slot,
+					struct btrfs_file_extent_item);
+		found_type = btrfs_file_extent_type(leaf, extent);
+		if (found_type == BTRFS_FILE_EXTENT_REG) {
+			extent_end = found_key.offset +
+			     btrfs_file_extent_num_bytes(leaf, extent);
+		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+			struct btrfs_item *item;
+			item = btrfs_item_nr(leaf, slot);
+			extent_end = found_key.offset +
+			     btrfs_file_extent_inline_len(leaf, item);
+			extent_end = (extent_end + root->sectorsize - 1) &
+				~((u64)root->sectorsize -1 );
+		}
+		last_offset = extent_end;
+		path->slots[0]++;
+	}
+	if (last_offset < inode->i_size) {
+		WARN_ON(1);
+		btrfs_print_leaf(root, leaf);
+		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
+		       last_offset, inode->i_size);
+		err = 1;
+
+	}
+out:
+	btrfs_free_path(path);
+	return err;
+#endif
+}
+
 /*
  * this is very complex, but the basic idea is to drop all extents
  * in the range start - end.  hint_block is filled in with a block number
@@ -436,6 +511,7 @@
 		slot = path->slots[0];
 		ret = 0;
 		btrfs_item_key_to_cpu(leaf, &key, slot);
+
 		if (key.offset >= end || key.objectid != inode->i_ino) {
 			goto out;
 		}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 51fc06d..6700548 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -133,6 +133,7 @@
 		ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
 					       start, ins.objectid, ins.offset,
 					       ins.offset);
+		btrfs_check_file(root, inode);
 		num_bytes -= cur_alloc_size;
 		alloc_hint = ins.objectid + ins.offset;
 		start += cur_alloc_size;
@@ -965,12 +966,18 @@
 		u64 mask = root->sectorsize - 1;
 		u64 pos = (inode->i_size + mask) & ~mask;
 		u64 block_end = attr->ia_size | mask;
+		u64 hole_start;
 		u64 hole_size;
 		u64 alloc_hint = 0;
 
 		if (attr->ia_size <= pos)
 			goto out;
 
+		if (pos != inode->i_size)
+			hole_start = pos + root->sectorsize;
+		else
+			hole_start = pos;
+
 		mutex_lock(&root->fs_info->fs_mutex);
 		err = btrfs_check_free_space(root, 1, 0);
 		mutex_unlock(&root->fs_info->fs_mutex);
@@ -980,19 +987,21 @@
 		btrfs_truncate_page(inode->i_mapping, inode->i_size);
 
 		lock_extent(em_tree, pos, block_end, GFP_NOFS);
-		hole_size = (attr->ia_size - pos + mask) & ~mask;
+		hole_size = block_end - hole_start;
 
 		mutex_lock(&root->fs_info->fs_mutex);
 		trans = btrfs_start_transaction(root, 1);
 		btrfs_set_trans_block_group(trans, inode);
 		err = btrfs_drop_extents(trans, root, inode,
-					 pos, pos + hole_size, pos,
+					 pos, block_end, pos,
 					 &alloc_hint);
 
 		if (alloc_hint != EXTENT_MAP_INLINE) {
 			err = btrfs_insert_file_extent(trans, root,
 						       inode->i_ino,
-						       pos, 0, 0, hole_size);
+						       hole_start, 0, 0,
+						       hole_size);
+			btrfs_check_file(root, inode);
 		}
 		btrfs_end_transaction(trans, root);
 		mutex_unlock(&root->fs_info->fs_mutex);