exofs: Move all operations to an io_engine

In anticipation of multi-device operations, we separate the osd operations
into an abstract I/O API. Currently only one device is used, but later,
when more devices are added, we will drive all devices in parallel according
to a "data_map" that describes how data is arranged on the multiple devices.
The file-system level operates, as before, as if there is one object
(inode-number) and an i_size. The io engine will split this into the same
object-number but on multiple devices.

At first we introduce a Mirror (RAID 1) layout. But the end goal is to
fully implement the pNFS-Objects data-map, including RAID 0,4,5,6 over
mirrored devices, over multiple device-groups, and more.
See: http://tools.ietf.org/html/draft-ietf-nfsv4-pnfs-obj-12

* Define an io_state-based API for accessing osd storage devices
  in an abstract way.
  Usage (see also the sketch after this list):
	First a caller allocates an io state with:
		exofs_get_io_state(struct exofs_sb_info *sbi,
				   struct exofs_io_state** ios);

	Then calls one of:
		exofs_sbi_create(struct exofs_io_state *ios);
		exofs_sbi_remove(struct exofs_io_state *ios);
		exofs_sbi_write(struct exofs_io_state *ios);
		exofs_sbi_read(struct exofs_io_state *ios);
		exofs_oi_truncate(struct exofs_i_info *oi, u64 new_len);

	And when done:
		exofs_put_io_state(struct exofs_io_state *ios);

* Convert all source files to use this new API
* Convert from bio_alloc to bio_kmalloc
* In the io engine, make use of the now-fixed osd_req_decode_sense
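
For illustration, a minimal sketch of a synchronous object read driven
through the new API. The helper name example_sync_read is hypothetical;
leaving ios->done NULL to select the synchronous path is an assumption
based on how read_exec() below uses exofs_oi_read():

	/* Sketch only: read 'length' bytes at 'offset' from an inode's
	 * object, assuming the caller already built the bio, as
	 * read_exec() does.
	 */
	static int example_sync_read(struct exofs_i_info *oi,
				     struct bio *bio, u64 offset, u64 length)
	{
		struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
		struct exofs_io_state *ios;
		int ret;

		ret = exofs_get_io_state(sbi, &ios);
		if (unlikely(ret))
			return ret;

		ios->bio = bio;		/* pages to read into */
		ios->offset = offset;	/* byte offset within the object */
		ios->length = length;	/* bytes to transfer */
		/* ios->done left NULL => synchronous completion (assumed) */

		ret = exofs_oi_read(oi, ios); /* obj/cred taken from oi */
		if (likely(!ret))
			ret = exofs_check_io(ios, NULL);

		exofs_put_io_state(ios);
		return ret;
	}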

There are no functional changes or on-disk additions with this patch.

Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 7bc71a7..7578950 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,17 +37,18 @@
 
 #include "exofs.h"
 
-#ifdef CONFIG_EXOFS_DEBUG
-#  define EXOFS_DEBUG_OBJ_ISIZE 1
-#endif
-
 #define EXOFS_DBGMSG2(M...) do {} while (0)
 
+enum { BIO_MAX_PAGES_KMALLOC =
+		(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
+};
+
 struct page_collect {
 	struct exofs_sb_info *sbi;
 	struct request_queue *req_q;
 	struct inode *inode;
 	unsigned expected_pages;
+	struct exofs_io_state *ios;
 
 	struct bio *bio;
 	unsigned nr_pages;
@@ -56,7 +57,7 @@
 };
 
 static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
-		struct inode *inode)
+		       struct inode *inode)
 {
 	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
 
@@ -65,13 +66,11 @@
 	pcol->inode = inode;
 	pcol->expected_pages = expected_pages;
 
+	pcol->ios = NULL;
 	pcol->bio = NULL;
 	pcol->nr_pages = 0;
 	pcol->length = 0;
 	pcol->pg_first = -1;
-
-	EXOFS_DBGMSG("_pcol_init ino=0x%lx expected_pages=%u\n", inode->i_ino,
-		     expected_pages);
 }
 
 static void _pcol_reset(struct page_collect *pcol)
@@ -82,35 +81,49 @@
 	pcol->nr_pages = 0;
 	pcol->length = 0;
 	pcol->pg_first = -1;
-	EXOFS_DBGMSG("_pcol_reset ino=0x%lx expected_pages=%u\n",
-		     pcol->inode->i_ino, pcol->expected_pages);
+	pcol->ios = NULL;
 
 	/* this is probably the end of the loop but in writes
 	 * it might not end here. don't be left with nothing
 	 */
 	if (!pcol->expected_pages)
-		pcol->expected_pages = 128;
+		pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
 }
 
 static int pcol_try_alloc(struct page_collect *pcol)
 {
-	int pages = min_t(unsigned, pcol->expected_pages, BIO_MAX_PAGES);
+	int pages = min_t(unsigned, pcol->expected_pages,
+			  BIO_MAX_PAGES_KMALLOC);
+
+	if (!pcol->ios) { /* First time allocate io_state */
+		int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);
+
+		if (ret)
+			return ret;
+	}
 
 	for (; pages; pages >>= 1) {
-		pcol->bio = bio_alloc(GFP_KERNEL, pages);
+		pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
 		if (likely(pcol->bio))
 			return 0;
 	}
 
-	EXOFS_ERR("Failed to kcalloc expected_pages=%u\n",
+	EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
 		  pcol->expected_pages);
 	return -ENOMEM;
 }
 
 static void pcol_free(struct page_collect *pcol)
 {
-	bio_put(pcol->bio);
-	pcol->bio = NULL;
+	if (pcol->bio) {
+		bio_put(pcol->bio);
+		pcol->bio = NULL;
+	}
+
+	if (pcol->ios) {
+		exofs_put_io_state(pcol->ios);
+		pcol->ios = NULL;
+	}
 }
 
 static int pcol_add_page(struct page_collect *pcol, struct page *page,
@@ -163,22 +176,17 @@
 /* Called at the end of reads, to optionally unlock pages and update their
  * status.
  */
-static int __readpages_done(struct osd_request *or, struct page_collect *pcol,
-			    bool do_unlock)
+static int __readpages_done(struct page_collect *pcol, bool do_unlock)
 {
 	struct bio_vec *bvec;
 	int i;
 	u64 resid;
 	u64 good_bytes;
 	u64 length = 0;
-	int ret = exofs_check_ok_resid(or, &resid, NULL);
-
-	osd_end_request(or);
+	int ret = exofs_check_io(pcol->ios, &resid);
 
 	if (likely(!ret))
 		good_bytes = pcol->length;
-	else if (!resid)
-		good_bytes = 0;
 	else
 		good_bytes = pcol->length - resid;
 
@@ -216,13 +224,13 @@
 }
 
 /* callback of async reads */
-static void readpages_done(struct osd_request *or, void *p)
+static void readpages_done(struct exofs_io_state *ios, void *p)
 {
 	struct page_collect *pcol = p;
 
-	__readpages_done(or, pcol, true);
+	__readpages_done(pcol, true);
 	atomic_dec(&pcol->sbi->s_curr_pending);
-	kfree(p);
+	kfree(pcol);
 }
 
 static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
@@ -240,17 +248,13 @@
 
 		unlock_page(page);
 	}
-	pcol_free(pcol);
 }
 
 static int read_exec(struct page_collect *pcol, bool is_sync)
 {
 	struct exofs_i_info *oi = exofs_i(pcol->inode);
-	struct osd_obj_id obj = {pcol->sbi->s_pid,
-					pcol->inode->i_ino + EXOFS_OBJ_OFF};
-	struct osd_request *or = NULL;
+	struct exofs_io_state *ios = pcol->ios;
 	struct page_collect *pcol_copy = NULL;
-	loff_t i_start = pcol->pg_first << PAGE_CACHE_SHIFT;
 	int ret;
 
 	if (!pcol->bio)
@@ -259,17 +263,13 @@
 	/* see comment in _readpage() about sync reads */
 	WARN_ON(is_sync && (pcol->nr_pages != 1));
 
-	or = osd_start_request(pcol->sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	osd_req_read(or, &obj, i_start, pcol->bio, pcol->length);
+	ios->bio = pcol->bio;
+	ios->length = pcol->length;
+	ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
 
 	if (is_sync) {
-		exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred);
-		return __readpages_done(or, pcol, false);
+		exofs_oi_read(oi, pcol->ios);
+		return __readpages_done(pcol, false);
 	}
 
 	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
@@ -279,14 +279,16 @@
 	}
 
 	*pcol_copy = *pcol;
-	ret = exofs_async_op(or, readpages_done, pcol_copy, oi->i_cred);
+	ios->done = readpages_done;
+	ios->private = pcol_copy;
+	ret = exofs_oi_read(oi, ios);
 	if (unlikely(ret))
 		goto err;
 
 	atomic_inc(&pcol->sbi->s_curr_pending);
 
 	EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
-		  obj.id, _LLU(i_start), pcol->length);
+		  ios->obj.id, _LLU(ios->offset), pcol->length);
 
 	/* pages ownership was passed to pcol_copy */
 	_pcol_reset(pcol);
@@ -295,12 +297,10 @@
 err:
 	if (!is_sync)
 		_unlock_pcol_pages(pcol, ret, READ);
-	else /* Pages unlocked by caller in sync mode only free bio */
-		pcol_free(pcol);
+
+	pcol_free(pcol);
 
 	kfree(pcol_copy);
-	if (or)
-		osd_end_request(or);
 	return ret;
 }
 
@@ -421,9 +421,8 @@
 
 	_pcol_init(&pcol, 1, page->mapping->host);
 
-	/* readpage_strip might call read_exec(,async) inside at several places
-	 * but this is safe for is_async=0 since read_exec will not do anything
-	 * when we have a single page.
+	/* readpage_strip might call read_exec(,is_sync==false) at several
+	 * places but not if we have a single page.
 	 */
 	ret = readpage_strip(&pcol, page);
 	if (ret) {
@@ -442,8 +441,8 @@
 	return _readpage(page, false);
 }
 
-/* Callback for osd_write. All writes are asynchronouse */
-static void writepages_done(struct osd_request *or, void *p)
+/* Callback for osd_write. All writes are asynchronous */
+static void writepages_done(struct exofs_io_state *ios, void *p)
 {
 	struct page_collect *pcol = p;
 	struct bio_vec *bvec;
@@ -451,16 +450,12 @@
 	u64 resid;
 	u64  good_bytes;
 	u64  length = 0;
+	int ret = exofs_check_io(ios, &resid);
 
-	int ret = exofs_check_ok_resid(or, NULL, &resid);
-
-	osd_end_request(or);
 	atomic_dec(&pcol->sbi->s_curr_pending);
 
 	if (likely(!ret))
 		good_bytes = pcol->length;
-	else if (!resid)
-		good_bytes = 0;
 	else
 		good_bytes = pcol->length - resid;
 
@@ -498,23 +493,13 @@
 static int write_exec(struct page_collect *pcol)
 {
 	struct exofs_i_info *oi = exofs_i(pcol->inode);
-	struct osd_obj_id obj = {pcol->sbi->s_pid,
-					pcol->inode->i_ino + EXOFS_OBJ_OFF};
-	struct osd_request *or = NULL;
+	struct exofs_io_state *ios = pcol->ios;
 	struct page_collect *pcol_copy = NULL;
-	loff_t i_start = pcol->pg_first << PAGE_CACHE_SHIFT;
 	int ret;
 
 	if (!pcol->bio)
 		return 0;
 
-	or = osd_start_request(pcol->sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("write_exec: Faild to osd_start_request()\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
 	if (!pcol_copy) {
 		EXOFS_ERR("write_exec: Faild to kmalloc(pcol)\n");
@@ -525,16 +510,22 @@
 	*pcol_copy = *pcol;
 
 	pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
-	osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length);
-	ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred);
+
+	ios->bio = pcol_copy->bio;
+	ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
+	ios->length = pcol_copy->length;
+	ios->done = writepages_done;
+	ios->private = pcol_copy;
+
+	ret = exofs_oi_write(oi, ios);
 	if (unlikely(ret)) {
-		EXOFS_ERR("write_exec: exofs_async_op() Faild\n");
+		EXOFS_ERR("write_exec: exofs_oi_write() Faild\n");
 		goto err;
 	}
 
 	atomic_inc(&pcol->sbi->s_curr_pending);
 	EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
-		  pcol->inode->i_ino, pcol->pg_first, _LLU(i_start),
+		  pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
 		  pcol->length);
 	/* pages ownership was passed to pcol_copy */
 	_pcol_reset(pcol);
@@ -542,9 +533,9 @@
 
 err:
 	_unlock_pcol_pages(pcol, ret, WRITE);
+	pcol_free(pcol);
 	kfree(pcol_copy);
-	if (or)
-		osd_end_request(or);
+
 	return ret;
 }
 
@@ -588,6 +579,9 @@
 			if (PageError(page))
 				ClearPageError(page);
 			unlock_page(page);
+			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
+				     "outside the limits\n",
+				     inode->i_ino, page->index);
 			return 0;
 		}
 	}
@@ -602,6 +596,9 @@
 		ret = write_exec(pcol);
 		if (unlikely(ret))
 			goto fail;
+
+		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
+			     inode->i_ino, page->index);
 		goto try_again;
 	}
 
@@ -636,6 +633,8 @@
 	return 0;
 
 fail:
+	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
+		     inode->i_ino, page->index, ret);
 	set_bit(AS_EIO, &page->mapping->flags);
 	unlock_page(page);
 	return ret;
@@ -654,14 +653,17 @@
 			wbc->range_end >> PAGE_CACHE_SHIFT;
 
 	if (start || end)
-		expected_pages = min(end - start + 1, 32L);
+		expected_pages = end - start + 1;
 	else
 		expected_pages = mapping->nrpages;
 
-	EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx"
-		     " m->nrpages=%lu start=0x%lx end=0x%lx\n",
+	if (expected_pages < 32L)
+		expected_pages = 32L;
+
+	EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
+		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
 		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
-		     mapping->nrpages, start, end);
+		     mapping->nrpages, start, end, expected_pages);
 
 	_pcol_init(&pcol, expected_pages, mapping->host);
 
@@ -773,19 +775,28 @@
 const struct osd_attr g_attr_logical_length = ATTR_DEF(
 	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
 
+static int _do_truncate(struct inode *inode)
+{
+	struct exofs_i_info *oi = exofs_i(inode);
+	loff_t isize = i_size_read(inode);
+	int ret;
+
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+	nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);
+
+	ret = exofs_oi_truncate(oi, (u64)isize);
+	EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
+	return ret;
+}
+
 /*
  * Truncate a file to the specified size - all we have to do is set the size
  * attribute.  We make sure the object exists first.
  */
 void exofs_truncate(struct inode *inode)
 {
-	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
 	struct exofs_i_info *oi = exofs_i(inode);
-	struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
-	struct osd_request *or;
-	struct osd_attr attr;
-	loff_t isize = i_size_read(inode);
-	__be64 newsize;
 	int ret;
 
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
@@ -795,22 +806,6 @@
 		return;
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-
-	nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);
-
-	or = osd_start_request(sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("ERROR: exofs_truncate: osd_start_request failed\n");
-		goto fail;
-	}
-
-	osd_req_set_attributes(or, &obj);
-
-	newsize = cpu_to_be64((u64)isize);
-	attr = g_attr_logical_length;
-	attr.val_ptr = &newsize;
-	osd_req_add_set_attr_list(or, &attr, 1);
 
 	/* if we are about to truncate an object, and it hasn't been
 	 * created yet, wait
@@ -818,8 +813,7 @@
 	if (unlikely(wait_obj_created(oi)))
 		goto fail;
 
-	ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
-	osd_end_request(or);
+	ret = _do_truncate(inode);
 	if (ret)
 		goto fail;
 
@@ -849,66 +843,57 @@
 
 /*
  * Read an inode from the OSD, and return it as is.  We also return the size
- * attribute in the 'sanity' argument if we got compiled with debugging turned
- * on.
+ * attribute in the 'obj_size' argument.
  */
 static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
-		    struct exofs_fcb *inode, uint64_t *sanity)
+		    struct exofs_fcb *inode, uint64_t *obj_size)
 {
 	struct exofs_sb_info *sbi = sb->s_fs_info;
-	struct osd_request *or;
-	struct osd_attr attr;
-	struct osd_obj_id obj = {sbi->s_pid,
-				 oi->vfs_inode.i_ino + EXOFS_OBJ_OFF};
+	struct osd_attr attrs[2];
+	struct exofs_io_state *ios;
 	int ret;
 
-	exofs_make_credential(oi->i_cred, &obj);
-
-	or = osd_start_request(sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("exofs_get_inode: osd_start_request failed.\n");
-		return -ENOMEM;
+	*obj_size = ~0;
+	ret = exofs_get_io_state(sbi, &ios);
+	if (unlikely(ret)) {
+		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+		return ret;
 	}
-	osd_req_get_attributes(or, &obj);
 
-	/* we need the inode attribute */
-	osd_req_add_get_attr_list(or, &g_attr_inode_data, 1);
+	ios->obj.id = exofs_oi_objno(oi);
+	exofs_make_credential(oi->i_cred, &ios->obj);
+	ios->cred = oi->i_cred;
 
-#ifdef EXOFS_DEBUG_OBJ_ISIZE
-	/* we get the size attributes to do a sanity check */
-	osd_req_add_get_attr_list(or, &g_attr_logical_length, 1);
-#endif
+	attrs[0] = g_attr_inode_data;
+	attrs[1] = g_attr_logical_length;
+	ios->in_attr = attrs;
+	ios->in_attr_len = ARRAY_SIZE(attrs);
 
-	ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
+	ret = exofs_sbi_read(ios);
 	if (ret)
 		goto out;
 
-	attr = g_attr_inode_data;
-	ret = extract_attr_from_req(or, &attr);
+	ret = extract_attr_from_ios(ios, &attrs[0]);
 	if (ret) {
-		EXOFS_ERR("exofs_get_inode: extract_attr_from_req failed\n");
+		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
 		goto out;
 	}
+	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
+	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);
 
-	WARN_ON(attr.len != EXOFS_INO_ATTR_SIZE);
-	memcpy(inode, attr.val_ptr, EXOFS_INO_ATTR_SIZE);
-
-#ifdef EXOFS_DEBUG_OBJ_ISIZE
-	attr = g_attr_logical_length;
-	ret = extract_attr_from_req(or, &attr);
+	ret = extract_attr_from_ios(ios, &attrs[1]);
 	if (ret) {
-		EXOFS_ERR("ERROR: extract attr from or failed\n");
+		EXOFS_ERR("%s: extract_attr of logical_length failed\n",
+			  __func__);
 		goto out;
 	}
-	*sanity = get_unaligned_be64(attr.val_ptr);
-#endif
+	*obj_size = get_unaligned_be64(attrs[1].val_ptr);
 
 out:
-	osd_end_request(or);
+	exofs_put_io_state(ios);
 	return ret;
 }
 
-
 static void __oi_init(struct exofs_i_info *oi)
 {
 	init_waitqueue_head(&oi->i_wq);
@@ -922,7 +907,7 @@
 	struct exofs_i_info *oi;
 	struct exofs_fcb fcb;
 	struct inode *inode;
-	uint64_t uninitialized_var(sanity);
+	uint64_t obj_size;
 	int ret;
 
 	inode = iget_locked(sb, ino);
@@ -934,7 +919,7 @@
 	__oi_init(oi);
 
 	/* read the inode from the osd */
-	ret = exofs_get_inode(sb, oi, &fcb, &sanity);
+	ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
 	if (ret)
 		goto bad_inode;
 
@@ -955,13 +940,12 @@
 	inode->i_blkbits = EXOFS_BLKSHIFT;
 	inode->i_generation = le32_to_cpu(fcb.i_generation);
 
-#ifdef EXOFS_DEBUG_OBJ_ISIZE
-	if ((inode->i_size != sanity) &&
+	if ((inode->i_size != obj_size) &&
 		(!exofs_inode_is_fast_symlink(inode))) {
 		EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
-			  inode->i_size, _LLU(sanity));
+			  inode->i_size, _LLU(obj_size));
+		/* FIXME: call exofs_inode_recovery() */
 	}
-#endif
 
 	oi->i_dir_start_lookup = 0;
 
@@ -1027,23 +1011,30 @@
  * set the obj_created flag so that other methods know that the object exists on
  * the OSD.
  */
-static void create_done(struct osd_request *or, void *p)
+static void create_done(struct exofs_io_state *ios, void *p)
 {
 	struct inode *inode = p;
 	struct exofs_i_info *oi = exofs_i(inode);
 	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
 	int ret;
 
-	ret = exofs_check_ok(or);
-	osd_end_request(or);
+	ret = exofs_check_io(ios, NULL);
+	exofs_put_io_state(ios);
+
 	atomic_dec(&sbi->s_curr_pending);
 
 	if (unlikely(ret)) {
 		EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx",
-			  _LLU(sbi->s_pid), _LLU(inode->i_ino + EXOFS_OBJ_OFF));
-		make_bad_inode(inode);
-	} else
-		set_obj_created(oi);
+			  _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
+		/*TODO: When FS is corrupted creation can fail, object already
+		 * exist. Get rid of this asynchronous creation, if exist
+		 * increment the obj counter and try the next object. Until we
+		 * succeed. All these dangling objects will be made into lost
+		 * files by chkfs.exofs
+		 */
+	}
+
+	set_obj_created(oi);
 
 	atomic_dec(&inode->i_count);
 	wake_up(&oi->i_wq);
@@ -1058,8 +1049,7 @@
 	struct inode *inode;
 	struct exofs_i_info *oi;
 	struct exofs_sb_info *sbi;
-	struct osd_request *or;
-	struct osd_obj_id obj;
+	struct exofs_io_state *ios;
 	int ret;
 
 	sb = dir->i_sb;
@@ -1096,28 +1086,28 @@
 
 	mark_inode_dirty(inode);
 
-	obj.partition = sbi->s_pid;
-	obj.id = inode->i_ino + EXOFS_OBJ_OFF;
-	exofs_make_credential(oi->i_cred, &obj);
-
-	or = osd_start_request(sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("exofs_new_inode: osd_start_request failed\n");
-		return ERR_PTR(-ENOMEM);
+	ret = exofs_get_io_state(sbi, &ios);
+	if (unlikely(ret)) {
+		EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
+		return ERR_PTR(ret);
 	}
 
-	osd_req_create_object(or, &obj);
+	ios->obj.id = exofs_oi_objno(oi);
+	exofs_make_credential(oi->i_cred, &ios->obj);
 
 	/* increment the refcount so that the inode will still be around when we
 	 * reach the callback
 	 */
 	atomic_inc(&inode->i_count);
 
-	ret = exofs_async_op(or, create_done, inode, oi->i_cred);
+	ios->done = create_done;
+	ios->private = inode;
+	ios->cred = oi->i_cred;
+	ret = exofs_sbi_create(ios);
 	if (ret) {
 		atomic_dec(&inode->i_count);
-		osd_end_request(or);
-		return ERR_PTR(-EIO);
+		exofs_put_io_state(ios);
+		return ERR_PTR(ret);
 	}
 	atomic_inc(&sbi->s_curr_pending);
 
@@ -1135,11 +1125,11 @@
 /*
  * Callback function from exofs_update_inode().
  */
-static void updatei_done(struct osd_request *or, void *p)
+static void updatei_done(struct exofs_io_state *ios, void *p)
 {
 	struct updatei_args *args = p;
 
-	osd_end_request(or);
+	exofs_put_io_state(ios);
 
 	atomic_dec(&args->sbi->s_curr_pending);
 
@@ -1155,8 +1145,7 @@
 	struct exofs_i_info *oi = exofs_i(inode);
 	struct super_block *sb = inode->i_sb;
 	struct exofs_sb_info *sbi = sb->s_fs_info;
-	struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
-	struct osd_request *or;
+	struct exofs_io_state *ios;
 	struct osd_attr attr;
 	struct exofs_fcb *fcb;
 	struct updatei_args *args;
@@ -1193,18 +1182,16 @@
 	} else
 		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
 
-	or = osd_start_request(sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("exofs_update_inode: osd_start_request failed.\n");
-		ret = -ENOMEM;
+	ret = exofs_get_io_state(sbi, &ios);
+	if (unlikely(ret)) {
+		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
 		goto free_args;
 	}
 
-	osd_req_set_attributes(or, &obj);
-
 	attr = g_attr_inode_data;
 	attr.val_ptr = fcb;
-	osd_req_add_set_attr_list(or, &attr, 1);
+	ios->out_attr_len = 1;
+	ios->out_attr = &attr;
 
 	if (!obj_created(oi)) {
 		EXOFS_DBGMSG("!obj_created\n");
@@ -1213,22 +1200,19 @@
 		EXOFS_DBGMSG("wait_event done\n");
 	}
 
-	if (do_sync) {
-		ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
-		osd_end_request(or);
-		goto free_args;
-	} else {
+	if (!do_sync) {
 		args->sbi = sbi;
+		ios->done = updatei_done;
+		ios->private = args;
+	}
 
-		ret = exofs_async_op(or, updatei_done, args, oi->i_cred);
-		if (ret) {
-			osd_end_request(or);
-			goto free_args;
-		}
+	ret = exofs_oi_write(oi, ios);
+	if (!do_sync && !ret) {
 		atomic_inc(&sbi->s_curr_pending);
 		goto out; /* deallocation in updatei_done */
 	}
 
+	exofs_put_io_state(ios);
 free_args:
 	kfree(args);
 out:
@@ -1245,11 +1229,12 @@
  * Callback function from exofs_delete_inode() - don't have much cleaning up to
  * do.
  */
-static void delete_done(struct osd_request *or, void *p)
+static void delete_done(struct exofs_io_state *ios, void *p)
 {
-	struct exofs_sb_info *sbi;
-	osd_end_request(or);
-	sbi = p;
+	struct exofs_sb_info *sbi = p;
+
+	exofs_put_io_state(ios);
+
 	atomic_dec(&sbi->s_curr_pending);
 }
 
@@ -1263,8 +1248,7 @@
 	struct exofs_i_info *oi = exofs_i(inode);
 	struct super_block *sb = inode->i_sb;
 	struct exofs_sb_info *sbi = sb->s_fs_info;
-	struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
-	struct osd_request *or;
+	struct exofs_io_state *ios;
 	int ret;
 
 	truncate_inode_pages(&inode->i_data, 0);
@@ -1281,25 +1265,26 @@
 
 	clear_inode(inode);
 
-	or = osd_start_request(sbi->s_dev, GFP_KERNEL);
-	if (unlikely(!or)) {
-		EXOFS_ERR("exofs_delete_inode: osd_start_request failed\n");
+	ret = exofs_get_io_state(sbi, &ios);
+	if (unlikely(ret)) {
+		EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
 		return;
 	}
 
-	osd_req_remove_object(or, &obj);
-
 	/* if we are deleting an obj that hasn't been created yet, wait */
 	if (!obj_created(oi)) {
 		BUG_ON(!obj_2bcreated(oi));
 		wait_event(oi->i_wq, obj_created(oi));
 	}
 
-	ret = exofs_async_op(or, delete_done, sbi, oi->i_cred);
+	ios->obj.id = exofs_oi_objno(oi);
+	ios->done = delete_done;
+	ios->private = sbi;
+	ios->cred = oi->i_cred;
+	ret = exofs_sbi_remove(ios);
 	if (ret) {
-		EXOFS_ERR(
-		       "ERROR: @exofs_delete_inode exofs_async_op failed\n");
-		osd_end_request(or);
+		EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
+		exofs_put_io_state(ios);
 		return;
 	}
 	atomic_inc(&sbi->s_curr_pending);