Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/fs/ufs/Makefile b/fs/ufs/Makefile
new file mode 100644
index 0000000..dd39980
--- /dev/null
+++ b/fs/ufs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux ufs filesystem routines.
+#
+
+obj-$(CONFIG_UFS_FS) += ufs.o
+
+ufs-objs := balloc.o cylinder.o dir.o file.o ialloc.o inode.o \
+	    namei.o super.o symlink.o truncate.o util.o
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
new file mode 100644
index 0000000..997640c
--- /dev/null
+++ b/fs/ufs/balloc.c
@@ -0,0 +1,818 @@
+/*
+ *  linux/fs/ufs/balloc.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_BALLOC_DEBUG
+
+#ifdef UFS_BALLOC_DEBUG
+#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
+static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
+static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
+static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
+static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
+static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
+
+/*
+ * Free 'count' fragments from fragment number 'fragment'
+ */
+void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
+	
+	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
+		ufs_error (sb, "ufs_free_fragments", "internal error");
+	
+	lock_super(sb);
+	
+	cgno = ufs_dtog(fragment);
+	bit = ufs_dtogd(fragment);
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
+		goto failed;
+	}
+		
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi) 
+		goto failed;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
+		goto failed;
+	}
+
+	end_bit = bit + count;
+	bbase = ufs_blknum (bit);
+	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
+	for (i = bit; i < end_bit; i++) {
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
+			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
+		else ufs_error (sb, "ufs_free_fragments",
+			"bit already cleared for fragment %u", i);
+	}
+	
+	DQUOT_FREE_BLOCK (inode, count);
+
+	
+	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
+	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
+
+	/*
+	 * Trying to reassemble free fragments into block
+	 */
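+	/*
+	 * If every fragment of the block is now free, fold the per-fragment
+	 * counts back into one whole free block and update the per-cylinder
+	 * block and rotational-position summaries.
+	 */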
+	blkno = ufs_fragstoblks (bbase);
+	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
+		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
+		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
+		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+			ufs_clusteracct (sb, ucpi, blkno, 1);
+		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
+		cylno = ufs_cbtocylno (bbase);
+		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
+		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	}
+	
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+	
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+	return;
+
+failed:
+	unlock_super (sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return;
+}
+
+/*
+ * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
+ */
+void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+
+	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
+	
+	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
+		ufs_error (sb, "ufs_free_blocks", "internal error, "
+			"fragment %u, count %u\n", fragment, count);
+		goto failed;
+	}
+
+	lock_super(sb);
+	
+do_more:
+	overflow = 0;
+	cgno = ufs_dtog (fragment);
+	bit = ufs_dtogd (fragment);
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
+		goto failed;
+	}
+	end_bit = bit + count;
+	if (end_bit > uspi->s_fpg) {
+		overflow = bit + count - uspi->s_fpg;
+		count -= overflow;
+		end_bit -= overflow;
+	}
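+	/*
+	 * A range that spills past the end of this cylinder group is trimmed
+	 * here; the remainder is freed on the next pass through the do_more
+	 * loop.
+	 */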
+
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi) 
+		goto failed;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
+		goto failed;
+	}
+
+	for (i = bit; i < end_bit; i += uspi->s_fpb) {
+		blkno = ufs_fragstoblks(i);
+		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
+		}
+		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
+		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+			ufs_clusteracct (sb, ucpi, blkno, 1);
+		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+
+		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
+		cylno = ufs_cbtocylno(i);
+		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
+		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+
+	if (overflow) {
+		fragment += count;
+		count = overflow;
+		goto do_more;
+	}
+
+	sb->s_dirt = 1;
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+	return;
+
+failed:
+	unlock_super (sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return;
+}
+
+
+
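+/*
+ * Zero-fill the newly allocated fragments (oldcount..newcount-1 of the run
+ * starting at 'result') through the buffer cache, so that stale disk contents
+ * never become visible in the file; synchronous for O_SYNC inodes.
+ */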
+#define NULLIFY_FRAGMENTS \
+	for (i = oldcount; i < newcount; i++) { \
+		bh = sb_getblk(sb, result + i); \
+		memset (bh->b_data, 0, sb->s_blocksize); \
+		set_buffer_uptodate(bh); \
+		mark_buffer_dirty (bh); \
+		if (IS_SYNC(inode)) \
+			sync_dirty_buffer(bh); \
+		brelse (bh); \
+	}
+
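+/*
+ * Allocate 'count' fragments for logical fragment 'fragment' of 'inode',
+ * preferring physical fragment 'goal'; '*p' is the block pointer in the
+ * inode that is updated on success.  Returns the first fragment of the new
+ * allocation, or 0 when nothing could be allocated.
+ */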
+unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
+	unsigned goal, unsigned count, int * err )
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct buffer_head * bh;
+	unsigned cgno, oldcount, newcount, tmp, request, i, result;
+	
+	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	*err = -ENOSPC;
+
+	lock_super (sb);
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
+		ufs_warning (sb, "ufs_new_fragments", "internal warning"
+			" fragment %u, count %u", fragment, count);
+		count = uspi->s_fpb - ufs_fragnum(fragment); 
+	}
+	oldcount = ufs_fragnum (fragment);
+	newcount = oldcount + count;
+
+	/*
+	 * Somebody else has just allocated our fragments
+	 */
+	if (oldcount) {
+		if (!tmp) {
+			ufs_error (sb, "ufs_new_fragments", "internal error, "
+				"fragment %u, tmp %u\n", fragment, tmp);
+			unlock_super (sb);
+			return (unsigned)-1;
+		}
+		if (fragment < UFS_I(inode)->i_lastfrag) {
+			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
+			unlock_super (sb);
+			return 0;
+		}
+	}
+	else {
+		if (tmp) {
+			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
+			unlock_super(sb);
+			return 0;
+		}
+	}
+
+	/*
+	 * There is not enough free space for an unprivileged user on the device
+	 */
+	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
+		unlock_super (sb);
+		UFSD(("EXIT (FAILED)\n"))
+		return 0;
+	}
+
+	if (goal >= uspi->s_size) 
+		goal = 0;
+	if (goal == 0) 
+		cgno = ufs_inotocg (inode->i_ino);
+	else
+		cgno = ufs_dtog (goal);
+	 
+	/*
+	 * allocate new fragment
+	 */
+	if (oldcount == 0) {
+		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
+		if (result) {
+			*p = cpu_to_fs32(sb, result);
+			*err = 0;
+			inode->i_blocks += count << uspi->s_nspfshift;
+			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+			NULLIFY_FRAGMENTS
+		}
+		unlock_super(sb);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	/*
+	 * resize block
+	 */
+	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
+	if (result) {
+		*err = 0;
+		inode->i_blocks += count << uspi->s_nspfshift;
+		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+		NULLIFY_FRAGMENTS
+		unlock_super(sb);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	/*
+	 * allocate new block and move data
+	 */
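+	/*
+	 * FFS-style optimization policy: with UFS_OPTSPACE only as many
+	 * fragments as needed are requested, with UFS_OPTTIME a whole block
+	 * is requested and the surplus freed again below, trading some
+	 * fragmentation for fewer future copies.
+	 */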
+	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
+	    case UFS_OPTSPACE:
+		request = newcount;
+		if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) 
+		    > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
+			break;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+		break;
+	    default:
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+	
+	    case UFS_OPTTIME:
+		request = uspi->s_fpb;
+		if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
+		    (uspi->s_minfree - 2) / 100)
+			break;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+		break;
+	}
+	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
+	if (result) {
+		for (i = 0; i < oldcount; i++) {
+			bh = sb_bread(sb, tmp + i);
+			if(bh)
+			{
+				clear_buffer_dirty(bh);
+				bh->b_blocknr = result + i;
+				mark_buffer_dirty (bh);
+				if (IS_SYNC(inode))
+					sync_dirty_buffer(bh);
+				brelse (bh);
+			}
+			else
+			{
+				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
+				unlock_super(sb);
+				return 0;
+			}
+		}
+		*p = cpu_to_fs32(sb, result);
+		*err = 0;
+		inode->i_blocks += count << uspi->s_nspfshift;
+		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+		NULLIFY_FRAGMENTS
+		unlock_super(sb);
+		if (newcount < request)
+			ufs_free_fragments (inode, result + newcount, request - newcount);
+		ufs_free_fragments (inode, tmp, oldcount);
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+
+	unlock_super(sb);
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+}		
+
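+/*
+ * Try to grow the allocation at 'fragment' in place from 'oldcount' to
+ * 'newcount' fragments by claiming the free fragments that follow it in the
+ * same block.  Returns 'fragment' on success, 0 if the block cannot be
+ * extended.
+ */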
+static unsigned
+ufs_add_fragments (struct inode * inode, unsigned fragment,
+		   unsigned oldcount, unsigned newcount, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned cgno, fragno, fragoff, count, fragsize, i;
+	
+	UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	count = newcount - oldcount;
+	
+	cgno = ufs_dtog(fragment);
+	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
+		return 0;
+	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
+		return 0;
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi)
+		return 0;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) {
+		ufs_panic (sb, "ufs_add_fragments",
+			"internal error, bad magic number on cg %u", cgno);
+		return 0;
+	}
+
+	fragno = ufs_dtogd (fragment);
+	fragoff = ufs_fragnum (fragno);
+	for (i = oldcount; i < newcount; i++)
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+			return 0;
+	/*
+	 * Block can be extended
+	 */
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
+		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+			break;
+	fragsize = i - oldcount;
+	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
+		ufs_panic (sb, "ufs_add_fragments",
+			"internal error or corrupted bitmap on cg %u", cgno);
+	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
+	if (fragsize != count)
+		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
+	for (i = oldcount; i < newcount; i++)
+		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
+	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+		*err = -EDQUOT;
+		return 0;
+	}
+
+	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
+	
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	UFSD(("EXIT, fragment %u\n", fragment))
+	
+	return fragment;
+}
+
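+/*
+ * Check whether cylinder group 'cgno' has either a whole free block or a
+ * free run of at least 'count' fragments; jump to cg_found if it does.
+ */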
+#define UFS_TEST_FREE_SPACE_CG \
+	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
+	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
+		goto cg_found; \
+	for (k = count; k < uspi->s_fpb; k++) \
+		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
+			goto cg_found; 
+
+static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
+	unsigned goal, unsigned count, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned oldcg, i, j, k, result, allocsize;
+	
+	UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	oldcg = cgno;
+	
+	/*
+	 * 1. searching on preferred cylinder group
+	 */
+	UFS_TEST_FREE_SPACE_CG
+
+	/*
+	 * 2. quadratic rehash
+	 */
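+	/* probes groups oldcg + 1, oldcg + 3, oldcg + 7, oldcg + 15, ... (mod s_ncg) */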
+	for (j = 1; j < uspi->s_ncg; j *= 2) {
+		cgno += j;
+		if (cgno >= uspi->s_ncg) 
+			cgno -= uspi->s_ncg;
+		UFS_TEST_FREE_SPACE_CG
+	}
+
+	/*
+	 * 3. brute force search
+	 * We start at offset 2 (offset 0 was checked in step 1, offset 1 in step 2)
+	 */
+	cgno = (oldcg + 1) % uspi->s_ncg;
+	for (j = 2; j < uspi->s_ncg; j++) {
+		cgno++;
+		if (cgno >= uspi->s_ncg)
+			cgno = 0;
+		UFS_TEST_FREE_SPACE_CG
+	}
+	
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+
+cg_found:
+	ucpi = ufs_load_cylinder (sb, cgno);
+	if (!ucpi)
+		return 0;
+	ucg = ubh_get_ucg (UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) 
+		ufs_panic (sb, "ufs_alloc_fragments",
+			"internal error, bad magic number on cg %u", cgno);
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+
+	if (count == uspi->s_fpb) {
+		result = ufs_alloccg_block (inode, ucpi, goal, err);
+		if (result == (unsigned)-1)
+			return 0;
+		goto succed;
+	}
+
+	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
+		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
+			break;
+	
+	if (allocsize == uspi->s_fpb) {
+		result = ufs_alloccg_block (inode, ucpi, goal, err);
+		if (result == (unsigned)-1)
+			return 0;
+		goal = ufs_dtogd (result);
+		for (i = count; i < uspi->s_fpb; i++)
+			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
+		i = uspi->s_fpb - count;
+		DQUOT_FREE_BLOCK(inode, i);
+
+		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
+		fs32_add(sb, &ucg->cg_frsum[i], 1);
+		goto succed;
+	}
+
+	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
+	if (result == (unsigned)-1)
+		return 0;
+	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+		*err = -EDQUOT;
+		return 0;
+	}
+	for (i = 0; i < count; i++)
+		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
+	
+	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
+
+	if (count != allocsize)
+		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
+
+succed:
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	result += cgno * uspi->s_fpg;
+	UFSD(("EXIT3, result %u\n", result))
+	return result;
+}
+
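+/*
+ * Allocate one whole block in the cylinder group described by 'ucpi',
+ * taking 'goal' if that block is free, otherwise searching from the
+ * per-group rotor.  Returns the fragment offset of the block within the
+ * group, or (unsigned)-1 on failure.
+ */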
+static unsigned ufs_alloccg_block (struct inode * inode,
+	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cylinder_group * ucg;
+	unsigned result, cylno, blkno;
+
+	UFSD(("ENTER, goal %u\n", goal))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (goal == 0) {
+		goal = ucpi->c_rotor;
+		goto norot;
+	}
+	goal = ufs_blknum (goal);
+	goal = ufs_dtogd (goal);
+	
+	/*
+	 * If the requested block is available, use it.
+	 */
+	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
+		result = goal;
+		goto gotit;
+	}
+	
+norot:	
+	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
+	if (result == (unsigned)-1)
+		return (unsigned)-1;
+	ucpi->c_rotor = result;
+gotit:
+	blkno = ufs_fragstoblks(result);
+	ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
+	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+		ufs_clusteracct (sb, ucpi, blkno, -1);
+	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+		*err = -EDQUOT;
+		return (unsigned)-1;
+	}
+
+	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
+	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
+	cylno = ufs_cbtocylno(result);
+	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
+	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
+	
+	UFSD(("EXIT, result %u\n", result))
+
+	return result;
+}
+
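+/*
+ * Scan the free-fragment bitmap of one cylinder group for a run of exactly
+ * 'count' free fragments, starting near 'goal' (or the group's rotor) and
+ * wrapping around once.  Returns the fragment offset within the group, or
+ * (unsigned)-1 if the bitmap turns out to be corrupted.
+ */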
+static unsigned ufs_bitmap_search (struct super_block * sb,
+	struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cylinder_group * ucg;
+	unsigned start, length, location, result;
+	unsigned possition, fragsize, blockmap, mask;
+	
+	UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (goal)
+		start = ufs_dtogd(goal) >> 3;
+	else
+		start = ucpi->c_frotor >> 3;
+		
+	length = ((uspi->s_fpg + 7) >> 3) - start;
+	location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
+		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
+		1 << (count - 1 + (uspi->s_fpb & 7))); 
+	if (location == 0) {
+		length = start + 1;
+		location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length, 
+			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
+			1 << (count - 1 + (uspi->s_fpb & 7)));
+		if (location == 0) {
+			ufs_error (sb, "ufs_bitmap_search",
+			"bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
+			ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
+			return (unsigned)-1;
+		}
+		start = 0;
+	}
+	result = (start + length - location) << 3;
+	ucpi->c_frotor = result;
+
+	/*
+	 * found the byte in the map
+	 */
+	blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
+	fragsize = 0;
+	for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
+		if (blockmap & mask) {
+			if (!(possition & uspi->s_fpbmask))
+				fragsize = 1;
+			else 
+				fragsize++;
+		}
+		else {
+			if (fragsize == count) {
+				result += possition - count;
+				UFSD(("EXIT, result %u\n", result))
+				return result;
+			}
+			fragsize = 0;
+		}
+	}
+	if (fragsize == count) {
+		result += possition - count;
+		UFSD(("EXIT, result %u\n", result))
+		return result;
+	}
+	ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
+	UFSD(("EXIT (FAILED)\n"))
+	return (unsigned)-1;
+}
+
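+/*
+ * Maintain the 4.4BSD cluster summary: mark block 'blkno' free (cnt > 0) or
+ * allocated (cnt < 0) in the cluster map, work out how large a free cluster
+ * it now joins or splits, and adjust the cluster-size counters accordingly.
+ */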
+static void ufs_clusteracct(struct super_block * sb,
+	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
+{
+	struct ufs_sb_private_info * uspi;
+	int i, start, end, forw, back;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	if (uspi->s_contigsumsize <= 0)
+		return;
+
+	if (cnt > 0)
+		ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+	else
+		ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+
+	/*
+	 * Find the size of the cluster going forward.
+	 */
+	start = blkno + 1;
+	end = start + uspi->s_contigsumsize;
+	if ( end >= ucpi->c_nclusterblks)
+		end = ucpi->c_nclusterblks;
+	i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
+	if (i > end)
+		i = end;
+	forw = i - start;
+	
+	/*
+	 * Find the size of the cluster going backward.
+	 */
+	start = blkno - 1;
+	end = start - uspi->s_contigsumsize;
+	if (end < 0 ) 
+		end = -1;
+	i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
+	if ( i < end) 
+		i = end;
+	back = start - i;
+	
+	/*
+	 * Account for old cluster and the possibly new forward and
+	 * back clusters.
+	 */
+	i = back + forw + 1;
+	if (i > uspi->s_contigsumsize)
+		i = uspi->s_contigsumsize;
+	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
+	if (back > 0)
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
+	if (forw > 0)
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
+}
+
+
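+/*
+ * Fragment run tables used by ufs_bitmap_search() above.  In
+ * ufs_fragtable_8fpb, bit (n-1) of entry b is set when the free-map byte b
+ * contains a run of exactly n free fragments; ufs_fragtable_other encodes
+ * the same information for layouts with fewer than 8 fragments per block,
+ * where ubh_scanc() tests the bit selected by 1 << (count - 1 + (s_fpb & 7)).
+ */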
+static unsigned char ufs_fragtable_8fpb[] = {
+	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
+	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
+	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
+	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
+	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
+	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
+	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
+	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
+};
+
+static unsigned char ufs_fragtable_other[] = {
+	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
+	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
+	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
+	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
+	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
+	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
+	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
+};
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
new file mode 100644
index 0000000..14abb8b
--- /dev/null
+++ b/fs/ufs/cylinder.c
@@ -0,0 +1,209 @@
+/*
+ *  linux/fs/ufs/cylinder.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  ext2 - inode (block) bitmap caching inspired
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_CYLINDER_DEBUG
+
+#ifdef UFS_CYLINDER_DEBUG
+#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+
+/*
+ * Read cylinder group into cache. The memory space for ufs_cg_private_info
+ * structure is already allocated during ufs_read_super.
+ */
+static void ufs_read_cylinder (struct super_block * sb,
+	unsigned cgno, unsigned bitmap_nr)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned i, j;
+
+	UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
+	uspi = sbi->s_uspi;
+	ucpi = sbi->s_ucpi[bitmap_nr];
+	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
+
+	UCPI_UBH->fragment = ufs_cgcmin(cgno);
+	UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
+	/*
+	 * We have already the first fragment of cylinder group block in buffer
+	 */
+	UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
+	for (i = 1; i < UCPI_UBH->count; i++)
+		if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
+			goto failed;
+	sbi->s_cgno[bitmap_nr] = cgno;
+			
+	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
+	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
+	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
+	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
+	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
+	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
+	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
+	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
+	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
+	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
+	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
+	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
+	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
+	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
+	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
+	UFSD(("EXIT\n"))
+	return;	
+	
+failed:
+	for (j = 1; j < i; j++)
+		brelse (UCPI_UBH->bh[j]);
+	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
+	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
+}
+
+/*
+ * Remove cylinder group from cache, doesn't release memory
+ * allocated for cylinder group (this is done at ufs_put_super only).
+ */
+void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi; 
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	unsigned i;
+
+	UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))
+
+	uspi = sbi->s_uspi;
+	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
+		UFSD(("EXIT\n"))
+		return;
+	}
+	ucpi = sbi->s_ucpi[bitmap_nr];
+	ucg = ubh_get_ucg(UCPI_UBH);
+
+	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
+		ufs_panic (sb, "ufs_put_cylinder", "internal error");
+		return;
+	}
+	/*
+	 * the rotors are not critical data, so we only write them back to
+	 * disk when we are finished with this cylinder group
+	 */
+	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
+	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
+	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	for (i = 1; i < UCPI_UBH->count; i++) {
+		brelse (UCPI_UBH->bh[i]);
+	}
+
+	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
+	UFSD(("EXIT\n"))
+}
+
+/*
+ * Find cylinder group in cache and return it as pointer.
+ * If cylinder group is not in cache, we will load it from disk.
+ *
+ * The cache is managed by LRU algorithm. 
+ */
+struct ufs_cg_private_info * ufs_load_cylinder (
+	struct super_block * sb, unsigned cgno)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_cg_private_info * ucpi;
+	unsigned cg, i, j;
+
+	UFSD(("ENTER, cgno %u\n", cgno))
+
+	uspi = sbi->s_uspi;
+	if (cgno >= uspi->s_ncg) {
+		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
+		return NULL;
+	}
+	/*
+	 * Cylinder group number cgno is in the cache and was the most recently used
+	 */
+	if (sbi->s_cgno[0] == cgno) {
+		UFSD(("EXIT\n"))
+		return sbi->s_ucpi[0];
+	}
+	/*
+	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
+	 */
+	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
+		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
+			if (sbi->s_cgno[cgno] != cgno) {
+				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
+				UFSD(("EXIT (FAILED)\n"))
+				return NULL;
+			}
+			else {
+				UFSD(("EXIT\n"))
+				return sbi->s_ucpi[cgno];
+			}
+		} else {
+			ufs_read_cylinder (sb, cgno, cgno);
+			UFSD(("EXIT\n"))
+			return sbi->s_ucpi[cgno];
+		}
+	}
+	/*
+	 * Cylinder group number cgno is in the cache but was not the most
+	 * recently used one, so move it to the first position
+	 */
+	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
+	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
+		cg = sbi->s_cgno[i];
+		ucpi = sbi->s_ucpi[i];
+		for (j = i; j > 0; j--) {
+			sbi->s_cgno[j] = sbi->s_cgno[j-1];
+			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
+		}
+		sbi->s_cgno[0] = cg;
+		sbi->s_ucpi[0] = ucpi;
+	/*
+	 * Cylinder group number cgno is not in the cache; read it from disk
+	 * and put it in the first position
+	 */
+	} else {
+		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
+			sbi->s_cg_loaded++;
+		else
+			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
+		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
+		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
+			sbi->s_cgno[j] = sbi->s_cgno[j-1];
+			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
+		}
+		sbi->s_ucpi[0] = ucpi;
+		ufs_read_cylinder (sb, cgno, 0);
+	}
+	UFSD(("EXIT\n"))
+	return sbi->s_ucpi[0];
+}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
new file mode 100644
index 0000000..d0915fb
--- /dev/null
+++ b/fs/ufs/dir.c
@@ -0,0 +1,627 @@
+/*
+ *  linux/fs/ufs/ufs_dir.c
+ *
+ * Copyright (C) 1996
+ * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
+ * Laboratory for Computer Science Research Computing Facility
+ * Rutgers, The State University of New Jersey
+ *
+ * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
+ *
+ * 4.4BSD (FreeBSD) support added on February 1st 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
+ * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_DIR_DEBUG
+
+#ifdef UFS_DIR_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static int
+ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_entry *,
+		     struct buffer_head *, unsigned long);
+
+
+/*
+ * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
+ *
+ * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
+ */
+static inline int ufs_match(struct super_block *sb, int len,
+		const char * const name, struct ufs_dir_entry * de)
+{
+	if (len != ufs_get_de_namlen(sb, de))
+		return 0;
+	if (!de->d_ino)
+		return 0;
+	return !memcmp(name, de->d_name, len);
+}
+
+/*
+ * This is blatantly stolen from ext2fs
+ */
+static int
+ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	int error = 0;
+	unsigned long offset, lblk;
+	int i, stored;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de;
+	struct super_block * sb;
+	int de_reclen;
+	unsigned flags;
+	u64     blk= 0L;
+
+	lock_kernel();
+
+	sb = inode->i_sb;
+	flags = UFS_SB(sb)->s_flags;
+
+	UFSD(("ENTER, ino %lu  f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
+
+	stored = 0;
+	bh = NULL;
+	offset = filp->f_pos & (sb->s_blocksize - 1);
+
+	while (!error && !stored && filp->f_pos < inode->i_size) {
+		lblk = (filp->f_pos) >> sb->s_blocksize_bits;
+		blk = ufs_frag_map(inode, lblk);
+		if (!blk || !(bh = sb_bread(sb, blk))) {
+			/* XXX - error - skip to the next block */
+			printk("ufs_readdir: "
+			       "dir inode %lu has a hole at offset %lu\n",
+			       inode->i_ino, (unsigned long int)filp->f_pos);
+			filp->f_pos += sb->s_blocksize - offset;
+			continue;
+		}
+
+revalidate:
+		/* If the dir block has changed since the last call to
+		 * readdir(2), then we might be pointing to an invalid
+		 * dirent right now.  Scan from the start of the block
+		 * to make sure. */
+		if (filp->f_version != inode->i_version) {
+			for (i = 0; i < sb->s_blocksize && i < offset; ) {
+				de = (struct ufs_dir_entry *)(bh->b_data + i);
+				/* It's too expensive to do a full
+				 * dirent test each time round this
+				 * loop, but we do have to test at
+				 * least that it is non-zero.  A
+				 * failure will be detected in the
+				 * dirent test below. */
+				de_reclen = fs16_to_cpu(sb, de->d_reclen);
+				if (de_reclen < 1)
+					break;
+				i += de_reclen;
+			}
+			offset = i;
+			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+				| offset;
+			filp->f_version = inode->i_version;
+		}
+
+		while (!error && filp->f_pos < inode->i_size
+		       && offset < sb->s_blocksize) {
+			de = (struct ufs_dir_entry *) (bh->b_data + offset);
+			/* XXX - put in a real ufs_check_dir_entry() */
+			if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) {
+				filp->f_pos = (filp->f_pos &
+				              (sb->s_blocksize - 1)) +
+				               sb->s_blocksize;
+				brelse(bh);
+				unlock_kernel();
+				return stored;
+			}
+			if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
+						   bh, offset)) {
+				/* On error, skip the f_pos to the
+				   next block. */
+				filp->f_pos = (filp->f_pos |
+				              (sb->s_blocksize - 1)) +
+					       1;
+				brelse (bh);
+				unlock_kernel();
+				return stored;
+			}
+			offset += fs16_to_cpu(sb, de->d_reclen);
+			if (de->d_ino) {
+				/* We might block in the next section
+				 * if the data destination is
+				 * currently swapped out.  So, use a
+				 * version stamp to detect whether or
+				 * not the directory has been modified
+				 * during the copy operation. */
+				unsigned long version = filp->f_version;
+				unsigned char d_type = DT_UNKNOWN;
+
+				UFSD(("filldir(%s,%u)\n", de->d_name,
+							fs32_to_cpu(sb, de->d_ino)))
+				UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)))
+
+				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
+					d_type = de->d_u.d_44.d_type;
+				error = filldir(dirent, de->d_name,
+						ufs_get_de_namlen(sb, de), filp->f_pos,
+						fs32_to_cpu(sb, de->d_ino), d_type);
+				if (error)
+					break;
+				if (version != filp->f_version)
+					goto revalidate;
+				stored ++;
+			}
+			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
+		}
+		offset = 0;
+		brelse (bh);
+	}
+	unlock_kernel();
+	return 0;
+}
+
+/*
+ * define how far ahead to read directories while searching them.
+ */
+#define NAMEI_RA_CHUNKS  2
+#define NAMEI_RA_BLOCKS  4
+#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
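+/*
+ * ufs_find_entry() below keeps a ring of NAMEI_RA_SIZE buffers read ahead of
+ * the scan position and kicks off the queued reads every NAMEI_RA_BLOCKS
+ * blocks, overlapping directory I/O with the search of already-read blocks.
+ */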
+
+/*
+ *	ufs_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the cache buffer in which the entry was found, and the entry
+ * itself (as a parameter - res_bh). It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ */
+struct ufs_dir_entry * ufs_find_entry (struct dentry *dentry,
+	struct buffer_head ** res_bh)
+{
+	struct super_block * sb;
+	struct buffer_head * bh_use[NAMEI_RA_SIZE];
+	struct buffer_head * bh_read[NAMEI_RA_SIZE];
+	unsigned long offset;
+	int block, toread, i, err;
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+
+	UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen))
+	
+	*res_bh = NULL;
+	
+	sb = dir->i_sb;
+	
+	if (namelen > UFS_MAXNAMLEN)
+		return NULL;
+
+	memset (bh_use, 0, sizeof (bh_use));
+	toread = 0;
+	for (block = 0; block < NAMEI_RA_SIZE; ++block) {
+		struct buffer_head * bh;
+
+		if ((block << sb->s_blocksize_bits) >= dir->i_size)
+			break;
+		bh = ufs_getfrag (dir, block, 0, &err);
+		bh_use[block] = bh;
+		if (bh && !buffer_uptodate(bh))
+			bh_read[toread++] = bh;
+	}
+
+	for (block = 0, offset = 0; offset < dir->i_size; block++) {
+		struct buffer_head * bh;
+		struct ufs_dir_entry * de;
+		char * dlimit;
+
+		if ((block % NAMEI_RA_BLOCKS) == 0 && toread) {
+			ll_rw_block (READ, toread, bh_read);
+			toread = 0;
+		}
+		bh = bh_use[block % NAMEI_RA_SIZE];
+		if (!bh) {
+			ufs_error (sb, "ufs_find_entry", 
+				"directory #%lu contains a hole at offset %lu",
+				dir->i_ino, offset);
+			offset += sb->s_blocksize;
+			continue;
+		}
+		wait_on_buffer (bh);
+		if (!buffer_uptodate(bh)) {
+			/*
+			 * read error: all bets are off
+			 */
+			break;
+		}
+
+		de = (struct ufs_dir_entry *) bh->b_data;
+		dlimit = bh->b_data + sb->s_blocksize;
+		while ((char *) de < dlimit && offset < dir->i_size) {
+			/* this code is executed quadratically often */
+			/* do minimal checking by hand */
+			int de_len;
+
+			if ((char *) de + namelen <= dlimit &&
+			    ufs_match(sb, namelen, name, de)) {
+				/* found a match -
+				just to be sure, do a full check */
+				if (!ufs_check_dir_entry("ufs_find_entry",
+				    dir, de, bh, offset))
+					goto failed;
+				for (i = 0; i < NAMEI_RA_SIZE; ++i) {
+					if (bh_use[i] != bh)
+						brelse (bh_use[i]);
+				}
+				*res_bh = bh;
+				return de;
+			}
+                        /* prevent looping on a bad block */
+			de_len = fs16_to_cpu(sb, de->d_reclen);
+			if (de_len <= 0)
+				goto failed;
+			offset += de_len;
+			de = (struct ufs_dir_entry *) ((char *) de + de_len);
+		}
+
+		brelse (bh);
+		if (((block + NAMEI_RA_SIZE) << sb->s_blocksize_bits ) >=
+		    dir->i_size)
+			bh = NULL;
+		else
+			bh = ufs_getfrag (dir, block + NAMEI_RA_SIZE, 0, &err);
+		bh_use[block % NAMEI_RA_SIZE] = bh;
+		if (bh && !buffer_uptodate(bh))
+			bh_read[toread++] = bh;
+	}
+
+failed:
+	for (i = 0; i < NAMEI_RA_SIZE; ++i) brelse (bh_use[i]);
+	UFSD(("EXIT\n"))
+	return NULL;
+}
+
+static int
+ufs_check_dir_entry (const char *function, struct inode *dir,
+		     struct ufs_dir_entry *de, struct buffer_head *bh,
+		     unsigned long offset)
+{
+	struct super_block *sb = dir->i_sb;
+	const char *error_msg = NULL;
+	int rlen = fs16_to_cpu(sb, de->d_reclen);
+
+	if (rlen < UFS_DIR_REC_LEN(1))
+		error_msg = "reclen is smaller than minimal";
+	else if (rlen % 4 != 0)
+		error_msg = "reclen % 4 != 0";
+	else if (rlen < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)))
+		error_msg = "reclen is too small for namlen";
+	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+		error_msg = "directory entry across blocks";
+	else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
+				      UFS_SB(sb)->s_uspi->s_ncg))
+		error_msg = "inode out of bounds";
+
+	if (error_msg != NULL)
+		ufs_error (sb, function, "bad entry in directory #%lu, size %Lu: %s - "
+			    "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
+			    dir->i_ino, dir->i_size, error_msg, offset,
+			    (unsigned long)fs32_to_cpu(sb, de->d_ino),
+			    rlen, ufs_get_de_namlen(sb, de));
+	
+	return (error_msg == NULL ? 1 : 0);
+}
+
+struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct buffer_head **p)
+{
+	int err;
+	struct buffer_head *bh = ufs_bread (dir, 0, 0, &err);
+	struct ufs_dir_entry *res = NULL;
+
+	if (bh) {
+		res = (struct ufs_dir_entry *) bh->b_data;
+		res = (struct ufs_dir_entry *)((char *)res +
+			fs16_to_cpu(dir->i_sb, res->d_reclen));
+	}
+	*p = bh;
+	return res;
+}
+ino_t ufs_inode_by_name(struct inode * dir, struct dentry *dentry)
+{
+	ino_t res = 0;
+	struct ufs_dir_entry * de;
+	struct buffer_head *bh;
+
+	de = ufs_find_entry (dentry, &bh);
+	if (de) {
+		res = fs32_to_cpu(dir->i_sb, de->d_ino);
+		brelse(bh);
+	}
+	return res;
+}
+
+void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
+		struct buffer_head *bh, struct inode *inode)
+{
+	dir->i_version++;
+	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
+	mark_buffer_dirty(bh);
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
+	brelse (bh);
+}
+
+/*
+ *	ufs_add_entry()
+ *
+ * adds a file entry to the specified directory, using the same
+ * semantics as ufs_find_entry(). It returns NULL if it failed.
+ */
+int ufs_add_link(struct dentry *dentry, struct inode *inode)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	unsigned long offset;
+	unsigned fragoff;
+	unsigned short rec_len;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de, * de1;
+	struct inode *dir = dentry->d_parent->d_inode;
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	int err;
+
+	UFSD(("ENTER, name %s, namelen %u\n", name, namelen))
+	
+	sb = dir->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	if (!namelen)
+		return -EINVAL;
+	bh = ufs_bread (dir, 0, 0, &err);
+	if (!bh)
+		return err;
+	rec_len = UFS_DIR_REC_LEN(namelen);
+	offset = 0;
+	de = (struct ufs_dir_entry *) bh->b_data;
+	while (1) {
+		if ((char *)de >= UFS_SECTOR_SIZE + bh->b_data) {
+			fragoff = offset & ~uspi->s_fmask;
+			if (fragoff != 0 && fragoff != UFS_SECTOR_SIZE)
+				ufs_error (sb, "ufs_add_entry", "internal error"
+					" fragoff %u", fragoff);
+			if (!fragoff) {
+				brelse (bh);
+				bh = ufs_bread (dir, offset >> sb->s_blocksize_bits, 1, &err);
+				if (!bh)
+					return err;
+			}
+			if (dir->i_size <= offset) {
+				if (dir->i_size == 0) {
+					brelse(bh);
+					return -ENOENT;
+				}
+				de = (struct ufs_dir_entry *) (bh->b_data + fragoff);
+				de->d_ino = 0;
+				de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
+				ufs_set_de_namlen(sb, de, 0);
+				dir->i_size = offset + UFS_SECTOR_SIZE;
+				mark_inode_dirty(dir);
+			} else {
+				de = (struct ufs_dir_entry *) bh->b_data;
+			}
+		}
+		if (!ufs_check_dir_entry ("ufs_add_entry", dir, de, bh, offset)) {
+			brelse (bh);
+			return -ENOENT;
+		}
+		if (ufs_match(sb, namelen, name, de)) {
+			brelse (bh);
+			return -EEXIST;
+		}
+		if (de->d_ino == 0 && fs16_to_cpu(sb, de->d_reclen) >= rec_len)
+			break;
+			
+		if (fs16_to_cpu(sb, de->d_reclen) >=
+		     UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)) + rec_len)
+			break;
+		offset += fs16_to_cpu(sb, de->d_reclen);
+		de = (struct ufs_dir_entry *) ((char *) de + fs16_to_cpu(sb, de->d_reclen));
+	}
+
+	if (de->d_ino) {
+		de1 = (struct ufs_dir_entry *) ((char *) de +
+			UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de1->d_reclen =
+			cpu_to_fs16(sb, fs16_to_cpu(sb, de->d_reclen) -
+				UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de->d_reclen =
+			cpu_to_fs16(sb, UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
+		de = de1;
+	}
+	de->d_ino = 0;
+	ufs_set_de_namlen(sb, de, namelen);
+	memcpy (de->d_name, name, namelen + 1);
+	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
+	ufs_set_de_type(sb, de, inode->i_mode);
+	mark_buffer_dirty(bh);
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
+	brelse (bh);
+	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_version++;
+	mark_inode_dirty(dir);
+
+	UFSD(("EXIT\n"))
+	return 0;
+}
+
+/*
+ * ufs_delete_entry deletes a directory entry by merging it with the
+ * previous entry.
+ */
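+/*
+ * An entry at the start of a directory chunk has no previous entry to merge
+ * with (pde is NULL); it is then released by simply clearing d_ino.
+ */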
+int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
+	struct buffer_head * bh )
+	
+{
+	struct super_block * sb;
+	struct ufs_dir_entry * de, * pde;
+	unsigned i;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	i = 0;
+	pde = NULL;
+	de = (struct ufs_dir_entry *) bh->b_data;
+	
+	UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
+		fs32_to_cpu(sb, de->d_ino),
+		fs16_to_cpu(sb, de->d_reclen),
+		ufs_get_de_namlen(sb, de), de->d_name))
+
+	while (i < bh->b_size) {
+		if (!ufs_check_dir_entry ("ufs_delete_entry", inode, de, bh, i)) {
+			brelse(bh);
+			return -EIO;
+		}
+		if (de == dir)  {
+			if (pde)
+				fs16_add(sb, &pde->d_reclen,
+					fs16_to_cpu(sb, dir->d_reclen));
+			dir->d_ino = 0;
+			inode->i_version++;
+			inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+			mark_inode_dirty(inode);
+			mark_buffer_dirty(bh);
+			if (IS_DIRSYNC(inode))
+				sync_dirty_buffer(bh);
+			brelse(bh);
+			UFSD(("EXIT\n"))
+			return 0;
+		}
+		i += fs16_to_cpu(sb, de->d_reclen);
+		if (i == UFS_SECTOR_SIZE) pde = NULL;
+		else pde = de;
+		de = (struct ufs_dir_entry *)
+		    ((char *) de + fs16_to_cpu(sb, de->d_reclen));
+		if (i == UFS_SECTOR_SIZE && de->d_reclen == 0)
+			break;
+	}
+	UFSD(("EXIT\n"))
+	brelse(bh);
+	return -ENOENT;
+}
+
+int ufs_make_empty(struct inode * inode, struct inode *dir)
+{
+	struct super_block * sb = dir->i_sb;
+	struct buffer_head * dir_block;
+	struct ufs_dir_entry * de;
+	int err;
+
+	dir_block = ufs_bread (inode, 0, 1, &err);
+	if (!dir_block)
+		return err;
+
+	inode->i_blocks = sb->s_blocksize / UFS_SECTOR_SIZE;
+	de = (struct ufs_dir_entry *) dir_block->b_data;
+	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
+	ufs_set_de_type(sb, de, inode->i_mode);
+	ufs_set_de_namlen(sb, de, 1);
+	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
+	strcpy (de->d_name, ".");
+	de = (struct ufs_dir_entry *)
+		((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
+	ufs_set_de_type(sb, de, dir->i_mode);
+	de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
+	ufs_set_de_namlen(sb, de, 2);
+	strcpy (de->d_name, "..");
+	mark_buffer_dirty(dir_block);
+	brelse (dir_block);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int ufs_empty_dir (struct inode * inode)
+{
+	struct super_block * sb;
+	unsigned long offset;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de, * de1;
+	int err;
+	
+	sb = inode->i_sb;
+
+	if (inode->i_size < UFS_DIR_REC_LEN(1) + UFS_DIR_REC_LEN(2) ||
+	    !(bh = ufs_bread (inode, 0, 0, &err))) {
+	    	ufs_warning (inode->i_sb, "empty_dir",
+			      "bad directory (dir #%lu) - no data block",
+			      inode->i_ino);
+		return 1;
+	}
+	de = (struct ufs_dir_entry *) bh->b_data;
+	de1 = (struct ufs_dir_entry *)
+		((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	if (fs32_to_cpu(sb, de->d_ino) != inode->i_ino || de1->d_ino == 0 ||
+	     strcmp (".", de->d_name) || strcmp ("..", de1->d_name)) {
+	    	ufs_warning (inode->i_sb, "empty_dir",
+			      "bad directory (dir #%lu) - no `.' or `..'",
+			      inode->i_ino);
+		return 1;
+	}
+	offset = fs16_to_cpu(sb, de->d_reclen) + fs16_to_cpu(sb, de1->d_reclen);
+	de = (struct ufs_dir_entry *)
+		((char *)de1 + fs16_to_cpu(sb, de1->d_reclen));
+	while (offset < inode->i_size ) {
+		if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
+			brelse (bh);
+			bh = ufs_bread (inode, offset >> sb->s_blocksize_bits, 1, &err);
+	 		if (!bh) {
+				ufs_error (sb, "empty_dir",
+					    "directory #%lu contains a hole at offset %lu",
+					    inode->i_ino, offset);
+				offset += sb->s_blocksize;
+				continue;
+			}
+			de = (struct ufs_dir_entry *) bh->b_data;
+		}
+		if (!ufs_check_dir_entry ("empty_dir", inode, de, bh, offset)) {
+			brelse (bh);
+			return 1;
+		}
+		if (de->d_ino) {
+			brelse (bh);
+			return 0;
+		}
+		offset += fs16_to_cpu(sb, de->d_reclen);
+		de = (struct ufs_dir_entry *)
+			((char *)de + fs16_to_cpu(sb, de->d_reclen));
+	}
+	brelse (bh);
+	return 1;
+}
+
+struct file_operations ufs_dir_operations = {
+	.read		= generic_read_dir,
+	.readdir	= ufs_readdir,
+	.fsync		= file_fsync,
+};
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
new file mode 100644
index 0000000..ed69d7fe
--- /dev/null
+++ b/fs/ufs/file.c
@@ -0,0 +1,55 @@
+/*
+ *  linux/fs/ufs/file.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 fs regular file handling primitives
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/fcntl.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+/*
+ * We have mostly NULL's here: the current defaults are ok for
+ * the ufs filesystem.
+ */
+ 
+struct file_operations ufs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_file_read,
+	.write		= generic_file_write,
+	.mmap		= generic_file_mmap,
+	.open           = generic_file_open,
+	.sendfile	= generic_file_sendfile,
+};
+
+struct inode_operations ufs_file_inode_operations = {
+	.truncate	= ufs_truncate,
+};
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
new file mode 100644
index 0000000..61a6b15
--- /dev/null
+++ b/fs/ufs/ialloc.c
@@ -0,0 +1,302 @@
+/*
+ *  linux/fs/ufs/ialloc.c
+ *
+ * Copyright (c) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/ialloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  BSD ufs-inspired inode and directory allocation by 
+ *  Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_IALLOC_DEBUG
+
+#ifdef UFS_IALLOC_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+/*
+ * NOTE! When we get the inode, we're the only people
+ * that have access to it, and as such there are no
+ * race conditions we have to worry about. The inode
+ * is not on the hash-lists, and it cannot be reached
+ * through the filesystem because the directory entry
+ * has been deleted earlier.
+ *
+ * HOWEVER: we must make sure that we get no aliases,
+ * which means that we have to call "clear_inode()"
+ * _before_ we mark the inode not in use in the inode
+ * bitmaps. Otherwise a newly created file might use
+ * the same inode number (not actually the same pointer
+ * though), and then we'd have two inodes sharing the
+ * same inode number and space on the harddisk.
+ */
+void ufs_free_inode (struct inode * inode)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	int is_directory;
+	unsigned ino, cg, bit;
+	
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	ino = inode->i_ino;
+
+	lock_super (sb);
+
+	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
+		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
+		unlock_super (sb);
+		return;
+	}
+	
+	cg = ufs_inotocg (ino);
+	bit = ufs_inotocgoff (ino);
+	ucpi = ufs_load_cylinder (sb, cg);
+	if (!ucpi) {
+		unlock_super (sb);
+		return;
+	}
+	ucg = ubh_get_ucg(UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg))
+		ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");
+
+	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+
+	is_directory = S_ISDIR(inode->i_mode);
+
+	DQUOT_FREE_INODE(inode);
+	DQUOT_DROP(inode);
+
+	clear_inode (inode);
+
+	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
+		ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
+	else {
+		ubh_clrbit (UCPI_UBH, ucpi->c_iusedoff, bit);
+		if (ino < ucpi->c_irotor)
+			ucpi->c_irotor = ino;
+		fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1);
+		fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
+
+		if (is_directory) {
+			fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
+			fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1);
+			fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
+		}
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	
+	sb->s_dirt = 1;
+	unlock_super (sb);
+	UFSD(("EXIT\n"))
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, then of
+ * the groups with above-average free space, that group with the fewest
+ * directories already is chosen.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
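+/*
+ * Note that ufs_new_inode() below actually applies the same three-step
+ * search (parent's cylinder group, quadratic rehash, then a linear scan)
+ * to directories and regular files alike.
+ */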
+struct inode * ufs_new_inode(struct inode * dir, int mode)
+{
+	struct super_block * sb;
+	struct ufs_sb_info * sbi;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_cg_private_info * ucpi;
+	struct ufs_cylinder_group * ucg;
+	struct inode * inode;
+	unsigned cg, bit, i, j, start;
+	struct ufs_inode_info *ufsi;
+
+	UFSD(("ENTER\n"))
+	
+	/* Cannot create files in a deleted directory */
+	if (!dir || !dir->i_nlink)
+		return ERR_PTR(-EPERM);
+	sb = dir->i_sb;
+	inode = new_inode(sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	ufsi = UFS_I(inode);
+	sbi = UFS_SB(sb);
+	uspi = sbi->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+
+	lock_super (sb);
+
+	/*
+	 * Try to place the inode in its parent directory
+	 */
+	i = ufs_inotocg(dir->i_ino);
+	if (sbi->fs_cs(i).cs_nifree) {
+		cg = i;
+		goto cg_found;
+	}
+
+	/*
+	 * Use a quadratic hash to find a group with a free inode
+	 */
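+	/*
+	 * With j doubling on each pass, the groups examined are i+1, i+3,
+	 * i+7, i+15, ... (mod s_ncg) relative to the starting group.
+	 */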
+	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
+		i += j;
+		if (i >= uspi->s_ncg)
+			i -= uspi->s_ncg;
+		if (sbi->fs_cs(i).cs_nifree) {
+			cg = i;
+			goto cg_found;
+		}
+	}
+
+	/*
+	 * That failed: try linear search for a free inode
+	 */
+	i = ufs_inotocg(dir->i_ino) + 1;
+	for (j = 2; j < uspi->s_ncg; j++) {
+		i++;
+		if (i >= uspi->s_ncg)
+			i = 0;
+		if (sbi->fs_cs(i).cs_nifree) {
+			cg = i;
+			goto cg_found;
+		}
+	}
+	
+	goto failed;
+
+cg_found:
+	ucpi = ufs_load_cylinder (sb, cg);
+	if (!ucpi)
+		goto failed;
+	ucg = ubh_get_ucg(UCPI_UBH);
+	if (!ufs_cg_chkmagic(sb, ucg)) 
+		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");
+
+	start = ucpi->c_irotor;
+	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
+	if (!(bit < uspi->s_ipg)) {
+		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
+		if (!(bit < start)) {
+			ufs_error (sb, "ufs_new_inode",
+			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
+			goto failed;
+		}
+	}
+	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
+	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
+		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
+	else {
+		ufs_panic (sb, "ufs_new_inode", "internal error");
+		goto failed;
+	}
+	
+	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
+	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
+	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
+	
+	if (S_ISDIR(mode)) {
+		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
+		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
+		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
+	}
+
+	ubh_mark_buffer_dirty (USPI_UBH);
+	ubh_mark_buffer_dirty (UCPI_UBH);
+	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
+		ubh_wait_on_buffer (UCPI_UBH);
+	}
+	sb->s_dirt = 1;
+
+	inode->i_mode = mode;
+	inode->i_uid = current->fsuid;
+	if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			inode->i_mode |= S_ISGID;
+	} else
+		inode->i_gid = current->fsgid;
+
+	inode->i_ino = cg * uspi->s_ipg + bit;
+	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
+	inode->i_blocks = 0;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	ufsi->i_flags = UFS_I(dir)->i_flags;
+	ufsi->i_lastfrag = 0;
+	ufsi->i_gen = 0;
+	ufsi->i_shadow = 0;
+	ufsi->i_osync = 0;
+	ufsi->i_oeftflag = 0;
+	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
+
+	insert_inode_hash(inode);
+	mark_inode_dirty(inode);
+
+	unlock_super (sb);
+
+	if (DQUOT_ALLOC_INODE(inode)) {
+		DQUOT_DROP(inode);
+		inode->i_flags |= S_NOQUOTA;
+		inode->i_nlink = 0;
+		iput(inode);
+		return ERR_PTR(-EDQUOT);
+	}
+
+	UFSD(("allocating inode %lu\n", inode->i_ino))
+	UFSD(("EXIT\n"))
+	return inode;
+
+failed:
+	unlock_super (sb);
+	make_bad_inode(inode);
+	iput (inode);
+	UFSD(("EXIT (FAILED)\n"))
+	return ERR_PTR(-ENOSPC);
+}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
new file mode 100644
index 0000000..718627c
--- /dev/null
+++ b/fs/ufs/inode.c
@@ -0,0 +1,816 @@
+/*
+ *  linux/fs/ufs/inode.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_INODE_DEBUG
+#undef UFS_INODE_DEBUG_MORE
+
+#ifdef UFS_INODE_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
+	int ptrs = uspi->s_apb;
+	int ptrs_bits = uspi->s_apbshift;
+	const long direct_blocks = UFS_NDADDR,
+		indirect_blocks = ptrs,
+		double_blocks = (1 << (ptrs_bits * 2));
+	int n = 0;
+
+
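+	/*
+	 * The logical block number is translated into a path of array
+	 * indices: direct blocks first, then single, double and triple
+	 * indirection.  For example, assuming UFS_NDADDR == 12 and
+	 * s_apb == 2048 (an 8 KiB block of 32-bit pointers), block 5000
+	 * falls into the double-indirect range: 5000 - 12 - 2048 = 2940,
+	 * giving the path { UFS_DIND_BLOCK, 2940 >> 11 = 1, 2940 & 2047 = 892 }.
+	 */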
+	UFSD(("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks));
+	if (i_block < 0) {
+		ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
+	} else if (i_block < direct_blocks) {
+		offsets[n++] = i_block;
+	} else if ((i_block -= direct_blocks) < indirect_blocks) {
+		offsets[n++] = UFS_IND_BLOCK;
+		offsets[n++] = i_block;
+	} else if ((i_block -= indirect_blocks) < double_blocks) {
+		offsets[n++] = UFS_DIND_BLOCK;
+		offsets[n++] = i_block >> ptrs_bits;
+		offsets[n++] = i_block & (ptrs - 1);
+	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+		offsets[n++] = UFS_TIND_BLOCK;
+		offsets[n++] = i_block >> (ptrs_bits * 2);
+		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+		offsets[n++] = i_block & (ptrs - 1);
+	} else {
+		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
+	}
+	return n;
+}
+
+/*
+ * Returns the location of the fragment from
+ * the beginning of the filesystem.
+ */
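+/*
+ * UFS1 inodes store 32-bit (__fs32) block pointers, while UFS2 uses 64-bit
+ * (__fs64) pointers; hence the two separate walks below.
+ */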
+
+u64  ufs_frag_map(struct inode *inode, sector_t frag)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
+	int shift = uspi->s_apbshift-uspi->s_fpbshift;
+	sector_t offsets[4], *p;
+	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
+	u64  ret = 0L;
+	__fs32 block;
+	__fs64 u2_block = 0L;
+	unsigned flags = UFS_SB(sb)->s_flags;
+	u64 temp = 0L;
+
+	UFSD((": frag = %lu  depth = %d\n",frag,depth));
+	UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask));
+
+	if (depth == 0)
+		return 0;
+
+	p = offsets;
+
+	lock_kernel();
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		goto ufs2;
+
+	block = ufsi->i_u1.i_data[*p++];
+	if (!block)
+		goto out;
+	while (--depth) {
+		struct buffer_head *bh;
+		sector_t n = *p++;
+
+		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
+		if (!bh)
+			goto out;
+		block = ((__fs32 *) bh->b_data)[n & mask];
+		brelse (bh);
+		if (!block)
+			goto out;
+	}
+	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
+	goto out;
+ufs2:
+	u2_block = ufsi->i_u1.u2_i_data[*p++];
+	if (!u2_block)
+		goto out;
+
+
+	while (--depth) {
+		struct buffer_head *bh;
+		sector_t n = *p++;
+
+
+		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
+		bh = sb_bread(sb, temp +(u64) (n>>shift));
+		if (!bh)
+			goto out;
+		u2_block = ((__fs64 *)bh->b_data)[n & mask];
+		brelse(bh);
+		if (!u2_block)
+			goto out;
+	}
+	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
+	ret = temp + (u64) (frag & uspi->s_fpbmask);
+
+out:
+	unlock_kernel();
+	return ret;
+}
+
+static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
+	unsigned int fragment, unsigned int new_fragment,
+	unsigned int required, int *err, int metadata, long *phys, int *new)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * result;
+	unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
+	unsigned tmp, goal;
+	__fs32 * p, * p2;
+	unsigned flags = 0;
+
+	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
+		inode->i_ino, fragment, new_fragment, required))         
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	flags = UFS_SB(sb)->s_flags;
+        /* TODO : to be done for write support
+        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+             goto ufs2;
+         */
+
+	block = ufs_fragstoblks (fragment);
+	blockoff = ufs_fragnum (fragment);
+	p = ufsi->i_u1.i_data + block;
+	goal = 0;
+
+repeat:
+	tmp = fs32_to_cpu(sb, *p);
+	lastfrag = ufsi->i_lastfrag;
+	if (tmp && fragment < lastfrag) {
+		if (metadata) {
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
+			if (tmp == fs32_to_cpu(sb, *p)) {
+				UFSD(("EXIT, result %u\n", tmp + blockoff))
+				return result;
+			}
+			brelse (result);
+			goto repeat;
+		} else {
+			*phys = tmp;
+			return NULL;
+		}
+	}
+
+	lastblock = ufs_fragstoblks (lastfrag);
+	lastblockoff = ufs_fragnum (lastfrag);
+	/*
+	 * We will extend file into new block beyond last allocated block
+	 */
+	if (lastblock < block) {
+		/*
+		 * We must reallocate last allocated block
+		 */
+		if (lastblockoff) {
+			p2 = ufsi->i_u1.i_data + lastblock;
+			tmp = ufs_new_fragments (inode, p2, lastfrag, 
+				fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err);
+			if (!tmp) {
+				if (lastfrag != ufsi->i_lastfrag)
+					goto repeat;
+				else
+					return NULL;
+			}
+			lastfrag = ufsi->i_lastfrag;
+			
+		}
+		goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
+		tmp = ufs_new_fragments (inode, p, fragment - blockoff, 
+			goal, required + blockoff, err);
+	}
+	/*
+	 * We will extend last allocated block
+	 */
+	else if (lastblock == block) {
+		tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
+			fs32_to_cpu(sb, *p), required +  (blockoff - lastblockoff), err);
+	}
+	/*
+	 * We will allocate new block before last allocated block
+	 */
+	else /* (lastblock > block) */ {
+		if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
+			goal = tmp + uspi->s_fpb;
+		tmp = ufs_new_fragments (inode, p, fragment - blockoff, 
+			goal, uspi->s_fpb, err);
+	}
+	if (!tmp) {
+		if ((!blockoff && *p) || 
+		    (blockoff && lastfrag != ufsi->i_lastfrag))
+			goto repeat;
+		*err = -ENOSPC;
+		return NULL;
+	}
+
+	/* The nullification of fragments done in ufs/balloc.c is
+	 * something I don't have the stomach to move into here right
+	 * now. -DaveM
+	 */
+	if (metadata) {
+		result = sb_getblk(inode->i_sb, tmp + blockoff);
+	} else {
+		*phys = tmp;
+		result = NULL;
+		*err = 0;
+		*new = 1;
+	}
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	if (IS_SYNC(inode))
+		ufs_sync_inode (inode);
+	mark_inode_dirty(inode);
+	UFSD(("EXIT, result %u\n", tmp + blockoff))
+	return result;
+
+     /* This part : To be implemented ....
+        Required only for writing, not required for READ-ONLY.
+ufs2:
+
+	u2_block = ufs_fragstoblks(fragment);
+	u2_blockoff = ufs_fragnum(fragment);
+	p = ufsi->i_u1.u2_i_data + block;
+	goal = 0;
+
+repeat2:
+	tmp = fs32_to_cpu(sb, *p);
+	lastfrag = ufsi->i_lastfrag;
+
+     */
+}
+
+static struct buffer_head * ufs_block_getfrag (struct inode *inode,
+	struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment, 
+	unsigned int blocksize, int * err, int metadata, long *phys, int *new)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * result;
+	unsigned tmp, goal, block, blockoff;
+	__fs32 * p;
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	block = ufs_fragstoblks (fragment);
+	blockoff = ufs_fragnum (fragment);
+
+	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment))	
+
+	result = NULL;
+	if (!bh)
+		goto out;
+	if (!buffer_uptodate(bh)) {
+		ll_rw_block (READ, 1, &bh);
+		wait_on_buffer (bh);
+		if (!buffer_uptodate(bh))
+			goto out;
+	}
+
+	p = (__fs32 *) bh->b_data + block;
+repeat:
+	tmp = fs32_to_cpu(sb, *p);
+	if (tmp) {
+		if (metadata) {
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
+			if (tmp == fs32_to_cpu(sb, *p))
+				goto out;
+			brelse (result);
+			goto repeat;
+		} else {
+			*phys = tmp;
+			goto out;
+		}
+	}
+
+	if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb))
+		goal = tmp + uspi->s_fpb;
+	else
+		goal = bh->b_blocknr + uspi->s_fpb;
+	tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
+	if (!tmp) {
+		if (fs32_to_cpu(sb, *p))
+			goto repeat;
+		goto out;
+	}		
+
+	/* The nullification of fragments done in ufs/balloc.c is
+	 * something I don't have the stomach to move into here right
+	 * now. -DaveM
+	 */
+	if (metadata) {
+		result = sb_getblk(sb, tmp + blockoff);
+	} else {
+		*phys = tmp;
+		*new = 1;
+	}
+
+	mark_buffer_dirty(bh);
+	if (IS_SYNC(inode))
+		sync_dirty_buffer(bh);
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+out:
+	brelse (bh);
+	UFSD(("EXIT, result %u\n", tmp + blockoff))
+	return result;
+}
+
+/*
+ * This function gets the block which contains the fragment.
+ */
+
+static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
+{
+	struct super_block * sb = inode->i_sb;
+	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
+	struct buffer_head * bh;
+	int ret, err, new;
+	unsigned long ptr,phys;
+	u64 phys64 = 0;
+	
+	if (!create) {
+		phys64 = ufs_frag_map(inode, fragment);
+		UFSD(("phys64 = %lu \n",phys64));
+		if (phys64)
+			map_bh(bh_result, sb, phys64);
+		return 0;
+	}
+
+	/* This code is entered only for write requests (create != 0). */
+
+	err = -EIO;
+	new = 0;
+	ret = 0;
+	bh = NULL;
+
+	lock_kernel();
+
+	UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
+	if (fragment < 0)
+		goto abort_negative;
+	if (fragment >
+	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
+	     << uspi->s_fpbshift))
+		goto abort_too_big;
+
+	err = 0;
+	ptr = fragment;
+	  
+	/*
+	 * ok, these macros clean the logic up a bit and make
+	 * it much more readable:
+	 */
+#define GET_INODE_DATABLOCK(x) \
+		ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
+#define GET_INODE_PTR(x) \
+		ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
+#define GET_INDIRECT_DATABLOCK(x) \
+		ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
+				  &err, 0, &phys, &new);
+#define GET_INDIRECT_PTR(x) \
+		ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
+				  &err, 1, NULL, NULL);
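+	/*
+	 * The fragment index is peeled off tier by tier below: direct
+	 * fragments held in the inode, then the single-, double- and
+	 * triple-indirect ranges.
+	 */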
+
+	if (ptr < UFS_NDIR_FRAGMENT) {
+		bh = GET_INODE_DATABLOCK(ptr);
+		goto out;
+	}
+	ptr -= UFS_NDIR_FRAGMENT;
+	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
+		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
+		goto get_indirect;
+	}
+	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
+	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
+		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
+		goto get_double;
+	}
+	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
+	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
+	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
+get_double:
+	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
+get_indirect:
+	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
+
+#undef GET_INODE_DATABLOCK
+#undef GET_INODE_PTR
+#undef GET_INDIRECT_DATABLOCK
+#undef GET_INDIRECT_PTR
+
+out:
+	if (err)
+		goto abort;
+	if (new)
+		set_buffer_new(bh_result);
+	map_bh(bh_result, sb, phys);
+abort:
+	unlock_kernel();
+	return err;
+
+abort_negative:
+	ufs_warning(sb, "ufs_get_block", "block < 0");
+	goto abort;
+
+abort_too_big:
+	ufs_warning(sb, "ufs_get_block", "block > big");
+	goto abort;
+}
+
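+/*
+ * Helper for callers that want a buffer_head for a single fragment: a
+ * throwaway buffer_head on the stack collects the mapping produced by
+ * ufs_getfrag_block(), and the real buffer is then obtained with
+ * sb_getblk().  A newly allocated fragment is zeroed and marked dirty.
+ */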
+struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
+				int create, int *err)
+{
+	struct buffer_head dummy;
+	int error;
+
+	dummy.b_state = 0;
+	dummy.b_blocknr = -1000;
+	error = ufs_getfrag_block(inode, fragment, &dummy, create);
+	*err = error;
+	if (!error && buffer_mapped(&dummy)) {
+		struct buffer_head *bh;
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+		if (buffer_new(&dummy)) {
+			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+		}
+		return bh;
+	}
+	return NULL;
+}
+
+struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
+	int create, int * err)
+{
+	struct buffer_head * bh;
+
+	UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
+	bh = ufs_getfrag (inode, fragment, create, err);
+	if (!bh || buffer_uptodate(bh)) 		
+		return bh;
+	ll_rw_block (READ, 1, &bh);
+	wait_on_buffer (bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	brelse (bh);
+	*err = -EIO;
+	return NULL;
+}
+
+static int ufs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	return block_write_full_page(page,ufs_getfrag_block,wbc);
+}
+static int ufs_readpage(struct file *file, struct page *page)
+{
+	return block_read_full_page(page,ufs_getfrag_block);
+}
+static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+{
+	return block_prepare_write(page,from,to,ufs_getfrag_block);
+}
+static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping,block,ufs_getfrag_block);
+}
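+/*
+ * All of these address-space operations resolve file offsets through
+ * ufs_getfrag_block() above.
+ */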
+struct address_space_operations ufs_aops = {
+	.readpage = ufs_readpage,
+	.writepage = ufs_writepage,
+	.sync_page = block_sync_page,
+	.prepare_write = ufs_prepare_write,
+	.commit_write = generic_commit_write,
+	.bmap = ufs_bmap
+};
+
+void ufs_read_inode (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_inode * ufs_inode;	
+	struct ufs2_inode *ufs2_inode;
+	struct buffer_head * bh;
+	mode_t mode;
+	unsigned i;
+	unsigned flags;
+	
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+
+	if (inode->i_ino < UFS_ROOTINO || 
+	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
+		goto bad_inode;
+	}
+	
+	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
+	if (!bh) {
+		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
+		goto bad_inode;
+	}
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		goto ufs2_inode;
+
+	ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
+
+	/*
+	 * Copy data to the in-core inode.
+	 */
+	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
+	inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
+	if (inode->i_nlink == 0)
+		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+	
+	/*
+	 * Linux now has 32-bit uid and gid, so we can support EFT.
+	 */
+	inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
+	inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);
+
+	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
+	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
+	inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size (for stat) */
+	inode->i_version++;
+	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
+	ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
+	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
+	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
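+	/* i_lastfrag: number of fragments needed to cover i_size. */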
+	ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	
+	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
+	}
+	ufsi->i_osync = 0;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ufs_dir_inode_operations;
+		inode->i_fop = &ufs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (!inode->i_blocks)
+			inode->i_op = &ufs_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &ufs_aops;
+		}
+	} else
+		init_special_inode(inode, inode->i_mode,
+			ufs_get_inode_dev(sb, ufsi));
+
+	brelse (bh);
+
+	UFSD(("EXIT\n"))
+	return;
+
+bad_inode:
+	make_bad_inode(inode);
+	return;
+
+ufs2_inode:
+	UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino))
+
+	ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
+
+	/*
+	 * Copy data to the in-core inode.
+	 */
+	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
+	inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
+	if (inode->i_nlink == 0)
+		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+
+        /*
+         * Linux now has 32-bit uid and gid, so we can support EFT.
+         */
+	inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
+	inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
+
+	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
+	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs2_inode->ui_mtime.tv_sec);
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_ctime.tv_nsec = 0;
+	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
+	inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
+
+	inode->i_version++;
+	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
+	ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
+	/*
+	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
+	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
+	*/
+	ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
+
+	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufsi->i_u1.u2_i_data[i] =
+				ufs2_inode->ui_u2.ui_addr.ui_db[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
+	}
+	ufsi->i_osync = 0;
+
+	if (S_ISREG(inode->i_mode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_op = &ufs_dir_inode_operations;
+		inode->i_fop = &ufs_dir_operations;
+	} else if (S_ISLNK(inode->i_mode)) {
+		if (!inode->i_blocks)
+			inode->i_op = &ufs_fast_symlink_inode_operations;
+		else {
+			inode->i_op = &page_symlink_inode_operations;
+			inode->i_mapping->a_ops = &ufs_aops;
+		}
+	} else   /* TODO  : here ...*/
+		init_special_inode(inode, inode->i_mode,
+			ufs_get_inode_dev(sb, ufsi));
+
+	brelse(bh);
+
+	UFSD(("EXIT\n"))
+	return;
+}
+
+static int ufs_update_inode(struct inode * inode, int do_sync)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	struct ufs_inode * ufs_inode;
+	unsigned i;
+	unsigned flags;
+
+	UFSD(("ENTER, ino %lu\n", inode->i_ino))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+
+	if (inode->i_ino < UFS_ROOTINO || 
+	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+		ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
+		return -1;
+	}
+
+	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
+	if (!bh) {
+		ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
+		return -1;
+	}
+	ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));
+
+	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
+	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
+
+	ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
+	ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);
+		
+	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
+	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
+	ufs_inode->ui_atime.tv_usec = 0;
+	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
+	ufs_inode->ui_ctime.tv_usec = 0;
+	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
+	ufs_inode->ui_mtime.tv_usec = 0;
+	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
+	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
+	ufs_inode->ui_gen = cpu_to_fs32(sb, ufsi->i_gen);
+
+	if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
+		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
+		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
+	}
+
+	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
+		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
+	} else if (inode->i_blocks) {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
+			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
+	}
+	else {
+		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
+			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+	}
+
+	if (!inode->i_nlink)
+		memset (ufs_inode, 0, sizeof(struct ufs_inode));
+		
+	mark_buffer_dirty(bh);
+	if (do_sync)
+		sync_dirty_buffer(bh);
+	brelse (bh);
+	
+	UFSD(("EXIT\n"))
+	return 0;
+}
+
+int ufs_write_inode (struct inode * inode, int wait)
+{
+	int ret;
+	lock_kernel();
+	ret = ufs_update_inode (inode, wait);
+	unlock_kernel();
+	return ret;
+}
+
+int ufs_sync_inode (struct inode *inode)
+{
+	return ufs_update_inode (inode, 1);
+}
+
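+/*
+ * Final teardown of an unlinked inode: write it out, truncate any remaining
+ * data blocks, then release the on-disk inode itself via ufs_free_inode().
+ */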
+void ufs_delete_inode (struct inode * inode)
+{
+	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
+	lock_kernel();
+	mark_inode_dirty(inode);
+	ufs_update_inode(inode, IS_SYNC(inode));
+	inode->i_size = 0;
+	if (inode->i_blocks)
+		ufs_truncate (inode);
+	ufs_free_inode (inode);
+	unlock_kernel();
+}
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
new file mode 100644
index 0000000..2958cde
--- /dev/null
+++ b/fs/ufs/namei.c
@@ -0,0 +1,375 @@
+/*
+ * linux/fs/ufs/namei.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/namei.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include "swab.h"	/* will go away - see comment in mknod() */
+#include "util.h"
+
+/*
+#undef UFS_NAMEI_DEBUG
+*/
+#define UFS_NAMEI_DEBUG
+
+#ifdef UFS_NAMEI_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+static inline void ufs_inc_count(struct inode *inode)
+{
+	inode->i_nlink++;
+	mark_inode_dirty(inode);
+}
+
+static inline void ufs_dec_count(struct inode *inode)
+{
+	inode->i_nlink--;
+	mark_inode_dirty(inode);
+}
+
+static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
+{
+	int err = ufs_add_link(dentry, inode);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	ufs_dec_count(inode);
+	iput(inode);
+	return err;
+}
+
+static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode * inode = NULL;
+	ino_t ino;
+	
+	if (dentry->d_name.len > UFS_MAXNAMLEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	lock_kernel();
+	ino = ufs_inode_by_name(dir, dentry);
+	if (ino) {
+		inode = iget(dir->i_sb, ino);
+		if (!inode) {
+			unlock_kernel();
+			return ERR_PTR(-EACCES);
+		}
+	}
+	unlock_kernel();
+	d_add(dentry, inode);
+	return NULL;
+}
+
+/*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate(). 
+ */
+static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
+		struct nameidata *nd)
+{
+	struct inode * inode = ufs_new_inode(dir, mode);
+	int err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		inode->i_op = &ufs_file_inode_operations;
+		inode->i_fop = &ufs_file_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+		mark_inode_dirty(inode);
+		lock_kernel();
+		err = ufs_add_nondir(dentry, inode);
+		unlock_kernel();
+	}
+	return err;
+}
+
+static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+{
+	struct inode *inode;
+	int err;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+	inode = ufs_new_inode(dir, mode);
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		init_special_inode(inode, mode, rdev);
+		/* NOTE: that'll go when we get wide dev_t */
+		ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev);
+		mark_inode_dirty(inode);
+		lock_kernel();
+		err = ufs_add_nondir(dentry, inode);
+		unlock_kernel();
+	}
+	return err;
+}
+
+static int ufs_symlink (struct inode * dir, struct dentry * dentry,
+	const char * symname)
+{
+	struct super_block * sb = dir->i_sb;
+	int err = -ENAMETOOLONG;
+	unsigned l = strlen(symname)+1;
+	struct inode * inode;
+
+	if (l > sb->s_blocksize)
+		goto out;
+
+	lock_kernel();
+	inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
+		/* slow symlink */
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_mapping->a_ops = &ufs_aops;
+		err = page_symlink(inode, symname, l);
+		if (err)
+			goto out_fail;
+	} else {
+		/* fast symlink */
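+		/*
+		 * The target string is short enough to live directly in the
+		 * on-disk inode's block-pointer area (i_u1.i_data), so no
+		 * separate data block is allocated for it.
+		 */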
+		inode->i_op = &ufs_fast_symlink_inode_operations;
+		memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
+		inode->i_size = l-1;
+	}
+	mark_inode_dirty(inode);
+
+	err = ufs_add_nondir(dentry, inode);
+out:
+	unlock_kernel();
+	return err;
+
+out_fail:
+	ufs_dec_count(inode);
+	iput(inode);
+	goto out;
+}
+
+static int ufs_link (struct dentry * old_dentry, struct inode * dir,
+	struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	int error;
+
+	lock_kernel();
+	if (inode->i_nlink >= UFS_LINK_MAX) {
+		unlock_kernel();
+		return -EMLINK;
+	}
+
+	inode->i_ctime = CURRENT_TIME_SEC;
+	ufs_inc_count(inode);
+	atomic_inc(&inode->i_count);
+
+	error = ufs_add_nondir(dentry, inode);
+	unlock_kernel();
+	return error;
+}
+
+static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+	struct inode * inode;
+	int err = -EMLINK;
+
+	if (dir->i_nlink >= UFS_LINK_MAX)
+		goto out;
+
+	lock_kernel();
+	ufs_inc_count(dir);
+
+	inode = ufs_new_inode(dir, S_IFDIR|mode);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_dir;
+
+	inode->i_op = &ufs_dir_inode_operations;
+	inode->i_fop = &ufs_dir_operations;
+
+	ufs_inc_count(inode);
+
+	err = ufs_make_empty(inode, dir);
+	if (err)
+		goto out_fail;
+
+	err = ufs_add_link(dentry, inode);
+	if (err)
+		goto out_fail;
+	unlock_kernel();
+
+	d_instantiate(dentry, inode);
+out:
+	return err;
+
+out_fail:
+	ufs_dec_count(inode);
+	ufs_dec_count(inode);
+	iput (inode);
+out_dir:
+	ufs_dec_count(dir);
+	unlock_kernel();
+	goto out;
+}
+
+static int ufs_unlink(struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	struct buffer_head * bh;
+	struct ufs_dir_entry * de;
+	int err = -ENOENT;
+
+	lock_kernel();
+	de = ufs_find_entry (dentry, &bh);
+	if (!de)
+		goto out;
+
+	err = ufs_delete_entry (dir, de, bh);
+	if (err)
+		goto out;
+
+	inode->i_ctime = dir->i_ctime;
+	ufs_dec_count(inode);
+	err = 0;
+out:
+	unlock_kernel();
+	return err;
+}
+
+static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
+{
+	struct inode * inode = dentry->d_inode;
+	int err= -ENOTEMPTY;
+
+	lock_kernel();
+	if (ufs_empty_dir (inode)) {
+		err = ufs_unlink(dir, dentry);
+		if (!err) {
+			inode->i_size = 0;
+			ufs_dec_count(inode);
+			ufs_dec_count(dir);
+		}
+	}
+	unlock_kernel();
+	return err;
+}
+
+static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
+	struct inode * new_dir,	struct dentry * new_dentry )
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	struct buffer_head *dir_bh = NULL;
+	struct ufs_dir_entry *dir_de = NULL;
+	struct buffer_head *old_bh;
+	struct ufs_dir_entry *old_de;
+	int err = -ENOENT;
+
+	lock_kernel();
+	old_de = ufs_find_entry (old_dentry, &old_bh);
+	if (!old_de)
+		goto out;
+
+	if (S_ISDIR(old_inode->i_mode)) {
+		err = -EIO;
+		dir_de = ufs_dotdot(old_inode, &dir_bh);
+		if (!dir_de)
+			goto out_old;
+	}
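+	/*
+	 * For a directory, dir_de is its ".." entry; when the directory is
+	 * moved to a different parent, this entry is re-pointed at new_dir
+	 * further below.
+	 */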
+
+	if (new_inode) {
+		struct buffer_head *new_bh;
+		struct ufs_dir_entry *new_de;
+
+		err = -ENOTEMPTY;
+		if (dir_de && !ufs_empty_dir (new_inode))
+			goto out_dir;
+		err = -ENOENT;
+		new_de = ufs_find_entry (new_dentry, &new_bh);
+		if (!new_de)
+			goto out_dir;
+		ufs_inc_count(old_inode);
+		ufs_set_link(new_dir, new_de, new_bh, old_inode);
+		new_inode->i_ctime = CURRENT_TIME_SEC;
+		if (dir_de)
+			new_inode->i_nlink--;
+		ufs_dec_count(new_inode);
+	} else {
+		if (dir_de) {
+			err = -EMLINK;
+			if (new_dir->i_nlink >= UFS_LINK_MAX)
+				goto out_dir;
+		}
+		ufs_inc_count(old_inode);
+		err = ufs_add_link(new_dentry, old_inode);
+		if (err) {
+			ufs_dec_count(old_inode);
+			goto out_dir;
+		}
+		if (dir_de)
+			ufs_inc_count(new_dir);
+	}
+
+	ufs_delete_entry (old_dir, old_de, old_bh);
+
+	ufs_dec_count(old_inode);
+
+	if (dir_de) {
+		ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
+		ufs_dec_count(old_dir);
+	}
+	unlock_kernel();
+	return 0;
+
+out_dir:
+	if (dir_de)
+		brelse(dir_bh);
+out_old:
+	brelse (old_bh);
+out:
+	unlock_kernel();
+	return err;
+}
+
+struct inode_operations ufs_dir_inode_operations = {
+	.create		= ufs_create,
+	.lookup		= ufs_lookup,
+	.link		= ufs_link,
+	.unlink		= ufs_unlink,
+	.symlink	= ufs_symlink,
+	.mkdir		= ufs_mkdir,
+	.rmdir		= ufs_rmdir,
+	.mknod		= ufs_mknod,
+	.rename		= ufs_rename,
+};
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
new file mode 100644
index 0000000..f036d69
--- /dev/null
+++ b/fs/ufs/super.c
@@ -0,0 +1,1347 @@
+/*
+ *  linux/fs/ufs/super.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+/* Derived from
+ *
+ *  linux/fs/ext2/super.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+ 
+/*
+ * Inspired by
+ *
+ *  linux/fs/ufs/super.c
+ *
+ * Copyright (C) 1996
+ * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
+ * Laboratory for Computer Science Research Computing Facility
+ * Rutgers, The State University of New Jersey
+ *
+ * Copyright (C) 1996  Eddie C. Dost  (ecd@skynet.be)
+ *
+ * Kernel module support added on 96/04/26 by
+ * Stefan Reinauer <stepan@home.culture.mipt.ru>
+ *
+ * Module usage counts added on 96/04/29 by
+ * Gertjan van Wingerde <gertjan@cs.vu.nl>
+ *
+ * Clean swab support on 19970406 by
+ * Francois-Rene Rideau <fare@tunes.org>
+ *
+ * 4.4BSD (FreeBSD) support added on February 1st 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
+ * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
+ *
+ * NeXTstep support added on February 5th 1998 by
+ * Niels Kristian Bech Jensen <nkbj@image.dk>.
+ *
+ * write support Daniel Pirkl <daniel.pirkl@email.cz> 1998
+ * 
+ * HP/UX hfs filesystem support added by
+ * Martin K. Petersen <mkp@mkp.net>, August 1999
+ *
+ * UFS2 (of FreeBSD 5.x) support added by
+ * Niraj Kumar <niraj17@iitbombay.org>, Jan 2004
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <stdarg.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/parser.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_SUPER_DEBUG
+#undef UFS_SUPER_DEBUG_MORE
+
+#ifdef UFS_SUPER_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+#ifdef UFS_SUPER_DEBUG_MORE
+/*
+ * Print contents of ufs_super_block, useful for debugging
+ */
+void ufs_print_super_stuff(struct super_block *sb,
+	struct ufs_super_block_first * usb1,
+	struct ufs_super_block_second * usb2, 
+	struct ufs_super_block_third * usb3)
+{
+	printk("ufs_print_super_stuff\n");
+	printk("size of usb:     %u\n", sizeof(struct ufs_super_block));
+	printk("  magic:         0x%x\n", fs32_to_cpu(sb, usb3->fs_magic));
+	printk("  sblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_sblkno));
+	printk("  cblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_cblkno));
+	printk("  iblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_iblkno));
+	printk("  dblkno:        %u\n", fs32_to_cpu(sb, usb1->fs_dblkno));
+	printk("  cgoffset:      %u\n", fs32_to_cpu(sb, usb1->fs_cgoffset));
+	printk("  ~cgmask:       0x%x\n", ~fs32_to_cpu(sb, usb1->fs_cgmask));
+	printk("  size:          %u\n", fs32_to_cpu(sb, usb1->fs_size));
+	printk("  dsize:         %u\n", fs32_to_cpu(sb, usb1->fs_dsize));
+	printk("  ncg:           %u\n", fs32_to_cpu(sb, usb1->fs_ncg));
+	printk("  bsize:         %u\n", fs32_to_cpu(sb, usb1->fs_bsize));
+	printk("  fsize:         %u\n", fs32_to_cpu(sb, usb1->fs_fsize));
+	printk("  frag:          %u\n", fs32_to_cpu(sb, usb1->fs_frag));
+	printk("  fragshift:     %u\n", fs32_to_cpu(sb, usb1->fs_fragshift));
+	printk("  ~fmask:        %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask));
+	printk("  fshift:        %u\n", fs32_to_cpu(sb, usb1->fs_fshift));
+	printk("  sbsize:        %u\n", fs32_to_cpu(sb, usb1->fs_sbsize));
+	printk("  spc:           %u\n", fs32_to_cpu(sb, usb1->fs_spc));
+	printk("  cpg:           %u\n", fs32_to_cpu(sb, usb1->fs_cpg));
+	printk("  ipg:           %u\n", fs32_to_cpu(sb, usb1->fs_ipg));
+	printk("  fpg:           %u\n", fs32_to_cpu(sb, usb1->fs_fpg));
+	printk("  csaddr:        %u\n", fs32_to_cpu(sb, usb1->fs_csaddr));
+	printk("  cssize:        %u\n", fs32_to_cpu(sb, usb1->fs_cssize));
+	printk("  cgsize:        %u\n", fs32_to_cpu(sb, usb1->fs_cgsize));
+	printk("  fstodb:        %u\n", fs32_to_cpu(sb, usb1->fs_fsbtodb));
+	printk("  contigsumsize: %d\n", fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize));
+	printk("  postblformat:  %u\n", fs32_to_cpu(sb, usb3->fs_postblformat));
+	printk("  nrpos:         %u\n", fs32_to_cpu(sb, usb3->fs_nrpos));
+	printk("  ndir           %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir));
+	printk("  nifree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree));
+	printk("  nbfree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree));
+	printk("  nffree         %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree));
+	printk("\n");
+}
+
+/*
+ * Print contents of ufs2 ufs_super_block, useful for debugging
+ */
+void ufs2_print_super_stuff(
+     struct super_block *sb,
+      struct ufs_super_block *usb)
+{
+	printk("ufs2_print_super_stuff\n");
+	printk("size of usb:     %u\n", sizeof(struct ufs_super_block));
+	printk("  magic:         0x%x\n", fs32_to_cpu(sb, usb->fs_magic));
+	printk("  fs_size:   %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size));
+	printk("  fs_dsize:  %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize));
+	printk("  bsize:         %u\n", fs32_to_cpu(sb, usb->fs_bsize));
+	printk("  fsize:         %u\n", fs32_to_cpu(sb, usb->fs_fsize));
+	printk("  fs_volname:  %s\n", usb->fs_u11.fs_u2.fs_volname);
+	printk("  fs_fsmnt:  %s\n", usb->fs_u11.fs_u2.fs_fsmnt);
+	printk("  fs_sblockloc: %u\n",fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_sblockloc));
+	printk("  cs_ndir(No of dirs):  %u\n",fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_cstotal.cs_ndir));
+	printk("  cs_nbfree(No of free blocks):  %u\n",fs64_to_cpu(sb,
+			usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree));
+	printk("\n");
+}
+
+/*
+ * Print contents of ufs_cylinder_group, useful for debugging
+ */
+void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group *cg)
+{
+	printk("\nufs_print_cylinder_stuff\n");
+	printk("size of ucg: %u\n", sizeof(struct ufs_cylinder_group));
+	printk("  magic:        %x\n", fs32_to_cpu(sb, cg->cg_magic));
+	printk("  time:         %u\n", fs32_to_cpu(sb, cg->cg_time));
+	printk("  cgx:          %u\n", fs32_to_cpu(sb, cg->cg_cgx));
+	printk("  ncyl:         %u\n", fs16_to_cpu(sb, cg->cg_ncyl));
+	printk("  niblk:        %u\n", fs16_to_cpu(sb, cg->cg_niblk));
+	printk("  ndblk:        %u\n", fs32_to_cpu(sb, cg->cg_ndblk));
+	printk("  cs_ndir:      %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_ndir));
+	printk("  cs_nbfree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nbfree));
+	printk("  cs_nifree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nifree));
+	printk("  cs_nffree:    %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nffree));
+	printk("  rotor:        %u\n", fs32_to_cpu(sb, cg->cg_rotor));
+	printk("  frotor:       %u\n", fs32_to_cpu(sb, cg->cg_frotor));
+	printk("  irotor:       %u\n", fs32_to_cpu(sb, cg->cg_irotor));
+	printk("  frsum:        %u, %u, %u, %u, %u, %u, %u, %u\n",
+	    fs32_to_cpu(sb, cg->cg_frsum[0]), fs32_to_cpu(sb, cg->cg_frsum[1]),
+	    fs32_to_cpu(sb, cg->cg_frsum[2]), fs32_to_cpu(sb, cg->cg_frsum[3]),
+	    fs32_to_cpu(sb, cg->cg_frsum[4]), fs32_to_cpu(sb, cg->cg_frsum[5]),
+	    fs32_to_cpu(sb, cg->cg_frsum[6]), fs32_to_cpu(sb, cg->cg_frsum[7]));
+	printk("  btotoff:      %u\n", fs32_to_cpu(sb, cg->cg_btotoff));
+	printk("  boff:         %u\n", fs32_to_cpu(sb, cg->cg_boff));
+	printk("  iuseoff:      %u\n", fs32_to_cpu(sb, cg->cg_iusedoff));
+	printk("  freeoff:      %u\n", fs32_to_cpu(sb, cg->cg_freeoff));
+	printk("  nextfreeoff:  %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff));
+	printk("  clustersumoff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff));
+	printk("  clusteroff    %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff));
+	printk("  nclusterblks  %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks));
+	printk("\n");
+}
+#endif /* UFS_SUPER_DEBUG_MORE */
+
+static struct super_operations ufs_super_ops;
+
+static char error_buf[1024];
+
+void ufs_error (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	va_list args;
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_clean = UFS_FSBAD;
+		ubh_mark_buffer_dirty(USPI_UBH);
+		sb->s_dirt = 1;
+		sb->s_flags |= MS_RDONLY;
+	}
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
+	case UFS_MOUNT_ONERROR_PANIC:
+		panic ("UFS-fs panic (device %s): %s: %s\n", 
+			sb->s_id, function, error_buf);
+
+	case UFS_MOUNT_ONERROR_LOCK:
+	case UFS_MOUNT_ONERROR_UMOUNT:
+	case UFS_MOUNT_ONERROR_REPAIR:
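+		/*
+		 * Lock, umount and repair share the same handling here: the
+		 * filesystem has already been switched to read-only above,
+		 * so just report the error.
+		 */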
+		printk (KERN_CRIT "UFS-fs error (device %s): %s: %s\n",
+			sb->s_id, function, error_buf);
+	}		
+}
+
+void ufs_panic (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	va_list args;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_clean = UFS_FSBAD;
+		ubh_mark_buffer_dirty(USPI_UBH);
+		sb->s_dirt = 1;
+	}
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	sb->s_flags |= MS_RDONLY;
+	printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
+void ufs_warning (struct super_block * sb, const char * function,
+	const char * fmt, ...)
+{
+	va_list args;
+
+	va_start (args, fmt);
+	vsprintf (error_buf, fmt, args);
+	va_end (args);
+	printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
+		sb->s_id, function, error_buf);
+}
+
+enum {
+	Opt_type_old, Opt_type_sunx86, Opt_type_sun, Opt_type_44bsd,
+	Opt_type_ufs2, Opt_type_hp, Opt_type_nextstepcd, Opt_type_nextstep,
+	Opt_type_openstep, Opt_onerror_panic, Opt_onerror_lock,
+	Opt_onerror_umount, Opt_onerror_repair, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_type_old, "ufstype=old"},
+	{Opt_type_sunx86, "ufstype=sunx86"},
+	{Opt_type_sun, "ufstype=sun"},
+	{Opt_type_44bsd, "ufstype=44bsd"},
+	{Opt_type_ufs2, "ufstype=ufs2"},
+	{Opt_type_ufs2, "ufstype=5xbsd"},
+	{Opt_type_hp, "ufstype=hp"},
+	{Opt_type_nextstepcd, "ufstype=nextstep-cd"},
+	{Opt_type_nextstep, "ufstype=nextstep"},
+	{Opt_type_openstep, "ufstype=openstep"},
+	{Opt_onerror_panic, "onerror=panic"},
+	{Opt_onerror_lock, "onerror=lock"},
+	{Opt_onerror_umount, "onerror=umount"},
+	{Opt_onerror_repair, "onerror=repair"},
+	{Opt_err, NULL}
+};
+
+static int ufs_parse_options (char * options, unsigned * mount_options)
+{
+	char * p;
+	
+	UFSD(("ENTER\n"))
+	
+	if (!options)
+		return 1;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_type_old:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_OLD);
+			break;
+		case Opt_type_sunx86:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_SUNx86);
+			break;
+		case Opt_type_sun:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_SUN);
+			break;
+		case Opt_type_44bsd:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_44BSD);
+			break;
+		case Opt_type_ufs2:
+			ufs_clear_opt(*mount_options, UFSTYPE);
+			ufs_set_opt(*mount_options, UFSTYPE_UFS2);
+			break;
+		case Opt_type_hp:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_HP);
+			break;
+		case Opt_type_nextstepcd:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP_CD);
+			break;
+		case Opt_type_nextstep:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP);
+			break;
+		case Opt_type_openstep:
+			ufs_clear_opt (*mount_options, UFSTYPE);
+			ufs_set_opt (*mount_options, UFSTYPE_OPENSTEP);
+			break;
+		case Opt_onerror_panic:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_PANIC);
+			break;
+		case Opt_onerror_lock:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_LOCK);
+			break;
+		case Opt_onerror_umount:
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_UMOUNT);
+			break;
+		case Opt_onerror_repair:
+			printk("UFS-fs: Unable to do repair on error, "
+				"will lock the filesystem instead\n");
+			ufs_clear_opt (*mount_options, ONERROR);
+			ufs_set_opt (*mount_options, ONERROR_REPAIR);
+			break;
+		default:
+			printk("UFS-fs: Invalid option: \"%s\" "
+					"or missing value\n", p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Read on-disk structures associated with cylinder groups
+ */
+static int ufs_read_cylinder_structures (struct super_block *sb) {
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block *usb;
+	struct ufs_buffer_head * ubh;
+	unsigned char * base, * space;
+	unsigned size, blks, i;
+	unsigned flags = 0;
+	
+	UFSD(("ENTER\n"))
+	
+	uspi = sbi->s_uspi;
+
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
+
+        flags = UFS_SB(sb)->s_flags;
+	
+	/*
+	 * Read cs structures from (usually) first data block
+	 * on the device. 
+	 */
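+	/*
+	 * The per-cylinder-group summary array (struct ufs_csum) is copied
+	 * into kernel memory and indexed through sbi->s_csp[], so the
+	 * allocator can consult free block/inode counts without re-reading
+	 * the device.
+	 */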
+	size = uspi->s_cssize;
+	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	base = space = kmalloc(size, GFP_KERNEL);
+	if (!base)
+		goto failed; 
+	for (i = 0; i < blks; i += uspi->s_fpb) {
+		size = uspi->s_bsize;
+		if (i + uspi->s_fpb > blks)
+			size = (blks - i) * uspi->s_fsize;
+
+		if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+			ubh = ubh_bread(sb,
+				fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size);
+			if (!ubh)
+				goto failed;
+			ubh_ubhcpymem (space, ubh, size);
+			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
+		}
+		else {
+			ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
+			if (!ubh)
+				goto failed;
+			ubh_ubhcpymem(space, ubh, size);
+			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
+		}
+		space += size;
+		ubh_brelse (ubh);
+		ubh = NULL;
+	}
+
+	/*
+	 * Read cylinder group (we read only first fragment from block
+	 * at this time) and prepare internal data structures for cg caching.
+	 */
+	if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL)))
+		goto failed;
+	for (i = 0; i < uspi->s_ncg; i++) 
+		sbi->s_ucg[i] = NULL;
+	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
+		sbi->s_ucpi[i] = NULL;
+		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
+	}
+	for (i = 0; i < uspi->s_ncg; i++) {
+		UFSD(("read cg %u\n", i))
+		if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
+			goto failed;
+		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
+			goto failed;
+#ifdef UFS_SUPER_DEBUG_MORE
+		ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
+#endif
+	}
+	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
+		if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL)))
+			goto failed;
+		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
+	}
+	sbi->s_cg_loaded = 0;
+	UFSD(("EXIT\n"))
+	return 1;
+
+failed:
+	if (base) kfree (base);
+	if (sbi->s_ucg) {
+		for (i = 0; i < uspi->s_ncg; i++)
+			if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]);
+		kfree (sbi->s_ucg);
+		for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
+			if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]);
+	}
+	UFSD(("EXIT (FAILED)\n"))
+	return 0;
+}
+
+/*
+ * Put on-disk structures associated with cylinder groups and 
+ * write them back to disk
+ */
+static void ufs_put_cylinder_structures (struct super_block *sb) {
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * ubh;
+	unsigned char * base, * space;
+	unsigned blks, size, i;
+	
+	UFSD(("ENTER\n"))
+	
+	uspi = sbi->s_uspi;
+
+	size = uspi->s_cssize;
+	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
+	base = space = (char*) sbi->s_csp[0];
+	for (i = 0; i < blks; i += uspi->s_fpb) {
+		size = uspi->s_bsize;
+		if (i + uspi->s_fpb > blks)
+			size = (blks - i) * uspi->s_fsize;
+		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
+		ubh_memcpyubh (ubh, space, size);
+		space += size;
+		ubh_mark_buffer_uptodate (ubh, 1);
+		ubh_mark_buffer_dirty (ubh);
+		ubh_brelse (ubh);
+	}
+	for (i = 0; i < sbi->s_cg_loaded; i++) {
+		ufs_put_cylinder (sb, i);
+		kfree (sbi->s_ucpi[i]);
+	}
+	for (; i < UFS_MAX_GROUP_LOADED; i++) 
+		kfree (sbi->s_ucpi[i]);
+	for (i = 0; i < uspi->s_ncg; i++) 
+		brelse (sbi->s_ucg[i]);
+	kfree (sbi->s_ucg);
+	kfree (base);
+	UFSD(("EXIT\n"))
+}
+
+static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct ufs_sb_info * sbi;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_second * usb2;
+	struct ufs_super_block_third * usb3;
+	struct ufs_super_block *usb;
+	struct ufs_buffer_head * ubh;	
+	struct inode *inode;
+	unsigned block_size, super_block_size;
+	unsigned flags;
+
+	uspi = NULL;
+	ubh = NULL;
+	flags = 0;
+	
+	UFSD(("ENTER\n"))
+		
+	sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
+	if (!sbi)
+		goto failed_nomem;
+	sb->s_fs_info = sbi;
+	memset(sbi, 0, sizeof(struct ufs_sb_info));
+
+	UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY)))
+	
+#ifndef CONFIG_UFS_FS_WRITE
+	if (!(sb->s_flags & MS_RDONLY)) {
+		printk("ufs was compiled with read-only support, "
+		"can't be mounted as read-write\n");
+		goto failed;
+	}
+#endif
+	/*
+	 * Set default mount options
+	 * Parse mount options
+	 */
+	sbi->s_mount_opt = 0;
+	ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK);
+	if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) {
+		printk("wrong mount options\n");
+		goto failed;
+	}
+	if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) {
+		if (!silent)
+			printk("You didn't specify the type of your ufs filesystem\n\n"
+			"mount -t ufs -o ufstype="
+			"sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|nextstep-cd|openstep ...\n\n"
+			">>>WARNING<<< Wrong ufstype may corrupt your filesystem, "
+			"default is ufstype=old\n");
+		ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD);
+	}
+
+	sbi->s_uspi = uspi =
+		kmalloc (sizeof(struct ufs_sb_private_info), GFP_KERNEL);
+	if (!uspi)
+		goto failed;
+
+	/* Keep the 2 GiB file limit. Some UFS variants need to override
+	   this, but as I don't know which, I'll let those in the know
+	   loosen the rules. */
+	   
+	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
+	case UFS_MOUNT_UFSTYPE_44BSD:
+		UFSD(("ufstype=44bsd\n"))
+		uspi->s_fsize = block_size = 512;
+		uspi->s_fmask = ~(512 - 1);
+		uspi->s_fshift = 9;
+		uspi->s_sbsize = super_block_size = 1536;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		break;
+	case UFS_MOUNT_UFSTYPE_UFS2:
+		UFSD(("ufstype=ufs2\n"))
+		uspi->s_fsize = block_size = 512;
+		uspi->s_fmask = ~(512 - 1);
+		uspi->s_fshift = 9;
+		uspi->s_sbsize = super_block_size = 1536;
+		uspi->s_sbbase =  0;
+		flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			printk(KERN_INFO "ufstype=ufs2 is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+ 		}
+		break;
+		
+	case UFS_MOUNT_UFSTYPE_SUN:
+		UFSD(("ufstype=sun\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		uspi->s_maxsymlinklen = 56;
+		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN;
+		break;
+
+	case UFS_MOUNT_UFSTYPE_SUNx86:
+		UFSD(("ufstype=sunx86\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		uspi->s_maxsymlinklen = 56;
+		flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN;
+		break;
+
+	case UFS_MOUNT_UFSTYPE_OLD:
+		UFSD(("ufstype=old\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=old is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_NEXTSTEP:
+		UFSD(("ufstype=nextstep\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=nextstep is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
+		UFSD(("ufstype=nextstep-cd\n"))
+		uspi->s_fsize = block_size = 2048;
+		uspi->s_fmask = ~(2048 - 1);
+		uspi->s_fshift = 11;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=nextstep-cd is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_OPENSTEP:
+		UFSD(("ufstype=openstep\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=openstep is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	
+	case UFS_MOUNT_UFSTYPE_HP:
+		UFSD(("ufstype=hp\n"))
+		uspi->s_fsize = block_size = 1024;
+		uspi->s_fmask = ~(1024 - 1);
+		uspi->s_fshift = 10;
+		uspi->s_sbsize = super_block_size = 2048;
+		uspi->s_sbbase = 0;
+		flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (!silent)
+				printk(KERN_INFO "ufstype=hp is supported read-only\n");
+			sb->s_flags |= MS_RDONLY;
+		}
+		break;
+	default:
+		if (!silent)
+			printk("unknown ufstype\n");
+		goto failed;
+	}
+	
+again:	
+	if (!sb_set_blocksize(sb, block_size)) {
+		printk(KERN_ERR "UFS: failed to set blocksize\n");
+		goto failed;
+	}
+
+	/*
+	 * read ufs super block from device
+	 */
+	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + SBLOCK_UFS2/block_size, super_block_size);
+	}
+	else {
+		ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
+	}
+	if (!ubh)
+		goto failed;
+
+	
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb2 = ubh_get_usb_second(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
+
+	/*
+	 * Check ufs magic number
+	 */
+	sbi->s_bytesex = BYTESEX_LE;
+	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
+		case UFS_MAGIC:
+		case UFS2_MAGIC:
+		case UFS_MAGIC_LFN:
+		case UFS_MAGIC_FEA:
+		case UFS_MAGIC_4GB:
+			goto magic_found;
+	}
+	sbi->s_bytesex = BYTESEX_BE;
+	switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
+		case UFS_MAGIC:
+		case UFS2_MAGIC:
+		case UFS_MAGIC_LFN:
+		case UFS_MAGIC_FEA:
+		case UFS_MAGIC_4GB:
+			goto magic_found;
+	}
+
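+	/*
+	 * NeXTstep/OpenStep media apparently may keep the superblock
+	 * further into the device; retry the read at successive
+	 * 8-fragment offsets (sbbase up to 256) before giving up.
+	 */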
+	if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) 
+	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) 
+	  || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) 
+	  && uspi->s_sbbase < 256) {
+		ubh_brelse_uspi(uspi);
+		ubh = NULL;
+		uspi->s_sbbase += 8;
+		goto again;
+	}
+	if (!silent)
+		printk("ufs_read_super: bad magic number\n");
+	goto failed;
+
+magic_found:
+	/*
+	 * Check block and fragment sizes
+	 */
+	uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize);
+	uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize);
+	uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize);
+	uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
+	uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
+
+	if (uspi->s_fsize & (uspi->s_fsize - 1)) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is not a power of 2\n",
+			uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_fsize < 512) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is too small\n",
+			uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_fsize > 4096) {
+		printk(KERN_ERR "ufs_read_super: fragment size %u is too large\n",
+			uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_bsize & (uspi->s_bsize - 1)) {
+		printk(KERN_ERR "ufs_read_super: block size %u is not a power of 2\n",
+			uspi->s_bsize);
+		goto failed;
+	}
+	if (uspi->s_bsize < 4096) {
+		printk(KERN_ERR "ufs_read_super: block size %u is too small\n",
+			uspi->s_bsize);
+		goto failed;
+	}
+	if (uspi->s_bsize / uspi->s_fsize > 8) {
+		printk(KERN_ERR "ufs_read_super: too many fragments per block (%u)\n",
+			uspi->s_bsize / uspi->s_fsize);
+		goto failed;
+	}
+	if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) {
+		ubh_brelse_uspi(uspi);
+		ubh = NULL;
+		block_size = uspi->s_fsize;
+		super_block_size = uspi->s_sbsize;
+		UFSD(("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size))
+		goto again;
+	}
+
+#ifdef UFS_SUPER_DEBUG_MORE
+        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
+		ufs2_print_super_stuff(sb,usb);
+        else
+		ufs_print_super_stuff(sb, usb1, usb2, usb3);
+#endif
+
+	/*
+	 * Check if the file system was correctly unmounted.
+	 * If not, make it read-only.
+	 */
+	if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
+	  ((flags & UFS_ST_MASK) == UFS_ST_OLD) ||
+	  (((flags & UFS_ST_MASK) == UFS_ST_SUN || 
+	  (flags & UFS_ST_MASK) == UFS_ST_SUNx86) && 
+	  (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
+		switch(usb1->fs_clean) {
+		case UFS_FSCLEAN:
+			UFSD(("fs is clean\n"))
+			break;
+		case UFS_FSSTABLE:
+			UFSD(("fs is stable\n"))
+			break;
+		case UFS_FSOSF1:
+			UFSD(("fs is DEC OSF/1\n"))
+			break;
+		case UFS_FSACTIVE:
+			printk("ufs_read_super: fs is active\n");
+			sb->s_flags |= MS_RDONLY;
+			break;
+		case UFS_FSBAD:
+			printk("ufs_read_super: fs is bad\n");
+			sb->s_flags |= MS_RDONLY;
+			break;
+		default:
+			printk("ufs_read_super: can't grok fs_clean 0x%x\n", usb1->fs_clean);
+			sb->s_flags |= MS_RDONLY;
+			break;
+		}
+	}
+	else {
+		printk("ufs_read_super: fs needs fsck\n");
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	/*
+	 * Read ufs_super_block into internal data structures
+	 */
+	sb->s_op = &ufs_super_ops;
+	sb->dq_op = NULL; /***/
+	sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
+
+	uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno);
+	uspi->s_cblkno = fs32_to_cpu(sb, usb1->fs_cblkno);
+	uspi->s_iblkno = fs32_to_cpu(sb, usb1->fs_iblkno);
+	uspi->s_dblkno = fs32_to_cpu(sb, usb1->fs_dblkno);
+	uspi->s_cgoffset = fs32_to_cpu(sb, usb1->fs_cgoffset);
+	uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
+
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		uspi->s_u2_size  = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size);
+		uspi->s_u2_dsize = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
+	}
+	else {
+		uspi->s_size  =  fs32_to_cpu(sb, usb1->fs_size);
+		uspi->s_dsize =  fs32_to_cpu(sb, usb1->fs_dsize);
+	}
+
+	uspi->s_ncg = fs32_to_cpu(sb, usb1->fs_ncg);
+	/* s_bsize already set */
+	/* s_fsize already set */
+	uspi->s_fpb = fs32_to_cpu(sb, usb1->fs_frag);
+	uspi->s_minfree = fs32_to_cpu(sb, usb1->fs_minfree);
+	uspi->s_bmask = fs32_to_cpu(sb, usb1->fs_bmask);
+	uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
+	uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift);
+	uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
+	UFSD(("uspi->s_bshift = %d, uspi->s_fshift = %d\n", uspi->s_bshift,
+		uspi->s_fshift));
+	uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift);
+	uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb);
+	/* s_sbsize already set */
+	uspi->s_csmask = fs32_to_cpu(sb, usb1->fs_csmask);
+	uspi->s_csshift = fs32_to_cpu(sb, usb1->fs_csshift);
+	uspi->s_nindir = fs32_to_cpu(sb, usb1->fs_nindir);
+	uspi->s_inopb = fs32_to_cpu(sb, usb1->fs_inopb);
+	uspi->s_nspf = fs32_to_cpu(sb, usb1->fs_nspf);
+	uspi->s_npsect = ufs_get_fs_npsect(sb, usb1, usb3);
+	uspi->s_interleave = fs32_to_cpu(sb, usb1->fs_interleave);
+	uspi->s_trackskew = fs32_to_cpu(sb, usb1->fs_trackskew);
+	uspi->s_csaddr = fs32_to_cpu(sb, usb1->fs_csaddr);
+	uspi->s_cssize = fs32_to_cpu(sb, usb1->fs_cssize);
+	uspi->s_cgsize = fs32_to_cpu(sb, usb1->fs_cgsize);
+	uspi->s_ntrak = fs32_to_cpu(sb, usb1->fs_ntrak);
+	uspi->s_nsect = fs32_to_cpu(sb, usb1->fs_nsect);
+	uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc);
+	uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg);
+	uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg);
+	uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_cpc);
+	uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize);
+	uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3);
+	uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3);
+	uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat);
+	uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos);
+	uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
+	uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
+
+	/*
+	 * Compute other frequently used values
+	 */
+	uspi->s_fpbmask = uspi->s_fpb - 1;
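+	/* UFS2 uses 64-bit block pointers, UFS1 32-bit ones, hence the
+	   different addresses-per-block shift */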
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		uspi->s_apbshift = uspi->s_bshift - 3;
+	}
+	else {
+		uspi->s_apbshift = uspi->s_bshift - 2;
+	}
+	uspi->s_2apbshift = uspi->s_apbshift * 2;
+	uspi->s_3apbshift = uspi->s_apbshift * 3;
+	uspi->s_apb = 1 << uspi->s_apbshift;
+	uspi->s_2apb = 1 << uspi->s_2apbshift;
+	uspi->s_3apb = 1 << uspi->s_3apbshift;
+	uspi->s_apbmask = uspi->s_apb - 1;
+	uspi->s_nspfshift = uspi->s_fshift - UFS_SECTOR_BITS;
+	uspi->s_nspb = uspi->s_nspf << uspi->s_fpbshift;
+	uspi->s_inopf = uspi->s_inopb >> uspi->s_fpbshift;
+	uspi->s_bpf = uspi->s_fsize << 3;
+	uspi->s_bpfshift = uspi->s_fshift + 3;
+	uspi->s_bpfmask = uspi->s_bpf - 1;
+	if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) ==
+	    UFS_MOUNT_UFSTYPE_44BSD)
+		uspi->s_maxsymlinklen =
+		    fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen);
+	
+	sbi->s_flags = flags;
+
+	inode = iget(sb, UFS_ROOTINO);
+	if (!inode || is_bad_inode(inode))
+		goto failed;
+	sb->s_root = d_alloc_root(inode);
+	if (!sb->s_root)
+		goto dalloc_failed;
+
+
+	/*
+	 * Read cylinder group structures
+	 */
+	if (!(sb->s_flags & MS_RDONLY))
+		if (!ufs_read_cylinder_structures(sb))
+			goto failed;
+
+	UFSD(("EXIT\n"))
+	return 0;
+
+dalloc_failed:
+	iput(inode);
+failed:
+	if (ubh) ubh_brelse_uspi (uspi);
+	if (uspi) kfree (uspi);
+	if (sbi) kfree(sbi);
+	sb->s_fs_info = NULL;
+	UFSD(("EXIT (FAILED)\n"))
+	return -EINVAL;
+
+failed_nomem:
+	UFSD(("EXIT (NOMEM)\n"))
+	return -ENOMEM;
+}
+
+static void ufs_write_super (struct super_block *sb) {
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_third * usb3;
+	unsigned flags;
+
+	lock_kernel();
+
+	UFSD(("ENTER\n"))
+	flags = UFS_SB(sb)->s_flags;
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+		if ((flags & UFS_ST_MASK) == UFS_ST_SUN 
+		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+			ufs_set_fs_state(sb, usb1, usb3,
+					UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
+		ubh_mark_buffer_dirty (USPI_UBH);
+	}
+	sb->s_dirt = 0;
+	UFSD(("EXIT\n"))
+	unlock_kernel();
+}
+
+static void ufs_put_super (struct super_block *sb)
+{
+	struct ufs_sb_info * sbi = UFS_SB(sb);
+		
+	UFSD(("ENTER\n"))
+
+	if (!(sb->s_flags & MS_RDONLY))
+		ufs_put_cylinder_structures (sb);
+	
+	ubh_brelse_uspi (sbi->s_uspi);
+	kfree (sbi->s_uspi);
+	kfree (sbi);
+	sb->s_fs_info = NULL;
+	return;
+}
+
+
+static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block_third * usb3;
+	unsigned new_mount_opt, ufstype;
+	unsigned flags;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	flags = UFS_SB(sb)->s_flags;
+	usb1 = ubh_get_usb_first(USPI_UBH);
+	usb3 = ubh_get_usb_third(USPI_UBH);
+	
+	/*
+	 * Allow the "check" option to be passed as a remount option.
+	 * It is not possible to change ufstype option during remount
+	 */
+	ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
+	new_mount_opt = 0;
+	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
+	if (!ufs_parse_options (data, &new_mount_opt))
+		return -EINVAL;
+	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
+		new_mount_opt |= ufstype;
+	}
+	else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
+		printk("ufstype can't be changed during remount\n");
+		return -EINVAL;
+	}
+
+	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
+		UFS_SB(sb)->s_mount_opt = new_mount_opt;
+		return 0;
+	}
+	
+	/*
+	 * fs was mounted as rw, remounting ro
+	 */
+	if (*mount_flags & MS_RDONLY) {
+		ufs_put_cylinder_structures(sb);
+		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+		if ((flags & UFS_ST_MASK) == UFS_ST_SUN
+		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) 
+			ufs_set_fs_state(sb, usb1, usb3,
+				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
+		ubh_mark_buffer_dirty (USPI_UBH);
+		sb->s_dirt = 0;
+		sb->s_flags |= MS_RDONLY;
+	}
+	/*
+	 * fs was mounted as ro, remounting rw
+	 */
+	else {
+#ifndef CONFIG_UFS_FS_WRITE
+		printk("ufs was compiled with read-only support, "
+		"can't be mounted as read-write\n");
+		return -EINVAL;
+#else
+		if (ufstype != UFS_MOUNT_UFSTYPE_SUN && 
+		    ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
+		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86) {
+			printk("this ufstype is supported read-only\n");
+			return -EINVAL;
+		}
+		if (!ufs_read_cylinder_structures (sb)) {
+			printk("failed during remounting\n");
+			return -EPERM;
+		}
+		sb->s_flags &= ~MS_RDONLY;
+#endif
+	}
+	UFS_SB(sb)->s_mount_opt = new_mount_opt;
+	return 0;
+}
+
+static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
+{
+	struct ufs_sb_private_info * uspi;
+	struct ufs_super_block_first * usb1;
+	struct ufs_super_block * usb;
+	unsigned  flags = 0;
+
+	lock_kernel();
+
+	uspi = UFS_SB(sb)->s_uspi;
+	usb1 = ubh_get_usb_first (USPI_UBH);
+	usb  = (struct ufs_super_block *)
+		((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
+	
+	flags = UFS_SB(sb)->s_flags;
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+		buf->f_type = UFS2_MAGIC;
+		buf->f_blocks = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize);
+		buf->f_bfree = ufs_blkstofrags(fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree)) +
+			fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nffree);
+		buf->f_ffree = fs64_to_cpu(sb,
+        		usb->fs_u11.fs_u2.fs_cstotal.cs_nifree);
+	}
+	else {
+		buf->f_type = UFS_MAGIC;
+		buf->f_blocks = uspi->s_dsize;
+		buf->f_bfree = ufs_blkstofrags(fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)) +
+			fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
+		buf->f_ffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
+	}
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
+		? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+	buf->f_files = uspi->s_ncg * uspi->s_ipg;
+	buf->f_namelen = UFS_MAXNAMLEN;
+
+	unlock_kernel();
+
+	return 0;
+}
+
+static kmem_cache_t * ufs_inode_cachep;
+
+static struct inode *ufs_alloc_inode(struct super_block *sb)
+{
+	struct ufs_inode_info *ei;
+	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	ei->vfs_inode.i_version = 1;
+	return &ei->vfs_inode;
+}
+
+static void ufs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+static int init_inodecache(void)
+{
+	ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
+					     sizeof(struct ufs_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     init_once, NULL);
+	if (ufs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+static void destroy_inodecache(void)
+{
+	if (kmem_cache_destroy(ufs_inode_cachep))
+		printk(KERN_INFO "ufs_inode_cache: not all structures were freed\n");
+}
+
+#ifdef CONFIG_QUOTA
+static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t);
+static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
+#endif
+
+static struct super_operations ufs_super_ops = {
+	.alloc_inode	= ufs_alloc_inode,
+	.destroy_inode	= ufs_destroy_inode,
+	.read_inode	= ufs_read_inode,
+	.write_inode	= ufs_write_inode,
+	.delete_inode	= ufs_delete_inode,
+	.put_super	= ufs_put_super,
+	.write_super	= ufs_write_super,
+	.statfs		= ufs_statfs,
+	.remount_fs	= ufs_remount,
+#ifdef CONFIG_QUOTA
+	.quota_read	= ufs_quota_read,
+	.quota_write	= ufs_quota_write,
+#endif
+};
+
+#ifdef CONFIG_QUOTA
+
+/* Read data from the quota file - avoid the page cache and such because we
+ * cannot afford acquiring the locks... As quota files are never truncated and
+ * the quota code itself serializes the operations (and no one else should
+ * touch the files), we don't have to be afraid of races */
+static ssize_t ufs_quota_read(struct super_block *sb, int type, char *data,
+			       size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> sb->s_blocksize_bits;
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t toread;
+	struct buffer_head *bh;
+	loff_t i_size = i_size_read(inode);
+
+	if (off > i_size)
+		return 0;
+	if (off+len > i_size)
+		len = i_size-off;
+	toread = len;
+	while (toread > 0) {
+		tocopy = sb->s_blocksize - offset < toread ?
+				sb->s_blocksize - offset : toread;
+
+		bh = ufs_bread(inode, blk, 0, &err);
+		if (err)
+			return err;
+		if (!bh)	/* A hole? */
+			memset(data, 0, tocopy);
+		else {
+			memcpy(data, bh->b_data+offset, tocopy);
+			brelse(bh);
+		}
+		offset = 0;
+		toread -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+	return len;
+}
+
+/* Write to quotafile */
+static ssize_t ufs_quota_write(struct super_block *sb, int type,
+				const char *data, size_t len, loff_t off)
+{
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	sector_t blk = off >> sb->s_blocksize_bits;
+	int err = 0;
+	int offset = off & (sb->s_blocksize - 1);
+	int tocopy;
+	size_t towrite = len;
+	struct buffer_head *bh;
+
+	down(&inode->i_sem);
+	while (towrite > 0) {
+		tocopy = sb->s_blocksize - offset < towrite ?
+				sb->s_blocksize - offset : towrite;
+
+		bh = ufs_bread(inode, blk, 1, &err);
+		if (!bh)
+			goto out;
+		lock_buffer(bh);
+		memcpy(bh->b_data+offset, data, tocopy);
+		flush_dcache_page(bh->b_page);
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+		unlock_buffer(bh);
+		brelse(bh);
+		offset = 0;
+		towrite -= tocopy;
+		data += tocopy;
+		blk++;
+	}
+out:
+	if (len == towrite)
+		return err;
+	if (inode->i_size < off+len-towrite)
+		i_size_write(inode, off+len-towrite);
+	inode->i_version++;
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	up(&inode->i_sem);
+	return len - towrite;
+}
+
+#endif
+
+static struct super_block *ufs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
+}
+
+static struct file_system_type ufs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "ufs",
+	.get_sb		= ufs_get_sb,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static int __init init_ufs_fs(void)
+{
+	int err = init_inodecache();
+	if (err)
+		goto out1;
+	err = register_filesystem(&ufs_fs_type);
+	if (err)
+		goto out;
+	return 0;
+out:
+	destroy_inodecache();
+out1:
+	return err;
+}
+
+static void __exit exit_ufs_fs(void)
+{
+	unregister_filesystem(&ufs_fs_type);
+	destroy_inodecache();
+}
+
+module_init(init_ufs_fs)
+module_exit(exit_ufs_fs)
+MODULE_LICENSE("GPL");
diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
new file mode 100644
index 0000000..1683d2b
--- /dev/null
+++ b/fs/ufs/swab.h
@@ -0,0 +1,133 @@
+/*
+ *  linux/fs/ufs/swab.h
+ *
+ * Copyright (C) 1997, 1998 Francois-Rene Rideau <fare@tunes.org>
+ * Copyright (C) 1998 Jakub Jelinek <jj@ultra.linux.cz>
+ * Copyright (C) 2001 Christoph Hellwig <hch@infradead.org>
+ */
+
+#ifndef _UFS_SWAB_H
+#define _UFS_SWAB_H
+
+/*
+ * Notes:
+ *    HERE WE ASSUME EITHER BIG OR LITTLE ENDIAN UFSes
+ *    in case there are ufs implementations that have strange bytesexes,
+ *    you'll need to modify code here as well as in ufs_super.c and ufs_fs.h
+ *    to support them.
+ */
+
+enum {
+	BYTESEX_LE,
+	BYTESEX_BE
+};
+
+static inline u64
+fs64_to_cpu(struct super_block *sbp, __fs64 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le64_to_cpu((__force __le64)n);
+	else
+		return be64_to_cpu((__force __be64)n);
+}
+
+static inline __fs64
+cpu_to_fs64(struct super_block *sbp, u64 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs64)cpu_to_le64(n);
+	else
+		return (__force __fs64)cpu_to_be64(n);
+}
+
+static inline void
+fs64_add(struct super_block *sbp, __fs64 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)+d);
+	else
+		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)+d);
+}
+
+static inline void
+fs64_sub(struct super_block *sbp, __fs64 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)-d);
+	else
+		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)-d);
+}
+
+static inline u32
+fs32_to_cpu(struct super_block *sbp, __fs32 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le32_to_cpu((__force __le32)n);
+	else
+		return be32_to_cpu((__force __be32)n);
+}
+
+static inline __fs32
+cpu_to_fs32(struct super_block *sbp, u32 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs32)cpu_to_le32(n);
+	else
+		return (__force __fs32)cpu_to_be32(n);
+}
+
+static inline void
+fs32_add(struct super_block *sbp, __fs32 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
+	else
+		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
+}
+
+static inline void
+fs32_sub(struct super_block *sbp, __fs32 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
+	else
+		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
+}
+
+static inline u16
+fs16_to_cpu(struct super_block *sbp, __fs16 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return le16_to_cpu((__force __le16)n);
+	else
+		return be16_to_cpu((__force __be16)n);
+}
+
+static inline __fs16
+cpu_to_fs16(struct super_block *sbp, u16 n)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		return (__force __fs16)cpu_to_le16(n);
+	else
+		return (__force __fs16)cpu_to_be16(n);
+}
+
+static inline void
+fs16_add(struct super_block *sbp, __fs16 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+	else
+		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+}
+
+static inline void
+fs16_sub(struct super_block *sbp, __fs16 *n, int d)
+{
+	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
+	else
+		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
+}
+
+#endif /* _UFS_SWAB_H */
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
new file mode 100644
index 0000000..a0e4914
--- /dev/null
+++ b/fs/ufs/symlink.c
@@ -0,0 +1,42 @@
+/*
+ *  linux/fs/ufs/symlink.c
+ *
+ * Only fast symlinks left here - the rest is done by generic code. AV, 1999
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/symlink.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/symlink.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext2 symlink handling code
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/ufs_fs.h>
+
+static int ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct ufs_inode_info *p = UFS_I(dentry->d_inode);
+	nd_set_link(nd, (char*)p->i_u1.i_symlink);
+	return 0;
+}
+
+struct inode_operations ufs_fast_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= ufs_follow_link,
+};
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
new file mode 100644
index 0000000..e312bf8
--- /dev/null
+++ b/fs/ufs/truncate.c
@@ -0,0 +1,477 @@
+/*
+ *  linux/fs/ufs/truncate.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ *
+ *  from
+ *
+ *  linux/fs/ext2/truncate.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/truncate.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+/*
+ * Real random numbers for secure rm added 94/02/18
+ * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ufs_fs.h>
+#include <linux/fcntl.h>
+#include <linux/time.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_TRUNCATE_DEBUG
+
+#ifdef UFS_TRUNCATE_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+ 
+/*
+ * Secure deletion currently doesn't work. It interacts very badly
+ * with buffers shared with memory mappings, and for that reason
+ * can't be done in the truncate() routines. It should instead be
+ * done separately in "release()" before calling the truncate routines
+ * that will release the actual file blocks.
+ *
+ *		Linus
+ */
+
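+/*
+ * Number of blocks/fragments needed to cover i_size, i.e. the index of
+ * the first block/fragment lying entirely beyond the end of the file.
+ */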
+#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
+#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
+
+#define DATA_BUFFER_USED(bh) \
+	(atomic_read(&bh->b_count)>1 || buffer_locked(bh))
+
+static int ufs_trunc_direct (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	__fs32 * p;
+	unsigned frag1, frag2, frag3, frag4, block1, block2;
+	unsigned frag_to_free, free_count;
+	unsigned i, j, tmp;
+	int retry;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	
+	frag_to_free = 0;
+	free_count = 0;
+	retry = 0;
+	
+	frag1 = DIRECT_FRAGMENT;
+	frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
+	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
+	frag3 = frag4 & ~uspi->s_fpbmask;
+	block1 = block2 = 0;
+	if (frag2 > frag3) {
+		frag2 = frag4;
+		frag3 = frag4 = 0;
+	}
+	else if (frag2 < frag3) {
+		block1 = ufs_fragstoblks (frag2);
+		block2 = ufs_fragstoblks (frag3);
+	}
+
+	UFSD(("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4))
+
+	if (frag1 >= frag2)
+		goto next1;		
+
+	/*
+	 * Free first free fragments
+	 */
+	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag1);
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp )
+		ufs_panic (sb, "ufs_trunc_direct", "internal error");
+	frag1 = ufs_fragnum (frag1);
+	frag2 = ufs_fragnum (frag2);
+	for (j = frag1; j < frag2; j++) {
+		bh = sb_find_get_block (sb, tmp + j);
+		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+			retry = 1;
+			brelse (bh);
+			goto next1;
+		}
+		bforget (bh);
+	}
+	inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
+	mark_inode_dirty(inode);
+	ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
+	frag_to_free = tmp + frag1;
+
+next1:
+	/*
+	 * Free whole blocks
+	 */
+	for (i = block1 ; i < block2; i++) {
+		p = ufsi->i_u1.i_data + i;
+		tmp = fs32_to_cpu(sb, *p);
+		if (!tmp)
+			continue;
+		for (j = 0; j < uspi->s_fpb; j++) {
+			bh = sb_find_get_block(sb, tmp + j);
+			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+				retry = 1;
+				brelse (bh);
+				goto next2;
+			}
+			bforget (bh);
+		}
+		*p = 0;
+		inode->i_blocks -= uspi->s_nspb;
+		mark_inode_dirty(inode);
+		if (free_count == 0) {
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		} else if (free_count > 0 && frag_to_free == tmp - free_count)
+			free_count += uspi->s_fpb;
+		else {
+			ufs_free_blocks (inode, frag_to_free, free_count);
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		}
+next2:;
+	}
+	
+	if (free_count > 0)
+		ufs_free_blocks (inode, frag_to_free, free_count);
+
+	if (frag3 >= frag4)
+		goto next3;
+
+	/*
+	 * Free last free fragments
+	 */
+	p = ufsi->i_u1.i_data + ufs_fragstoblks (frag3);
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp )
+		ufs_panic(sb, "ufs_trunc_direct", "internal error");
+	frag4 = ufs_fragnum (frag4);
+	for (j = 0; j < frag4; j++) {
+		bh = sb_find_get_block (sb, tmp + j);
+		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
+			retry = 1;
+			brelse (bh);
+			goto next1;
+		}
+		bforget (bh);
+	}
+	*p = 0;
+	inode->i_blocks -= frag4 << uspi->s_nspfshift;
+	mark_inode_dirty(inode);
+	ufs_free_fragments (inode, tmp, frag4);
+next3:
+
+	UFSD(("EXIT\n"))
+	return retry;
+}
+
+
+static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * ind_ubh;
+	struct buffer_head * bh;
+	__fs32 * ind;
+	unsigned indirect_block, i, j, tmp;
+	unsigned frag_to_free, free_count;
+	int retry;
+
+	UFSD(("ENTER\n"))
+		
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	frag_to_free = 0;
+	free_count = 0;
+	retry = 0;
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp)
+		return 0;
+	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (ind_ubh);
+		return 1;
+	}
+	if (!ind_ubh) {
+		*p = 0;
+		return 0;
+	}
+
+	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
+	for (i = indirect_block; i < uspi->s_apb; i++) {
+		ind = ubh_get_addr32 (ind_ubh, i);
+		tmp = fs32_to_cpu(sb, *ind);
+		if (!tmp)
+			continue;
+		for (j = 0; j < uspi->s_fpb; j++) {
+			bh = sb_find_get_block(sb, tmp + j);
+			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
+				retry = 1;
+				brelse (bh);
+				goto next;
+			}
+			bforget (bh);
+		}	
+		*ind = 0;
+		ubh_mark_buffer_dirty(ind_ubh);
+		if (free_count == 0) {
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		} else if (free_count > 0 && frag_to_free == tmp - free_count)
+			free_count += uspi->s_fpb;
+		else {
+			ufs_free_blocks (inode, frag_to_free, free_count);
+			frag_to_free = tmp;
+			free_count = uspi->s_fpb;
+		}
+		inode->i_blocks -= uspi->s_nspb;
+		mark_inode_dirty(inode);
+next:;
+	}
+
+	if (free_count > 0) {
+		ufs_free_blocks (inode, frag_to_free, free_count);
+	}
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32(ind_ubh,i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(ind_ubh) != 1) {
+			retry = 1;
+		}
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(ind_ubh);
+			ind_ubh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
+		ubh_wait_on_buffer (ind_ubh);
+		ubh_ll_rw_block (WRITE, 1, &ind_ubh);
+		ubh_wait_on_buffer (ind_ubh);
+	}
+	ubh_brelse (ind_ubh);
+	
+	UFSD(("EXIT\n"))
+	
+	return retry;
+}
+
+static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
+{
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * dind_bh;
+	unsigned i, tmp, dindirect_block;
+	__fs32 * dind;
+	int retry = 0;
+	
+	UFSD(("ENTER\n"))
+	
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	dindirect_block = (DIRECT_BLOCK > offset) 
+		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
+	retry = 0;
+	
+	tmp = fs32_to_cpu(sb, *p);
+	if (!tmp)
+		return 0;
+	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (dind_bh);
+		return 1;
+	}
+	if (!dind_bh) {
+		*p = 0;
+		return 0;
+	}
+
+	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
+		dind = ubh_get_addr32 (dind_bh, i);
+		tmp = fs32_to_cpu(sb, *dind);
+		if (!tmp)
+			continue;
+		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
+		ubh_mark_buffer_dirty(dind_bh);
+	}
+
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32 (dind_bh, i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(dind_bh) != 1)
+			retry = 1;
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(dind_bh);
+			dind_bh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
+		ubh_wait_on_buffer (dind_bh);
+		ubh_ll_rw_block (WRITE, 1, &dind_bh);
+		ubh_wait_on_buffer (dind_bh);
+	}
+	ubh_brelse (dind_bh);
+	
+	UFSD(("EXIT\n"))
+	
+	return retry;
+}
+
+static int ufs_trunc_tindirect (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct ufs_buffer_head * tind_bh;
+	unsigned tindirect_block, tmp, i;
+	__fs32 * tind, * p;
+	int retry;
+	
+	UFSD(("ENTER\n"))
+
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+	retry = 0;
+	
+	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
+		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
+	p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
+	if (!(tmp = fs32_to_cpu(sb, *p)))
+		return 0;
+	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
+	if (tmp != fs32_to_cpu(sb, *p)) {
+		ubh_brelse (tind_bh);
+		return 1;
+	}
+	if (!tind_bh) {
+		*p = 0;
+		return 0;
+	}
+
+	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
+		tind = ubh_get_addr32 (tind_bh, i);
+		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR + 
+			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
+		ubh_mark_buffer_dirty(tind_bh);
+	}
+	for (i = 0; i < uspi->s_apb; i++)
+		if (*ubh_get_addr32 (tind_bh, i))
+			break;
+	if (i >= uspi->s_apb) {
+		if (ubh_max_bcount(tind_bh) != 1)
+			retry = 1;
+		else {
+			tmp = fs32_to_cpu(sb, *p);
+			*p = 0;
+			inode->i_blocks -= uspi->s_nspb;
+			mark_inode_dirty(inode);
+			ufs_free_blocks (inode, tmp, uspi->s_fpb);
+			ubh_bforget(tind_bh);
+			tind_bh = NULL;
+		}
+	}
+	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
+		ubh_wait_on_buffer (tind_bh);
+		ubh_ll_rw_block (WRITE, 1, &tind_bh);
+		ubh_wait_on_buffer (tind_bh);
+	}
+	ubh_brelse (tind_bh);
+	
+	UFSD(("EXIT\n"))
+	return retry;
+}
+		
+void ufs_truncate (struct inode * inode)
+{
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	struct super_block * sb;
+	struct ufs_sb_private_info * uspi;
+	struct buffer_head * bh;
+	unsigned offset;
+	int err, retry;
+	
+	UFSD(("ENTER\n"))
+	sb = inode->i_sb;
+	uspi = UFS_SB(sb)->s_uspi;
+
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+	lock_kernel();
+	while (1) {
+		retry = ufs_trunc_direct(inode);
+		retry |= ufs_trunc_indirect (inode, UFS_IND_BLOCK,
+			(__fs32 *) &ufsi->i_u1.i_data[UFS_IND_BLOCK]);
+		retry |= ufs_trunc_dindirect (inode, UFS_IND_BLOCK + uspi->s_apb,
+			(__fs32 *) &ufsi->i_u1.i_data[UFS_DIND_BLOCK]);
+		retry |= ufs_trunc_tindirect (inode);
+		if (!retry)
+			break;
+		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
+			ufs_sync_inode (inode);
+		blk_run_address_space(inode->i_mapping);
+		yield();
+	}
+	offset = inode->i_size & uspi->s_fshift;
+	if (offset) {
+		bh = ufs_bread (inode, inode->i_size >> uspi->s_fshift, 0, &err);
+		if (bh) {
+			memset (bh->b_data + offset, 0, uspi->s_fsize - offset);
+			mark_buffer_dirty (bh);
+			brelse (bh);
+		}
+	}
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	unlock_kernel();
+	mark_inode_dirty(inode);
+	UFSD(("EXIT\n"))
+}
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
new file mode 100644
index 0000000..59acc8f
--- /dev/null
+++ b/fs/ufs/util.c
@@ -0,0 +1,257 @@
+/*
+ *  linux/fs/ufs/util.c
+ *
+ * Copyright (C) 1998
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+ 
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ufs_fs.h>
+#include <linux/buffer_head.h>
+
+#include "swab.h"
+#include "util.h"
+
+#undef UFS_UTILS_DEBUG
+
+#ifdef UFS_UTILS_DEBUG
+#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
+#else
+#define UFSD(x)
+#endif
+
+
+struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
+	struct super_block *sb, u64 fragment, u64 size)
+{
+	struct ufs_buffer_head * ubh;
+	unsigned i, j ;
+	u64  count = 0;
+	if (size & ~uspi->s_fmask)
+		return NULL;
+	count = size >> uspi->s_fshift;
+	if (count > UFS_MAXFRAG)
+		return NULL;
+	ubh = (struct ufs_buffer_head *)
+		kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
+	if (!ubh)
+		return NULL;
+	ubh->fragment = fragment;
+	ubh->count = count;
+	for (i = 0; i < count; i++)
+		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
+			goto failed;
+	for (; i < UFS_MAXFRAG; i++)
+		ubh->bh[i] = NULL;
+	return ubh;
+failed:
+	for (j = 0; j < i; j++)
+		brelse (ubh->bh[j]);
+	kfree(ubh);
+	return NULL;
+}
+
+struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
+	struct super_block *sb, u64 fragment, u64 size)
+{
+	unsigned i, j;
+	u64 count = 0;
+	if (size & ~uspi->s_fmask)
+		return NULL;
+	count = size >> uspi->s_fshift;
+	if (count <= 0 || count > UFS_MAXFRAG)
+		return NULL;
+	USPI_UBH->fragment = fragment;
+	USPI_UBH->count = count;
+	for (i = 0; i < count; i++)
+		if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
+			goto failed;
+	for (; i < UFS_MAXFRAG; i++)
+		USPI_UBH->bh[i] = NULL;
+	return USPI_UBH;
+failed:
+	for (j = 0; j < i; j++)
+		brelse (USPI_UBH->bh[j]);
+	return NULL;
+}
+
+void ubh_brelse (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for (i = 0; i < ubh->count; i++)
+		brelse (ubh->bh[i]);
+	kfree (ubh);
+}
+
+void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
+{
+	unsigned i;
+	if (!USPI_UBH)
+		return;
+	for ( i = 0; i < USPI_UBH->count; i++ ) {
+		brelse (USPI_UBH->bh[i]);
+		USPI_UBH->bh[i] = NULL;
+	}
+}
+
+void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		mark_buffer_dirty (ubh->bh[i]);
+}
+
+void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	if (flag) {
+		for ( i = 0; i < ubh->count; i++ )
+			set_buffer_uptodate (ubh->bh[i]);
+	} else {
+		for ( i = 0; i < ubh->count; i++ )
+			clear_buffer_uptodate (ubh->bh[i]);
+	}
+}
+
+void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[])
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < nr; i++ )
+		ll_rw_block (rw, ubh[i]->count, ubh[i]->bh);
+}
+
+void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh)
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		wait_on_buffer (ubh->bh[i]);
+}
+
+unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	unsigned max = 0;
+	if (!ubh)
+		return 0;
+	for ( i = 0; i < ubh->count; i++ ) 
+		if ( atomic_read(&ubh->bh[i]->b_count) > max )
+			max = atomic_read(&ubh->bh[i]->b_count);
+	return max;
+}
+
+void ubh_bforget (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	if (!ubh) 
+		return;
+	for ( i = 0; i < ubh->count; i++ )
+		if (ubh->bh[i])
+			bforget (ubh->bh[i]);
+}
+ 
+int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
+{
+	unsigned i;
+	unsigned result = 0;
+	if (!ubh)
+		return 0;
+	for ( i = 0; i < ubh->count; i++ )
+		result |= buffer_dirty(ubh->bh[i]);
+	return result;
+}
+
+void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi, 
+	unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
+{
+	unsigned len, bhno;
+	if (size > (ubh->count << uspi->s_fshift))
+		size = ubh->count << uspi->s_fshift;
+	bhno = 0;
+	while (size) {
+		len = min_t(unsigned int, size, uspi->s_fsize);
+		memcpy (mem, ubh->bh[bhno]->b_data, len);
+		mem += uspi->s_fsize;
+		size -= len;
+		bhno++;
+	}
+}
+
+void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi, 
+	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
+{
+	unsigned len, bhno;
+	if (size > (ubh->count << uspi->s_fshift))
+		size = ubh->count << uspi->s_fshift;
+	bhno = 0;
+	while (size) {
+		len = min_t(unsigned int, size, uspi->s_fsize);
+		memcpy (ubh->bh[bhno]->b_data, mem, len);
+		mem += uspi->s_fsize;
+		size -= len;
+		bhno++;
+	}
+}
+
+dev_t
+ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
+{
+	__fs32 fs32;
+	dev_t dev;
+
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		fs32 = ufsi->i_u1.i_data[1];
+	else
+		fs32 = ufsi->i_u1.i_data[0];
+	fs32 = fs32_to_cpu(sb, fs32);
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUNx86:
+	case UFS_ST_SUN:
+		if ((fs32 & 0xffff0000) == 0 ||
+		    (fs32 & 0xffff0000) == 0xffff0000)
+			dev = old_decode_dev(fs32 & 0x7fff);
+		else
+			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
+		break;
+
+	default:
+		dev = old_decode_dev(fs32);
+		break;
+	}
+	return dev;
+}
+
+void
+ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
+{
+	__fs32 fs32;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUNx86:
+	case UFS_ST_SUN:
+		fs32 = sysv_encode_dev(dev);
+		if ((fs32 & 0xffff8000) == 0) {
+			fs32 = old_encode_dev(dev);
+		}
+		break;
+
+	default:
+		fs32 = old_encode_dev(dev);
+		break;
+	}
+	fs32 = cpu_to_fs32(sb, fs32);
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		ufsi->i_u1.i_data[1] = fs32;
+	else
+		ufsi->i_u1.i_data[0] = fs32;
+}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
new file mode 100644
index 0000000..b264007
--- /dev/null
+++ b/fs/ufs/util.h
@@ -0,0 +1,526 @@
+/*
+ *  linux/fs/ufs/util.h
+ *
+ * Copyright (C) 1998 
+ * Daniel Pirkl <daniel.pirkl@email.cz>
+ * Charles University, Faculty of Mathematics and Physics
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include "swab.h"
+
+
+/*
+ * some useful macros
+ */
+#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))
+
+/*
+ * macros used for retyping
+ */
+#define UCPI_UBH ((struct ufs_buffer_head *)ucpi)
+#define USPI_UBH ((struct ufs_buffer_head *)uspi)
+
+
+
+/*
+ * macros used for accessing structures
+ */
+static inline s32
+ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
+		 struct ufs_super_block_third *usb3)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state);
+	case UFS_ST_SUNx86:
+		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
+	case UFS_ST_44BSD:
+	default:
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_state);
+	}
+}
+
+static inline void
+ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
+		 struct ufs_super_block_third *usb3, s32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value);
+		break;
+	case UFS_ST_SUNx86:
+		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
+		break;
+	case UFS_ST_44BSD:
+		usb3->fs_u2.fs_44.fs_state = cpu_to_fs32(sb, value);
+		break;
+	}
+}
+
+static inline u32
+ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
+		  struct ufs_super_block_third *usb3)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+		return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect);
+	else
+		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
+}
+
+static inline u64
+ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
+{
+	__fs64 tmp;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1];
+		break;
+	case UFS_ST_SUNx86:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qbmask[1];
+		break;
+	case UFS_ST_44BSD:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qbmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qbmask[1];
+		break;
+	}
+
+	return fs64_to_cpu(sb, tmp);
+}
+
+static inline u64
+ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
+{
+	__fs64 tmp;
+
+	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
+	case UFS_ST_SUN:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1];
+		break;
+	case UFS_ST_SUNx86:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qfmask[1];
+		break;
+	case UFS_ST_44BSD:
+		((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qfmask[0];
+		((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qfmask[1];
+		break;
+	}
+
+	return fs64_to_cpu(sb, tmp);
+}
+
+static inline u16
+ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
+		return fs16_to_cpu(sb, de->d_u.d_namlen);
+	else
+		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
+}
+
+static inline void
+ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
+		de->d_u.d_namlen = cpu_to_fs16(sb, value);
+	else
+		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
+}
+
+static inline void
+ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
+{
+	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
+		return;
+
+	/*
+	 * TODO turn this into a table lookup
+	 */
+	switch (mode & S_IFMT) {
+	case S_IFSOCK:
+		de->d_u.d_44.d_type = DT_SOCK;
+		break;
+	case S_IFLNK:
+		de->d_u.d_44.d_type = DT_LNK;
+		break;
+	case S_IFREG:
+		de->d_u.d_44.d_type = DT_REG;
+		break;
+	case S_IFBLK:
+		de->d_u.d_44.d_type = DT_BLK;
+		break;
+	case S_IFDIR:
+		de->d_u.d_44.d_type = DT_DIR;
+		break;
+	case S_IFCHR:
+		de->d_u.d_44.d_type = DT_CHR;
+		break;
+	case S_IFIFO:
+		de->d_u.d_44.d_type = DT_FIFO;
+		break;
+	default:
+		de->d_u.d_44.d_type = DT_UNKNOWN;
+	}
+}
+
+static inline u32
+ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
+	case UFS_UID_44BSD:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
+	default:
+		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
+	}
+}
+
+static inline void
+ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
+		break;
+	case UFS_UID_44BSD:
+		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
+		break;
+	}
+	inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value); 
+}
+
+static inline u32
+ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
+	case UFS_UID_44BSD:
+		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
+	default:
+		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
+	}
+}
+
+static inline void
+ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
+{
+	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
+	case UFS_UID_EFT:
+		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
+		break;
+	case UFS_UID_44BSD:
+		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
+		break;
+	}
+	inode->ui_u1.oldids.ui_sgid =  cpu_to_fs16(sb, value);
+}
+
+extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
+extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
+
+/*
+ * These functions manipulate ufs buffers
+ */
+#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)  
+extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64 , u64);
+extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
+extern void ubh_brelse (struct ufs_buffer_head *);
+extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
+extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
+extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
+extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **);
+extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
+extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
+extern void ubh_bforget (struct ufs_buffer_head *);
+extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
+#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
+extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
+#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
+extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
+
+
+
+/*
+ * macros to get important structures from ufs_buffer_head
+ */
+#define ubh_get_usb_first(ubh) \
+	((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
+
+#define ubh_get_usb_second(ubh) \
+	((struct ufs_super_block_second *)((ubh)-> \
+	bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask)))
+
+#define ubh_get_usb_third(ubh) \
+	((struct ufs_super_block_third *)((ubh)-> \
+	bh[UFS_SECTOR_SIZE*2 >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE*2 & ~uspi->s_fmask)))
+
+#define ubh_get_ucg(ubh) \
+	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))
+
+
+/*
+ * Extract byte from ufs_buffer_head
+ * Extract the bits for a block from a map inside ufs_buffer_head
+ */
+#define ubh_get_addr8(ubh,begin) \
+	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
+	((begin) & ~uspi->s_fmask))
+
+#define ubh_get_addr16(ubh,begin) \
+	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
+	((begin) & ((uspi->s_fsize>>1) - 1)))
+
+#define ubh_get_addr32(ubh,begin) \
+	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
+	((begin) & ((uspi->s_fsize>>2) - 1)))
+
+#define ubh_get_addr ubh_get_addr8
+
+#define ubh_blkmap(ubh,begin,bit) \
+	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
+
+
+/*
+ * Macros for access to superblock array structures
+ */
+#define ubh_postbl(ubh,cylno,i) \
+	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
+	? (*(__s16*)(ubh_get_addr(ubh, \
+	(unsigned)(&((struct ufs_super_block *)0)->fs_opostbl) \
+	+ (((cylno) * 16 + (i)) << 1) ) )) \
+	: (*(__s16*)(ubh_get_addr(ubh, \
+	uspi->s_postbloff + (((cylno) * uspi->s_nrpos + (i)) << 1) ))))
+
+#define ubh_rotbl(ubh,i) \
+	((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
+	? (*(__u8*)(ubh_get_addr(ubh, \
+	(unsigned)(&((struct ufs_super_block *)0)->fs_space) + (i)))) \
+	: (*(__u8*)(ubh_get_addr(ubh, uspi->s_rotbloff + (i)))))
+
+/*
+ * Determine the number of available frags given a
+ * percentage to hold in reserve.
+ */
+#define ufs_freespace(usb, percentreserved) \
+	(ufs_blkstofrags(fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nbfree)) + \
+	fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nffree) - (uspi->s_dsize * (percentreserved) / 100))
+
+/*
+ * Macros to access cylinder group array structures
+ */
+#define ubh_cg_blktot(ucpi,cylno) \
+	(*((__fs32*)ubh_get_addr(UCPI_UBH, (ucpi)->c_btotoff + ((cylno) << 2))))
+
+#define ubh_cg_blks(ucpi,cylno,rpos) \
+	(*((__fs16*)ubh_get_addr(UCPI_UBH, \
+	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))
+
+/*
+ * Bitmap operations
+ * These functions work like classical bitmap operations.
+ * The difference is that we don't have the whole bitmap
+ * in one contiguous chunk of memory, but in several buffers.
+ * The parameters of each function are super_block, ufs_buffer_head and
+ * position of the beginning of the bitmap.
+ */
+#define ubh_setbit(ubh,begin,bit) \
+	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))
+
+#define ubh_clrbit(ubh,begin,bit) \
+	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))
+
+#define ubh_isset(ubh,begin,bit) \
+	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))
+
+#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))
+
+#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)
+
+#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
+static inline unsigned _ubh_find_next_zero_bit_(
+	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
+	unsigned begin, unsigned size, unsigned offset)
+{
+	unsigned base, count, pos;
+
+	size -= offset;
+	begin <<= 3;
+	offset += begin;
+	base = offset >> uspi->s_bpfshift;
+	offset &= uspi->s_bpfmask;
+	for (;;) {
+		count = min_t(unsigned int, size + offset, uspi->s_bpf);
+		size -= count - offset;
+		pos = ext2_find_next_zero_bit (ubh->bh[base]->b_data, count, offset);
+		if (pos < count || !size)
+			break;
+		base++;
+		offset = 0;
+	}
+	return (base << uspi->s_bpfshift) + pos - begin;
+} 	
+
+static inline unsigned find_last_zero_bit (unsigned char * bitmap,
+	unsigned size, unsigned offset)
+{
+	unsigned bit, i;
+	unsigned char * mapp;
+	unsigned char map;
+
+	mapp = bitmap + (size >> 3);
+	map = *mapp--;
+	bit = 1 << (size & 7);
+	for (i = size; i > offset; i--) {
+		if ((map & bit) == 0)
+			break;
+		if ((i & 7) != 0) {
+			bit >>= 1;
+		} else {
+			map = *mapp--;
+			bit = 1 << 7;
+		}
+	}
+	return i;
+}
+
+#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
+static inline unsigned _ubh_find_last_zero_bit_(
+	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
+	unsigned begin, unsigned start, unsigned end)
+{
+	unsigned base, count, pos, size;
+
+	size = start - end;
+	begin <<= 3;
+	start += begin;
+	base = start >> uspi->s_bpfshift;
+	start &= uspi->s_bpfmask;
+	for (;;) {
+		count = min_t(unsigned int,
+			    size + (uspi->s_bpf - start), uspi->s_bpf)
+			- (uspi->s_bpf - start);
+		size -= count;
+		pos = find_last_zero_bit (ubh->bh[base]->b_data,
+			start, start - count);
+		if (pos > start - count || !size)
+			break;
+		base--;
+		start = uspi->s_bpf;
+	}
+	return (base << uspi->s_bpfshift) + pos - begin;
+} 	
+
+#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))
+
+#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
+static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	return (*ubh_get_addr (ubh, begin + block) == 0xff);
+	case 4:
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+	case 2:
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+	case 1:
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+	}
+	return 0;	
+}
+
+#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
+static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	*ubh_get_addr (ubh, begin + block) = 0x00;
+	    	return; 
+	case 4:
+		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
+		return;
+	case 2:
+		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
+		return;
+	case 1:
+		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
+		return;
+	}
+}
+
+#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
+static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
+	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+{
+	switch (uspi->s_fpb) {
+	case 8:
+	    	*ubh_get_addr(ubh, begin + block) = 0xff;
+	    	return;
+	case 4:
+		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
+		return;
+	case 2:
+		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
+		return;
+	case 1:
+		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
+		return;
+	}
+}
+
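+/*
+ * For every run of free fragments in 'blockmap', add 'cnt' to the
+ * per-size counter fraglist[run length].  A run covering the whole
+ * block (s_fpb fragments) is not counted, as a completely free block
+ * is accounted as a block rather than as fragments.
+ */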
+static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
+	__fs32 * fraglist, int cnt)
+{
+	struct ufs_sb_private_info * uspi;
+	unsigned fragsize, pos;
+	
+	uspi = UFS_SB(sb)->s_uspi;
+	
+	fragsize = 0;
+	for (pos = 0; pos < uspi->s_fpb; pos++) {
+		if (blockmap & (1 << pos)) {
+			fragsize++;
+		}
+		else if (fragsize > 0) {
+			fs32_add(sb, &fraglist[fragsize], cnt);
+			fragsize = 0;
+		}
+	}
+	if (fragsize > 0 && fragsize < uspi->s_fpb)
+		fs32_add(sb, &fraglist[fragsize], cnt);
+}
+
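+/*
+ * Scan 'size' bytes starting at byte offset 'begin' for the first byte
+ * whose table[] entry has 'mask' set; returns the number of bytes left
+ * in the range counting from that byte, or 0 if no byte matches.
+ */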
+#define ubh_scanc(ubh,begin,size,table,mask) _ubh_scanc_(uspi,ubh,begin,size,table,mask)
+static inline unsigned _ubh_scanc_(struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh, 
+	unsigned begin, unsigned size, unsigned char * table, unsigned char mask)
+{
+	unsigned rest, offset;
+	unsigned char * cp;
+	
+
+	offset = begin & ~uspi->s_fmask;
+	begin >>= uspi->s_fshift;
+	for (;;) {
+		if ((offset + size) < uspi->s_fsize)
+			rest = size;
+		else
+			rest = uspi->s_fsize - offset;
+		size -= rest;
+		cp = ubh->bh[begin]->b_data + offset;
+		while ((table[*cp++] & mask) == 0 && --rest);
+		if (rest || !size)
+			break;
+		begin++;
+		offset = 0;
+	}
+	return (size + rest);
+}