Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (166 commits)
  Revert "ax25: zero length frame filtering in AX25"
  Revert "netrom: zero length frame filtering in NetRom"
  cfg80211: default CONFIG_WIRELESS_OLD_REGULATORY to n
  mac80211/iwlwifi: move virtual A-MPDU queue bookkeeping to iwlwifi
  mac80211: fix aggregation to not require queue stop
  mac80211: add skb length sanity checking
  mac80211: unify and fix TX aggregation start
  mac80211: clean up __ieee80211_tx args
  mac80211: rework the pending packets code
  mac80211: fix A-MPDU queue assignment
  mac80211: rewrite fragmentation
  iwlwifi: show current driver status in user readable format
  b43: Add BCM4307 PCI-ID
  cfg80211: fix locking in nl80211_set_wiphy
  mac80211: fix RX path
  ath5k: properly drop packets from ops->tx
  ar9170: single module build
  ath9k: fix dma mapping leak of rx buffer upon rmmod
  rt2x00: New USB ID for rt73usb
  ath5k: warn and correct rate for unknown hw rate indexes
  ...
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index e4a54b6..b45d913 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -903,8 +903,9 @@
 	stq	$26, 0($sp)
 	.prologue 0
 
+	mov	$31, $17
 	lda	$16, 8($sp)
-	jsr	$26, do_pipe
+	jsr	$26, do_pipe_flags
 
 	ldq	$26, 0($sp)
 	bne	$0, 1f
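
Note: $31 is Alpha's hardwired zero register and $17 the second argument
register, so the added "mov $31, $17" passes flags == 0 to the new entry
point while "lda $16, 8($sp)" still supplies the fd array.  A sketch of
the generic interface being converted to, assuming the 2.6.29-era
fs/pipe.c helpers:

    /* fs/pipe.c (sketch): create both ends of a pipe; flags may
     * carry O_NONBLOCK/O_CLOEXEC as used by pipe2().  Returns 0
     * or a negative errno.
     */
    int do_pipe_flags(int *fd, int flags);

    /* The retired single-argument helper was equivalent to: */
    static inline int do_pipe(int *fd)
    {
            return do_pipe_flags(fd, 0);
    }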
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ae41f09..42ee059 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -46,8 +46,6 @@
 #include <asm/hwrpb.h>
 #include <asm/processor.h>
 
-extern int do_pipe(int *);
-
 /*
  * Brk needs to return an error.  Still support Linux's brk(0) query idiom,
  * which OSF programs just shouldn't be doing.  We're still not quite
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f839..af9405c 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@
 	data8 sys_ni_syscall
 	data8 sys_umask		  /* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp	  /* 65 */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e49975..5c0f408 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@
 	return 1;
 }
 
-static struct dentry_operations pfmfs_dentry_operations = {
+static const struct dentry_operations pfmfs_dentry_operations = {
 	.d_delete = pfmfs_delete_dentry,
 };
 
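The constification works because the VFS only ever reads through
dentry->d_op, so a const table can be placed in read-only data.  A
minimal sketch of the pattern (hypothetical names, 2.6.29-era d_delete
signature):

    static int example_delete_dentry(struct dentry *dentry)
    {
            return 1;       /* never keep unused dentries cached */
    }

    /* const => the compiler may put this table in .rodata */
    static const struct dentry_operations example_dops = {
            .d_delete = example_delete_dentry,
    };
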
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 49aac6e..2a47271 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -355,40 +355,6 @@
 	return ret;
 }
 
-/* ustat compatibility */
-struct ustat32 {
-	compat_daddr_t	f_tfree;
-	compat_ino_t	f_tinode;
-	char		f_fname[6];
-	char		f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
-{
-	int err;
-	struct ustat tmp;
-	struct ustat32 tmp32;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	err = sys_ustat(dev, (struct ustat __user *)&tmp);
-	set_fs(old_fs);
-
-	if (err)
-		goto out;
-
-	memset(&tmp32, 0, sizeof(struct ustat32));
-	tmp32.f_tfree = tmp.f_tfree;
-	tmp32.f_tinode = tmp.f_tinode;
-
-	err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
-
-out:
-	return err;
-}
-
 SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
 	compat_off_t __user *, offset, s32, count)
 {
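
The removed MIPS wrapper (like the x86 one deleted further down) used
the set_fs(KERNEL_DS) trick to call sys_ustat() against a kernel buffer
and then hand-converted the result for 32-bit userspace.  This series
replaces all the per-arch copies with one generic handler in
fs/compat.c; roughly (a sketch of that handler, not quoted verbatim):

    /* fs/compat.c -- generic 32-bit ustat(2), sketched */
    asmlinkage long compat_sys_ustat(unsigned dev,
                                     struct compat_ustat __user *u)
    {
            struct compat_ustat tmp;
            struct kstatfs sbuf;
            int err = vfs_ustat(new_decode_dev(dev), &sbuf);

            if (err)
                    return err;

            memset(&tmp, 0, sizeof(struct compat_ustat));
            tmp.f_tfree  = sbuf.f_bfree;
            tmp.f_tinode = sbuf.f_ffree;
            if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
                    return -EFAULT;
            return 0;
    }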
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 7438e92..f61d6b0 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -253,7 +253,7 @@
 	PTR	compat_sys_utime		/* 6130 */
 	PTR	sys_mknod
 	PTR	sys_32_personality
-	PTR	sys_32_ustat
+	PTR	compat_sys_ustat
 	PTR	compat_sys_statfs
 	PTR	compat_sys_fstatfs		/* 6135 */
 	PTR	sys_sysfs
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b0fef4f..60997f1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,7 +265,7 @@
 	PTR	sys_olduname
 	PTR	sys_umask			/* 4060 */
 	PTR	sys_chroot
-	PTR	sys_32_ustat
+	PTR	compat_sys_ustat
 	PTR	sys_dup2
 	PTR	sys_getppid
 	PTR	sys_getpgrp			/* 4065 */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 303d2b6..03b9a01 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -130,7 +130,7 @@
 	ENTRY_OURS(newuname)
 	ENTRY_SAME(umask)		/* 60 */
 	ENTRY_SAME(chroot)
-	ENTRY_SAME(ustat)
+	ENTRY_COMP(ustat)
 	ENTRY_SAME(dup2)
 	ENTRY_SAME(getppid)
 	ENTRY_SAME(getpgrp)		/* 65 */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 72353f6..fe16649 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -65,7 +65,7 @@
 SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
 COMPAT_SYS_SPU(umask)
 SYSCALL_SPU(chroot)
-SYSCALL(ustat)
+COMPAT_SYS(ustat)
 SYSCALL_SPU(dup2)
 SYSCALL_SPU(getppid)
 SYSCALL_SPU(getpgrp)
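
On powerpc the systbl.h entries expand through SYSX(64-bit entry,
64-bit-compat entry, 32-bit entry), so changing the line to
COMPAT_SYS(ustat) routes 32-bit tasks on a 64-bit kernel to the compat
entry point.  Presumably along the lines of (sketch of the usual macro
scheme at the include site):

    #define SYSCALL(func)    SYSX(sys_##func, sys_##func,        sys_##func)
    #define COMPAT_SYS(func) SYSX(sys_##func, compat_sys_##func, sys_##func)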
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 62c706e..87cf5a7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -252,7 +252,7 @@
 sys32_ustat_wrapper:
 	llgfr	%r2,%r2			# dev_t
 	llgtr	%r3,%r3			# struct ustat *
-	jg	sys_ustat
+	jg	compat_sys_ustat
 
 	.globl	sys32_dup2_wrapper
 sys32_dup2_wrapper:
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b..79457f6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1031,7 +1031,7 @@
  *    If the address space is non-shared (ie. mm->count == 1) we avoid
  *    cross calls when we want to flush the currently running process's
  *    tlb state.  This is done by clearing all cpu bits except the current
- *    processor's in current->active_mm->cpu_vm_mask and performing the
+ *    processor's in current->mm->cpu_vm_mask and performing the
  *    flush locally only.  This will force any subsequent cpus which run
  *    this task to flush the context from the local tlb if the process
  *    migrates to another cpu (again).
@@ -1074,7 +1074,7 @@
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
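
Narrowing the test from current->active_mm to current->mm matters for
lazy TLB: a kernel thread has no mm of its own and merely borrows the
previous task's as active_mm, so the old check could mistake a borrowed
context for an exclusively owned one.  The invariants relied on, as a
sketch:

    /* Lazy-TLB invariants (sketch):
     *   user task:      current->mm == current->active_mm
     *   kernel thread:  current->mm == NULL,
     *                   current->active_mm == mm borrowed from prev
     *
     * Hence "mm == current->mm && mm_users == 1" holds only when the
     * flushing task really owns the address space outright.
     */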
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f93c42a..a8000b1 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -51,7 +51,7 @@
 /*150*/	.word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
 	.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
 /*160*/	.word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
-	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
 /*170*/	.word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
 	.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
 /*180*/	.word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5a0d76d..8ef8876 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -557,7 +557,7 @@
 	.quad sys32_olduname
 	.quad sys_umask		/* 60 */
 	.quad sys_chroot
-	.quad sys32_ustat
+	.quad compat_sys_ustat
 	.quad sys_dup2
 	.quad sys_getppid
 	.quad sys_getpgrp		/* 65 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 6c0d7f6..efac92f 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -638,28 +638,6 @@
 	return err ? -EFAULT : 0;
 }
 
-long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
-{
-	struct ustat u;
-	mm_segment_t seg;
-	int ret;
-
-	seg = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ustat(dev, (struct ustat __user *)&u);
-	set_fs(seg);
-	if (ret < 0)
-		return ret;
-
-	if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
-	    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
-	    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
-	    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
-	    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
-		ret = -EFAULT;
-	return ret;
-}
-
 asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
 			     compat_uptr_t __user *envp, struct pt_regs *regs)
 {
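
Note that the second __put_user() in the wrapper removed above stored
u.f_tinode through &u32p->f_tfree, leaving f_tinode uninitialized in
userspace; the intended stores would have been:

    __put_user((__u32) u.f_tfree,  &u32p->f_tfree)  ||
    __put_user((__u32) u.f_tinode, &u32p->f_tinode) ||

Switching to the shared compat_sys_ustat() (see the sketch after the
MIPS hunk above) retires that bug along with the set_fs() games.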
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 50ca486..1f7e625 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -129,13 +129,6 @@
 	} _sifields;
 } compat_siginfo_t;
 
-struct ustat32 {
-	__u32			f_tfree;
-	compat_ino_t		f_tinode;
-	char			f_fname[6];
-	char			f_fpack[6];
-};
-
 #define IA32_STACK_TOP IA32_PAGE_OFFSET
 
 #ifdef __KERNEL__
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index ffb08be..72a6dcd 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -70,8 +70,6 @@
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
 long sys32_uname(struct old_utsname __user *);
 
-long sys32_ustat(unsigned, struct ustat32 __user *);
-
 asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
 			     compat_uptr_t __user *, struct pt_regs *);
 asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb..9d9490e 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
@@ -60,7 +64,12 @@
 
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@
 			size = 512;
 		}
 		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33) {
+	} else if (IS_G33 && !IS_IGD) {
 	/* G33's GTT size defined in gmch_ctrl */
 		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
 		case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@
 			size = 512;
 		}
 		size += 4;
-	} else if (IS_G4X) {
+	} else if (IS_G4X || IS_IGD) {
 		/* On 4 series hardware, GTT stolen is separate from graphics
 		 * stolen, ignore it in stolen gtt entries counting.  However,
 		 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 		NULL, &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+		NULL, &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 	    "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
 	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
 	ID(PCI_DEVICE_ID_INTEL_82G35_HB),
 	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
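
The new IGD IDs are wired into the existing G33 driver, but their
stolen-memory accounting follows the G4X scheme, hence the two
predicate tweaks above.  The resulting branch selection in the
stolen-size code, sketched:

    /* } else if (IS_G33 && !IS_IGD) {  GTT size read from gmch_ctrl
     * } else if (IS_G4X || IS_IGD)  {  GTT stolen kept separate from
     *                                  graphics stolen, not counted
     */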
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a6..8b8c8c2 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@
 #define BIB_CMC			((1) << 30)
 #define BIB_IMC			((1) << 31)
 
-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
 {
 	struct fw_descriptor *desc;
 	static u32 config_rom[256];
@@ -128,8 +127,7 @@
 	return config_rom;
 }
 
-static void
-update_config_roms(void)
+static void update_config_roms(void)
 {
 	struct fw_card *card;
 	u32 *config_rom;
@@ -141,8 +139,7 @@
 	}
 }
 
-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
 	size_t i;
 
@@ -171,8 +168,7 @@
 	return 0;
 }
 
-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
 {
 	mutex_lock(&card_mutex);
 
@@ -185,12 +181,30 @@
 	mutex_unlock(&card_mutex);
 }
 
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+	fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+	return 0;
+}
+
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+	int channel, bandwidth = 0;
+
+	fw_iso_resource_manage(card, generation, 1ULL << 31,
+			       &channel, &bandwidth, true);
+	if (channel == 31) {
+		card->broadcast_channel_allocated = true;
+		device_for_each_child(card->device, (void *)(long)generation,
+				      set_broadcast_channel);
+	}
+}
+
 static const char gap_count_table[] = {
 	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
 };
 
-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 {
 	int scheduled;
 
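allocate_broadcast_channel() asks the isochronous resource manager for
channel 31 (the 1394a broadcast channel) with zero bandwidth;
fw_iso_resource_manage() hands back the channel actually granted, so
success is exactly channel == 31, after which each child device is
notified via fw_device_set_broadcast_channel().  The call contract as
used here, sketched:

    /* fw_iso_resource_manage(card, generation, channels_mask,
     *                        &channel, &bandwidth, allocate):
     *   channels_mask  bit n requests ISO channel n, so 1ULL << 31
     *                  asks for the broadcast channel only
     *   channel        out: channel granted (or failure indication)
     *   bandwidth      in/out: bandwidth units requested/granted
     */
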
@@ -200,37 +214,38 @@
 		fw_card_put(card);
 }
 
-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
 {
 	struct fw_card *card = container_of(work, struct fw_card, work.work);
 	struct fw_device *root_device;
-	struct fw_node *root_node, *local_node;
+	struct fw_node *root_node;
 	unsigned long flags;
-	int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
+	int root_id, new_root_id, irm_id, local_id;
+	int gap_count, generation, grace, rcode;
 	bool do_reset = false;
 	bool root_device_is_running;
 	bool root_device_is_cmc;
 	__be32 lock_data[2];
 
 	spin_lock_irqsave(&card->lock, flags);
-	local_node = card->local_node;
-	root_node  = card->root_node;
 
-	if (local_node == NULL) {
+	if (card->local_node == NULL) {
 		spin_unlock_irqrestore(&card->lock, flags);
 		goto out_put_card;
 	}
-	fw_node_get(local_node);
-	fw_node_get(root_node);
 
 	generation = card->generation;
+	root_node = card->root_node;
+	fw_node_get(root_node);
 	root_device = root_node->data;
 	root_device_is_running = root_device &&
 			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
 	root_device_is_cmc = root_device && root_device->cmc;
-	root_id = root_node->node_id;
-	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+	root_id  = root_node->node_id;
+	irm_id   = card->irm_node->node_id;
+	local_id = card->local_node->node_id;
+
+	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
 	if (is_next_generation(generation, card->bm_generation) ||
 	    (card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@
 		 * next generation.
 		 */
 
-		irm_id = card->irm_node->node_id;
 		if (!card->irm_node->link_on) {
-			new_root_id = local_node->node_id;
+			new_root_id = local_id;
 			fw_notify("IRM has link off, making local node (%02x) root.\n",
 				  new_root_id);
 			goto pick_me;
 		}
 
 		lock_data[0] = cpu_to_be32(0x3f);
-		lock_data[1] = cpu_to_be32(local_node->node_id);
+		lock_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irqrestore(&card->lock, flags);
 
@@ -269,9 +283,14 @@
 			goto out;
 
 		if (rcode == RCODE_COMPLETE &&
-		    lock_data[0] != cpu_to_be32(0x3f))
-			/* Somebody else is BM, let them do the work. */
+		    lock_data[0] != cpu_to_be32(0x3f)) {
+
+			/* Somebody else is BM.  Only act as IRM. */
+			if (local_id == irm_id)
+				allocate_broadcast_channel(card, generation);
+
 			goto out;
+		}
 
 		spin_lock_irqsave(&card->lock, flags);
 
@@ -282,19 +301,18 @@
 			 * do a bus reset and pick the local node as
 			 * root, and thus, IRM.
 			 */
-			new_root_id = local_node->node_id;
+			new_root_id = local_id;
 			fw_notify("BM lock failed, making local node (%02x) root.\n",
 				  new_root_id);
 			goto pick_me;
 		}
 	} else if (card->bm_generation != generation) {
 		/*
-		 * OK, we weren't BM in the last generation, and it's
-		 * less than 100ms since last bus reset. Reschedule
-		 * this task 100ms from now.
+		 * We weren't BM in the last generation, and the last
+		 * bus reset is less than 125ms ago.  Reschedule this job.
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
-		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
+		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
 		goto out;
 	}
 
@@ -310,7 +328,7 @@
 		 * Either link_on is false, or we failed to read the
 		 * config rom.  In either case, pick another root.
 		 */
-		new_root_id = local_node->node_id;
+		new_root_id = local_id;
 	} else if (!root_device_is_running) {
 		/*
 		 * If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@
 		 * successfully read the config rom, but it's not
 		 * cycle master capable.
 		 */
-		new_root_id = local_node->node_id;
+		new_root_id = local_id;
 	}
 
  pick_me:
@@ -363,25 +381,28 @@
 			  card->index, new_root_id, gap_count);
 		fw_send_phy_config(card, new_root_id, generation, gap_count);
 		fw_core_initiate_bus_reset(card, 1);
+		/* Will allocate broadcast channel after the reset. */
+	} else {
+		if (local_id == irm_id)
+			allocate_broadcast_channel(card, generation);
 	}
+
  out:
 	fw_node_put(root_node);
-	fw_node_put(local_node);
  out_put_card:
 	fw_card_put(card);
 }
 
-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
 {
 	struct fw_card *card = (struct fw_card *)data;
 
 	fw_flush_transactions(card);
 }
 
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
-		   struct device *device)
+void fw_card_initialize(struct fw_card *card,
+			const struct fw_card_driver *driver,
+			struct device *device)
 {
 	static atomic_t index = ATOMIC_INIT(-1);
 
@@ -406,13 +427,12 @@
 }
 EXPORT_SYMBOL(fw_card_initialize);
 
-int
-fw_card_add(struct fw_card *card,
-	    u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+		u32 max_receive, u32 link_speed, u64 guid)
 {
 	u32 *config_rom;
 	size_t length;
-	int err;
+	int ret;
 
 	card->max_receive = max_receive;
 	card->link_speed = link_speed;
@@ -423,13 +443,14 @@
 	list_add_tail(&card->link, &card_list);
 	mutex_unlock(&card_mutex);
 
-	err = card->driver->enable(card, config_rom, length);
-	if (err < 0) {
+	ret = card->driver->enable(card, config_rom, length);
+	if (ret < 0) {
 		mutex_lock(&card_mutex);
 		list_del(&card->link);
 		mutex_unlock(&card_mutex);
 	}
-	return err;
+
+	return ret;
 }
 EXPORT_SYMBOL(fw_card_add);
 
@@ -442,23 +463,20 @@
  * dummy driver just fails all IO.
  */
 
-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
 {
 	BUG();
 	return -1;
 }
 
-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
-		     int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+				int clear_bits, int set_bits)
 {
 	return -ENODEV;
 }
 
-static int
-dummy_set_config_rom(struct fw_card *card,
-		     u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+				u32 *config_rom, size_t length)
 {
 	/*
 	 * We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@
 	return -1;
 }
 
-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
 {
 	packet->callback(packet, card, -ENODEV);
 }
 
-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
 {
 	packet->callback(packet, card, -ENODEV);
 }
 
-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 {
 	return -ENOENT;
 }
 
-static int
-dummy_enable_phys_dma(struct fw_card *card,
-		      int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+				 int node_id, int generation)
 {
 	return -ENODEV;
 }
@@ -503,16 +517,14 @@
 	.enable_phys_dma = dummy_enable_phys_dma,
 };
 
-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
 {
 	struct fw_card *card = container_of(kref, struct fw_card, kref);
 
 	complete(&card->done);
 }
 
-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
 {
 	card->driver->update_phy_reg(card, 4,
 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@
 }
 EXPORT_SYMBOL(fw_core_remove_card);
 
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
 	int reg = short_reset ? 5 : 1;
 	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234..7eb6594 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,67 +18,40 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/preempt.h>
+#include <linux/spinlock.h>
 #include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
 #include "fw-device.h"
-
-struct client;
-struct client_resource {
-	struct list_head link;
-	void (*release)(struct client *client, struct client_resource *r);
-	u32 handle;
-};
-
-/*
- * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct.
- */
-
-struct event {
-	struct { void *data; size_t size; } v[2];
-	struct list_head link;
-};
-
-struct bus_reset {
-	struct event event;
-	struct fw_cdev_event_bus_reset reset;
-};
-
-struct response {
-	struct event event;
-	struct fw_transaction transaction;
-	struct client *client;
-	struct client_resource resource;
-	struct fw_cdev_event_response response;
-};
-
-struct iso_interrupt {
-	struct event event;
-	struct fw_cdev_event_iso_interrupt interrupt;
-};
+#include "fw-topology.h"
+#include "fw-transaction.h"
 
 struct client {
 	u32 version;
 	struct fw_device *device;
+
 	spinlock_t lock;
-	u32 resource_handle;
-	struct list_head resource_list;
+	bool in_shutdown;
+	struct idr resource_idr;
 	struct list_head event_list;
 	wait_queue_head_t wait;
 	u64 bus_reset_closure;
@@ -89,16 +62,118 @@
 	unsigned long vm_start;
 
 	struct list_head link;
+	struct kref kref;
 };
 
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void client_get(struct client *client)
+{
+	kref_get(&client->kref);
+}
+
+static void client_release(struct kref *kref)
+{
+	struct client *client = container_of(kref, struct client, kref);
+
+	fw_device_put(client->device);
+	kfree(client);
+}
+
+static void client_put(struct client *client)
+{
+	kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+					     struct client_resource *);
+struct client_resource {
+	client_resource_release_fn_t release;
+	int handle;
+};
+
+struct address_handler_resource {
+	struct client_resource resource;
+	struct fw_address_handler handler;
+	__u64 closure;
+	struct client *client;
+};
+
+struct outbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_request *request;
+	void *data;
+	size_t length;
+};
+
+struct descriptor_resource {
+	struct client_resource resource;
+	struct fw_descriptor descriptor;
+	u32 data[0];
+};
+
+struct iso_resource {
+	struct client_resource resource;
+	struct client *client;
+	/* Schedule work and access todo only with client->lock held. */
+	struct delayed_work work;
+	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+	int generation;
+	u64 channels;
+	s32 bandwidth;
+	struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in a struct XYZ_event.
+ */
+struct event {
+	struct { void *data; size_t size; } v[2];
+	struct list_head link;
+};
+
+struct bus_reset_event {
+	struct event event;
+	struct fw_cdev_event_bus_reset reset;
+};
+
+struct outbound_transaction_event {
+	struct event event;
+	struct client *client;
+	struct outbound_transaction_resource r;
+	struct fw_cdev_event_response response;
+};
+
+struct inbound_transaction_event {
+	struct event event;
+	struct fw_cdev_event_request request;
+};
+
+struct iso_interrupt_event {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct iso_resource_event {
+	struct event event;
+	struct fw_cdev_event_iso_resource resource;
+};
+
+static inline void __user *u64_to_uptr(__u64 value)
 {
 	return (void __user *)(unsigned long)value;
 }
 
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
 {
 	return (__u64)(unsigned long)ptr;
 }
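
The rewrite replaces the open-coded resource_list with an idr and gives
struct client a kref; client_release() runs only once every holder of a
reference has dropped it.  The reference discipline introduced by the
hunks below, sketched:

    /* who holds a client reference:
     *   add_client_resource()   -> client_get()  per idr entry
     *   init_request()          -> client_get()  per transaction callback
     *   schedule_iso_resource() -> client_get()  per pending work item
     * each balanced by client_put() when the entry is removed, the
     * callback completes, or the work item has run.
     */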
@@ -107,7 +182,6 @@
 {
 	struct fw_device *device;
 	struct client *client;
-	unsigned long flags;
 
 	device = fw_device_get_by_devt(inode->i_rdev);
 	if (device == NULL)
@@ -125,16 +199,17 @@
 	}
 
 	client->device = device;
-	INIT_LIST_HEAD(&client->event_list);
-	INIT_LIST_HEAD(&client->resource_list);
 	spin_lock_init(&client->lock);
+	idr_init(&client->resource_idr);
+	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	kref_init(&client->kref);
 
 	file->private_data = client;
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	mutex_lock(&device->client_list_mutex);
 	list_add_tail(&client->link, &device->client_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	mutex_unlock(&device->client_list_mutex);
 
 	return 0;
 }
@@ -150,68 +225,69 @@
 	event->v[1].size = size1;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&event->link, &client->event_list);
+	if (client->in_shutdown)
+		kfree(event);
+	else
+		list_add_tail(&event->link, &client->event_list);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	wake_up_interruptible(&client->wait);
 }
 
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+			 char __user *buffer, size_t count)
 {
-	unsigned long flags;
 	struct event *event;
 	size_t size, total;
-	int i, retval;
+	int i, ret;
 
-	retval = wait_event_interruptible(client->wait,
-					  !list_empty(&client->event_list) ||
-					  fw_device_is_shutdown(client->device));
-	if (retval < 0)
-		return retval;
+	ret = wait_event_interruptible(client->wait,
+			!list_empty(&client->event_list) ||
+			fw_device_is_shutdown(client->device));
+	if (ret < 0)
+		return ret;
 
 	if (list_empty(&client->event_list) &&
 		       fw_device_is_shutdown(client->device))
 		return -ENODEV;
 
-	spin_lock_irqsave(&client->lock, flags);
-	event = container_of(client->event_list.next, struct event, link);
+	spin_lock_irq(&client->lock);
+	event = list_first_entry(&client->event_list, struct event, link);
 	list_del(&event->link);
-	spin_unlock_irqrestore(&client->lock, flags);
+	spin_unlock_irq(&client->lock);
 
 	total = 0;
 	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 		size = min(event->v[i].size, count - total);
 		if (copy_to_user(buffer + total, event->v[i].data, size)) {
-			retval = -EFAULT;
+			ret = -EFAULT;
 			goto out;
 		}
 		total += size;
 	}
-	retval = total;
+	ret = total;
 
  out:
 	kfree(event);
 
-	return retval;
+	return ret;
 }
 
-static ssize_t
-fw_device_op_read(struct file *file,
-		  char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+				 size_t count, loff_t *offset)
 {
 	struct client *client = file->private_data;
 
 	return dequeue_event(client, buffer, count);
 }
 
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-		     struct client *client)
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+				 struct client *client)
 {
 	struct fw_card *card = client->device->card;
 
+	spin_lock_irq(&card->lock);
+
 	event->closure	     = client->bus_reset_closure;
 	event->type          = FW_CDEV_EVENT_BUS_RESET;
 	event->generation    = client->device->generation;
@@ -220,39 +296,49 @@
 	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
 	event->irm_node_id   = card->irm_node->node_id;
 	event->root_node_id  = card->root_node->node_id;
+
+	spin_unlock_irq(&card->lock);
 }
 
-static void
-for_each_client(struct fw_device *device,
-		void (*callback)(struct client *client))
+static void for_each_client(struct fw_device *device,
+			    void (*callback)(struct client *client))
 {
-	struct fw_card *card = device->card;
 	struct client *c;
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->lock, flags);
-
+	mutex_lock(&device->client_list_mutex);
 	list_for_each_entry(c, &device->client_list, link)
 		callback(c);
-
-	spin_unlock_irqrestore(&card->lock, flags);
+	mutex_unlock(&device->client_list_mutex);
 }
 
-static void
-queue_bus_reset_event(struct client *client)
+static int schedule_reallocations(int id, void *p, void *data)
 {
-	struct bus_reset *bus_reset;
+	struct client_resource *r = p;
 
-	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
-	if (bus_reset == NULL) {
+	if (r->release == release_iso_resource)
+		schedule_iso_resource(container_of(r,
+					struct iso_resource, resource));
+	return 0;
+}
+
+static void queue_bus_reset_event(struct client *client)
+{
+	struct bus_reset_event *e;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (e == NULL) {
 		fw_notify("Out of memory when allocating bus reset event\n");
 		return;
 	}
 
-	fill_bus_reset_event(&bus_reset->reset, client);
+	fill_bus_reset_event(&e->reset, client);
 
-	queue_event(client, &bus_reset->event,
-		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+	queue_event(client, &e->event,
+		    &e->reset, sizeof(e->reset), NULL, 0);
+
+	spin_lock_irq(&client->lock);
+	idr_for_each(&client->resource_idr, schedule_reallocations, client);
+	spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@
 {
 	struct fw_cdev_get_info *get_info = buffer;
 	struct fw_cdev_event_bus_reset bus_reset;
-	struct fw_card *card = client->device->card;
 	unsigned long ret = 0;
 
 	client->version = get_info->version;
 	get_info->version = FW_CDEV_VERSION;
+	get_info->card = client->device->card->index;
 
 	down_read(&fw_device_rwsem);
 
@@ -300,49 +386,61 @@
 	client->bus_reset_closure = get_info->bus_reset_closure;
 	if (get_info->bus_reset != 0) {
 		void __user *uptr = u64_to_uptr(get_info->bus_reset);
-		unsigned long flags;
 
-		spin_lock_irqsave(&card->lock, flags);
 		fill_bus_reset_event(&bus_reset, client);
-		spin_unlock_irqrestore(&card->lock, flags);
-
 		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
 			return -EFAULT;
 	}
 
-	get_info->card = card->index;
-
 	return 0;
 }
 
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+			       struct client_resource *resource, gfp_t gfp_mask)
 {
 	unsigned long flags;
+	int ret;
+
+ retry:
+	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+		return -ENOMEM;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&resource->link, &client->resource_list);
-	resource->handle = client->resource_handle++;
-	spin_unlock_irqrestore(&client->lock, flags);
-}
-
-static int
-release_client_resource(struct client *client, u32 handle,
-			struct client_resource **resource)
-{
-	struct client_resource *r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&client->lock, flags);
-	list_for_each_entry(r, &client->resource_list, link) {
-		if (r->handle == handle) {
-			list_del(&r->link);
-			break;
-		}
+	if (client->in_shutdown)
+		ret = -ECANCELED;
+	else
+		ret = idr_get_new(&client->resource_idr, resource,
+				  &resource->handle);
+	if (ret >= 0) {
+		client_get(client);
+		if (resource->release == release_iso_resource)
+			schedule_iso_resource(container_of(resource,
+						struct iso_resource, resource));
 	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
-	if (&r->link == &client->resource_list)
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return ret < 0 ? ret : 0;
+}
+
+static int release_client_resource(struct client *client, u32 handle,
+				   client_resource_release_fn_t release,
+				   struct client_resource **resource)
+{
+	struct client_resource *r;
+
+	spin_lock_irq(&client->lock);
+	if (client->in_shutdown)
+		r = NULL;
+	else
+		r = idr_find(&client->resource_idr, handle);
+	if (r && r->release == release)
+		idr_remove(&client->resource_idr, handle);
+	spin_unlock_irq(&client->lock);
+
+	if (!(r && r->release == release))
 		return -EINVAL;
 
 	if (resource)
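
add_client_resource() uses the two-step idr API of this era:
idr_pre_get() preallocates outside the lock, and idr_get_new() consumes
the preallocation under the lock but can still fail with -EAGAIN if it
raced away, hence the retry loop.  A minimal usage sketch of that API
(hypothetical names; pre-idr_alloc kernels):

    int id, ret;

    do {
            if (idr_pre_get(&some_idr, GFP_KERNEL) == 0)
                    return -ENOMEM;         /* preallocation failed */
            spin_lock(&some_lock);
            ret = idr_get_new(&some_idr, some_ptr, &id);
            spin_unlock(&some_lock);
    } while (ret == -EAGAIN);               /* raced: preallocate again */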
@@ -350,203 +448,239 @@
 	else
 		r->release(client, r);
 
+	client_put(client);
+
 	return 0;
 }
 
-static void
-release_transaction(struct client *client, struct client_resource *resource)
+static void release_transaction(struct client *client,
+				struct client_resource *resource)
 {
-	struct response *response =
-		container_of(resource, struct response, resource);
+	struct outbound_transaction_resource *r = container_of(resource,
+			struct outbound_transaction_resource, resource);
 
-	fw_cancel_transaction(client->device->card, &response->transaction);
+	fw_cancel_transaction(client->device->card, &r->transaction);
 }
 
-static void
-complete_transaction(struct fw_card *card, int rcode,
-		     void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+				 void *payload, size_t length, void *data)
 {
-	struct response *response = data;
-	struct client *client = response->client;
+	struct outbound_transaction_event *e = data;
+	struct fw_cdev_event_response *rsp = &e->response;
+	struct client *client = e->client;
 	unsigned long flags;
-	struct fw_cdev_event_response *r = &response->response;
 
-	if (length < r->length)
-		r->length = length;
+	if (length < rsp->length)
+		rsp->length = length;
 	if (rcode == RCODE_COMPLETE)
-		memcpy(r->data, payload, r->length);
+		memcpy(rsp->data, payload, rsp->length);
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_del(&response->resource.link);
+	/*
+	 * 1. If called while in shutdown, the idr tree must be left untouched.
+	 *    The idr handle will be removed and the client reference will be
+	 *    dropped later.
+	 * 2. If the call chain was release_client_resource ->
+	 *    release_transaction -> complete_transaction (instead of a normal
+	 *    conclusion of the transaction), i.e. if this resource was already
+	 *    unregistered from the idr, the client reference will be dropped
+	 *    by release_client_resource and we must not drop it here.
+	 */
+	if (!client->in_shutdown &&
+	    idr_find(&client->resource_idr, e->r.resource.handle)) {
+		idr_remove(&client->resource_idr, e->r.resource.handle);
+		/* Drop the idr's reference */
+		client_put(client);
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
 
-	r->type   = FW_CDEV_EVENT_RESPONSE;
-	r->rcode  = rcode;
+	rsp->type = FW_CDEV_EVENT_RESPONSE;
+	rsp->rcode = rcode;
 
 	/*
-	 * In the case that sizeof(*r) doesn't align with the position of the
+	 * In the case that sizeof(*rsp) doesn't align with the position of the
 	 * data, and the read is short, preserve an extra copy of the data
 	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
 	 * for short reads and some apps depended on it, this is both safe
 	 * and prudent for compatibility.
 	 */
-	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
-		queue_event(client, &response->event, r, sizeof(*r),
-			    r->data, r->length);
+	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+		queue_event(client, &e->event, rsp, sizeof(*rsp),
+			    rsp->data, rsp->length);
 	else
-		queue_event(client, &response->event, r, sizeof(*r) + r->length,
+		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
 			    NULL, 0);
+
+	/* Drop the transaction callback's reference */
+	client_put(client);
+}
+
+static int init_request(struct client *client,
+			struct fw_cdev_send_request *request,
+			int destination_id, int speed)
+{
+	struct outbound_transaction_event *e;
+	int ret;
+
+	if (request->tcode != TCODE_STREAM_DATA &&
+	    (request->length > 4096 || request->length > 512 << speed))
+		return -EIO;
+
+	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+	if (e == NULL)
+		return -ENOMEM;
+
+	e->client = client;
+	e->response.length = request->length;
+	e->response.closure = request->closure;
+
+	if (request->data &&
+	    copy_from_user(e->response.data,
+			   u64_to_uptr(request->data), request->length)) {
+		ret = -EFAULT;
+		goto failed;
+	}
+
+	e->r.resource.release = release_transaction;
+	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+	if (ret < 0)
+		goto failed;
+
+	/* Get a reference for the transaction callback */
+	client_get(client);
+
+	fw_send_request(client->device->card, &e->r.transaction,
+			request->tcode, destination_id, request->generation,
+			speed, request->offset, e->response.data,
+			request->length, complete_transaction, e);
+	return 0;
+
+ failed:
+	kfree(e);
+
+	return ret;
 }
 
 static int ioctl_send_request(struct client *client, void *buffer)
 {
-	struct fw_device *device = client->device;
 	struct fw_cdev_send_request *request = buffer;
-	struct response *response;
 
-	/* What is the biggest size we'll accept, really? */
-	if (request->length > 4096)
+	switch (request->tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_LOCK_MASK_SWAP:
+	case TCODE_LOCK_COMPARE_SWAP:
+	case TCODE_LOCK_FETCH_ADD:
+	case TCODE_LOCK_LITTLE_ADD:
+	case TCODE_LOCK_BOUNDED_ADD:
+	case TCODE_LOCK_WRAP_ADD:
+	case TCODE_LOCK_VENDOR_DEPENDENT:
+		break;
+	default:
 		return -EINVAL;
-
-	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
-	if (response == NULL)
-		return -ENOMEM;
-
-	response->client = client;
-	response->response.length = request->length;
-	response->response.closure = request->closure;
-
-	if (request->data &&
-	    copy_from_user(response->response.data,
-			   u64_to_uptr(request->data), request->length)) {
-		kfree(response);
-		return -EFAULT;
 	}
 
-	response->resource.release = release_transaction;
-	add_client_resource(client, &response->resource);
-
-	fw_send_request(device->card, &response->transaction,
-			request->tcode & 0x1f,
-			device->node->node_id,
-			request->generation,
-			device->max_speed,
-			request->offset,
-			response->response.data, request->length,
-			complete_transaction, response);
-
-	if (request->data)
-		return sizeof(request) + request->length;
-	else
-		return sizeof(request);
+	return init_request(client, request, client->device->node_id,
+			    client->device->max_speed);
 }
 
-struct address_handler {
-	struct fw_address_handler handler;
-	__u64 closure;
-	struct client *client;
-	struct client_resource resource;
-};
-
-struct request {
-	struct fw_request *request;
-	void *data;
-	size_t length;
-	struct client_resource resource;
-};
-
-struct request_event {
-	struct event event;
-	struct fw_cdev_event_request request;
-};
-
-static void
-release_request(struct client *client, struct client_resource *resource)
+static void release_request(struct client *client,
+			    struct client_resource *resource)
 {
-	struct request *request =
-		container_of(resource, struct request, resource);
+	struct inbound_transaction_resource *r = container_of(resource,
+			struct inbound_transaction_resource, resource);
 
-	fw_send_response(client->device->card, request->request,
+	fw_send_response(client->device->card, r->request,
 			 RCODE_CONFLICT_ERROR);
-	kfree(request);
+	kfree(r);
 }
 
-static void
-handle_request(struct fw_card *card, struct fw_request *r,
-	       int tcode, int destination, int source,
-	       int generation, int speed,
-	       unsigned long long offset,
-	       void *payload, size_t length, void *callback_data)
+static void handle_request(struct fw_card *card, struct fw_request *request,
+			   int tcode, int destination, int source,
+			   int generation, int speed,
+			   unsigned long long offset,
+			   void *payload, size_t length, void *callback_data)
 {
-	struct address_handler *handler = callback_data;
-	struct request *request;
-	struct request_event *e;
-	struct client *client = handler->client;
+	struct address_handler_resource *handler = callback_data;
+	struct inbound_transaction_resource *r;
+	struct inbound_transaction_event *e;
+	int ret;
 
-	request = kmalloc(sizeof(*request), GFP_ATOMIC);
+	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
-	if (request == NULL || e == NULL) {
-		kfree(request);
-		kfree(e);
-		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
-		return;
-	}
+	if (r == NULL || e == NULL)
+		goto failed;
 
-	request->request = r;
-	request->data    = payload;
-	request->length  = length;
+	r->request = request;
+	r->data    = payload;
+	r->length  = length;
 
-	request->resource.release = release_request;
-	add_client_resource(client, &request->resource);
+	r->resource.release = release_request;
+	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+	if (ret < 0)
+		goto failed;
 
 	e->request.type    = FW_CDEV_EVENT_REQUEST;
 	e->request.tcode   = tcode;
 	e->request.offset  = offset;
 	e->request.length  = length;
-	e->request.handle  = request->resource.handle;
+	e->request.handle  = r->resource.handle;
 	e->request.closure = handler->closure;
 
-	queue_event(client, &e->event,
+	queue_event(handler->client, &e->event,
 		    &e->request, sizeof(e->request), payload, length);
+	return;
+
+ failed:
+	kfree(r);
+	kfree(e);
+	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 }
 
-static void
-release_address_handler(struct client *client,
-			struct client_resource *resource)
+static void release_address_handler(struct client *client,
+				    struct client_resource *resource)
 {
-	struct address_handler *handler =
-		container_of(resource, struct address_handler, resource);
+	struct address_handler_resource *r =
+	    container_of(resource, struct address_handler_resource, resource);
 
-	fw_core_remove_address_handler(&handler->handler);
-	kfree(handler);
+	fw_core_remove_address_handler(&r->handler);
+	kfree(r);
 }
 
 static int ioctl_allocate(struct client *client, void *buffer)
 {
 	struct fw_cdev_allocate *request = buffer;
-	struct address_handler *handler;
+	struct address_handler_resource *r;
 	struct fw_address_region region;
+	int ret;
 
-	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
-	if (handler == NULL)
+	r = kmalloc(sizeof(*r), GFP_KERNEL);
+	if (r == NULL)
 		return -ENOMEM;
 
 	region.start = request->offset;
 	region.end = request->offset + request->length;
-	handler->handler.length = request->length;
-	handler->handler.address_callback = handle_request;
-	handler->handler.callback_data = handler;
-	handler->closure = request->closure;
-	handler->client = client;
+	r->handler.length = request->length;
+	r->handler.address_callback = handle_request;
+	r->handler.callback_data = r;
+	r->closure = request->closure;
+	r->client = client;
 
-	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
-		kfree(handler);
-		return -EBUSY;
+	ret = fw_core_add_address_handler(&r->handler, &region);
+	if (ret < 0) {
+		kfree(r);
+		return ret;
 	}
 
-	handler->resource.release = release_address_handler;
-	add_client_resource(client, &handler->resource);
-	request->handle = handler->resource.handle;
+	r->resource.release = release_address_handler;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0) {
+		release_address_handler(client, &r->resource);
+		return ret;
+	}
+	request->handle = r->resource.handle;
 
 	return 0;
 }
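
The length check in init_request() above caps non-stream payloads at
min(4096, 512 << speed); 512 << speed is the maximum asynchronous
payload IEEE 1394 defines per speed code:

    /* max async payload, 512 << speed:
     *   S100 (speed 0):  512 bytes
     *   S200 (speed 1): 1024 bytes
     *   S400 (speed 2): 2048 bytes
     *   S800 (speed 3): 4096 bytes (also the driver-wide cap)
     */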
@@ -555,18 +689,22 @@
 {
 	struct fw_cdev_deallocate *request = buffer;
 
-	return release_client_resource(client, request->handle, NULL);
+	return release_client_resource(client, request->handle,
+				       release_address_handler, NULL);
 }
 
 static int ioctl_send_response(struct client *client, void *buffer)
 {
 	struct fw_cdev_send_response *request = buffer;
 	struct client_resource *resource;
-	struct request *r;
+	struct inbound_transaction_resource *r;
 
-	if (release_client_resource(client, request->handle, &resource) < 0)
+	if (release_client_resource(client, request->handle,
+				    release_request, &resource) < 0)
 		return -EINVAL;
-	r = container_of(resource, struct request, resource);
+
+	r = container_of(resource, struct inbound_transaction_resource,
+			 resource);
 	if (request->length < r->length)
 		r->length = request->length;
 	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@
 	return fw_core_initiate_bus_reset(client->device->card, short_reset);
 }
 
-struct descriptor {
-	struct fw_descriptor d;
-	struct client_resource resource;
-	u32 data[0];
-};
-
 static void release_descriptor(struct client *client,
 			       struct client_resource *resource)
 {
-	struct descriptor *descriptor =
-		container_of(resource, struct descriptor, resource);
+	struct descriptor_resource *r =
+		container_of(resource, struct descriptor_resource, resource);
 
-	fw_core_remove_descriptor(&descriptor->d);
-	kfree(descriptor);
+	fw_core_remove_descriptor(&r->descriptor);
+	kfree(r);
 }
 
 static int ioctl_add_descriptor(struct client *client, void *buffer)
 {
 	struct fw_cdev_add_descriptor *request = buffer;
-	struct descriptor *descriptor;
-	int retval;
+	struct fw_card *card = client->device->card;
+	struct descriptor_resource *r;
+	int ret;
+
+	/* Access policy: Allow this ioctl only on local nodes' device files. */
+	spin_lock_irq(&card->lock);
+	ret = client->device->node_id != card->local_node->node_id;
+	spin_unlock_irq(&card->lock);
+	if (ret)
+		return -ENOSYS;
 
 	if (request->length > 256)
 		return -EINVAL;
 
-	descriptor =
-		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
-	if (descriptor == NULL)
+	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+	if (r == NULL)
 		return -ENOMEM;
 
-	if (copy_from_user(descriptor->data,
+	if (copy_from_user(r->data,
 			   u64_to_uptr(request->data), request->length * 4)) {
-		kfree(descriptor);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto failed;
 	}
 
-	descriptor->d.length = request->length;
-	descriptor->d.immediate = request->immediate;
-	descriptor->d.key = request->key;
-	descriptor->d.data = descriptor->data;
+	r->descriptor.length    = request->length;
+	r->descriptor.immediate = request->immediate;
+	r->descriptor.key       = request->key;
+	r->descriptor.data      = r->data;
 
-	retval = fw_core_add_descriptor(&descriptor->d);
-	if (retval < 0) {
-		kfree(descriptor);
-		return retval;
+	ret = fw_core_add_descriptor(&r->descriptor);
+	if (ret < 0)
+		goto failed;
+
+	r->resource.release = release_descriptor;
+	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+	if (ret < 0) {
+		fw_core_remove_descriptor(&r->descriptor);
+		goto failed;
 	}
-
-	descriptor->resource.release = release_descriptor;
-	add_client_resource(client, &descriptor->resource);
-	request->handle = descriptor->resource.handle;
+	request->handle = r->resource.handle;
 
 	return 0;
+ failed:
+	kfree(r);
+
+	return ret;
 }
 
 static int ioctl_remove_descriptor(struct client *client, void *buffer)
 {
 	struct fw_cdev_remove_descriptor *request = buffer;
 
-	return release_client_resource(client, request->handle, NULL);
+	return release_client_resource(client, request->handle,
+				       release_descriptor, NULL);
 }
 
-static void
-iso_callback(struct fw_iso_context *context, u32 cycle,
-	     size_t header_length, void *header, void *data)
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
+			 size_t header_length, void *header, void *data)
 {
 	struct client *client = data;
-	struct iso_interrupt *irq;
+	struct iso_interrupt_event *e;
 
-	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
-	if (irq == NULL)
+	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+	if (e == NULL)
 		return;
 
-	irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
-	irq->interrupt.closure   = client->iso_closure;
-	irq->interrupt.cycle     = cycle;
-	irq->interrupt.header_length = header_length;
-	memcpy(irq->interrupt.header, header, header_length);
-	queue_event(client, &irq->event, &irq->interrupt,
-		    sizeof(irq->interrupt) + header_length, NULL, 0);
+	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
+	e->interrupt.closure   = client->iso_closure;
+	e->interrupt.cycle     = cycle;
+	e->interrupt.header_length = header_length;
+	memcpy(e->interrupt.header, header, header_length);
+	queue_event(client, &e->event, &e->interrupt,
+		    sizeof(e->interrupt) + header_length, NULL, 0);
 }
 
 static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@
 	return 0;
 }
 
+static void iso_resource_work(struct work_struct *work)
+{
+	struct iso_resource_event *e;
+	struct iso_resource *r =
+			container_of(work, struct iso_resource, work.work);
+	struct client *client = r->client;
+	int generation, channel, bandwidth, todo;
+	bool skip, free, success;
+
+	spin_lock_irq(&client->lock);
+	generation = client->device->generation;
+	todo = r->todo;
+	/* Allow 1000ms grace period for other reallocations. */
+	if (todo == ISO_RES_ALLOC &&
+	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+			client_get(client);
+		skip = true;
+	} else {
+		/* We could be called twice within the same generation. */
+		skip = todo == ISO_RES_REALLOC &&
+		       r->generation == generation;
+	}
+	free = todo == ISO_RES_DEALLOC ||
+	       todo == ISO_RES_ALLOC_ONCE ||
+	       todo == ISO_RES_DEALLOC_ONCE;
+	r->generation = generation;
+	spin_unlock_irq(&client->lock);
+
+	if (skip)
+		goto out;
+
+	bandwidth = r->bandwidth;
+
+	fw_iso_resource_manage(client->device->card, generation,
+			r->channels, &channel, &bandwidth,
+			todo == ISO_RES_ALLOC ||
+			todo == ISO_RES_REALLOC ||
+			todo == ISO_RES_ALLOC_ONCE);
+	/*
+	 * Is this generation outdated already?  As long as this resource sticks
+	 * in the idr, it will be scheduled again for a newer generation or at
+	 * shutdown.
+	 */
+	if (channel == -EAGAIN &&
+	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+		goto out;
+
+	success = channel >= 0 || bandwidth > 0;
+
+	spin_lock_irq(&client->lock);
+	/*
+	 * Transit from allocation to reallocation, except if the client
+	 * requested deallocation in the meantime.
+	 */
+	if (r->todo == ISO_RES_ALLOC)
+		r->todo = ISO_RES_REALLOC;
+	/*
+	 * Allocation or reallocation failure?  Pull this resource out of the
+	 * idr and prepare for deletion, unless the client is shutting down.
+	 */
+	if (r->todo == ISO_RES_REALLOC && !success &&
+	    !client->in_shutdown &&
+	    idr_find(&client->resource_idr, r->resource.handle)) {
+		idr_remove(&client->resource_idr, r->resource.handle);
+		client_put(client);
+		free = true;
+	}
+	spin_unlock_irq(&client->lock);
+
+	if (todo == ISO_RES_ALLOC && channel >= 0)
+		r->channels = 1ULL << channel;
+
+	if (todo == ISO_RES_REALLOC && success)
+		goto out;
+
+	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
+		e = r->e_alloc;
+		r->e_alloc = NULL;
+	} else {
+		e = r->e_dealloc;
+		r->e_dealloc = NULL;
+	}
+	e->resource.handle	= r->resource.handle;
+	e->resource.channel	= channel;
+	e->resource.bandwidth	= bandwidth;
+
+	queue_event(client, &e->event,
+		    &e->resource, sizeof(e->resource), NULL, 0);
+
+	if (free) {
+		cancel_delayed_work(&r->work);
+		kfree(r->e_alloc);
+		kfree(r->e_dealloc);
+		kfree(r);
+	}
+ out:
+	client_put(client);
+}
+
+static void schedule_iso_resource(struct iso_resource *r)
+{
+	client_get(r->client);
+	if (!schedule_delayed_work(&r->work, 0))
+		client_put(r->client);
+}
+
+static void release_iso_resource(struct client *client,
+				 struct client_resource *resource)
+{
+	struct iso_resource *r =
+		container_of(resource, struct iso_resource, resource);
+
+	spin_lock_irq(&client->lock);
+	r->todo = ISO_RES_DEALLOC;
+	schedule_iso_resource(r);
+	spin_unlock_irq(&client->lock);
+}
+
+static int init_iso_resource(struct client *client,
+		struct fw_cdev_allocate_iso_resource *request, int todo)
+{
+	struct iso_resource_event *e1, *e2;
+	struct iso_resource *r;
+	int ret;
+
+	if ((request->channels == 0 && request->bandwidth == 0) ||
+	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+	    request->bandwidth < 0)
+		return -EINVAL;
+
+	r  = kmalloc(sizeof(*r), GFP_KERNEL);
+	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+	if (r == NULL || e1 == NULL || e2 == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_DELAYED_WORK(&r->work, iso_resource_work);
+	r->client	= client;
+	r->todo		= todo;
+	r->generation	= -1;
+	r->channels	= request->channels;
+	r->bandwidth	= request->bandwidth;
+	r->e_alloc	= e1;
+	r->e_dealloc	= e2;
+
+	e1->resource.closure	= request->closure;
+	e1->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+	e2->resource.closure	= request->closure;
+	e2->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+	if (todo == ISO_RES_ALLOC) {
+		r->resource.release = release_iso_resource;
+		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+		if (ret < 0)
+			goto fail;
+	} else {
+		r->resource.release = NULL;
+		r->resource.handle = -1;
+		schedule_iso_resource(r);
+	}
+	request->handle = r->resource.handle;
+
+	return 0;
+ fail:
+	kfree(r);
+	kfree(e1);
+	kfree(e2);
+
+	return ret;
+}
+
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_ALLOC);
+}
+
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+	struct fw_cdev_deallocate *request = buffer;
+
+	return release_client_resource(client, request->handle,
+				       release_iso_resource, NULL);
+}
+
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+}
+
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+{
+	struct fw_cdev_allocate_iso_resource *request = buffer;
+
+	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+}
+
+/*
+ * Returns a speed code:  Maximum speed to or from this device,
+ * limited by the device's link speed, the local node's link speed,
+ * and all PHY port speeds between the two links.
+ */
+static int ioctl_get_speed(struct client *client, void *buffer)
+{
+	return client->device->max_speed;
+}
+
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_request *request = buffer;
+
+	switch (request->tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Security policy: Only allow accesses to Units Space. */
+	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+		return -EACCES;
+
+	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+}
+
+static int ioctl_send_stream_packet(struct client *client, void *buffer)
+{
+	struct fw_cdev_send_stream_packet *p = buffer;
+	struct fw_cdev_send_request request;
+	int dest;
+
+	if (p->speed > client->device->card->link_speed ||
+	    p->length > 1024 << p->speed)
+		return -EIO;
+
+	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+		return -EINVAL;
+
+	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+	request.tcode		= TCODE_STREAM_DATA;
+	request.length		= p->length;
+	request.closure		= p->closure;
+	request.data		= p->data;
+	request.generation	= p->generation;
+
+	return init_request(client, &request, dest, p->speed);
+}
+
 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_get_info,
 	ioctl_send_request,
@@ -885,13 +1285,20 @@
 	ioctl_start_iso,
 	ioctl_stop_iso,
 	ioctl_get_cycle_timer,
+	ioctl_allocate_iso_resource,
+	ioctl_deallocate_iso_resource,
+	ioctl_allocate_iso_resource_once,
+	ioctl_deallocate_iso_resource_once,
+	ioctl_get_speed,
+	ioctl_send_broadcast_request,
+	ioctl_send_stream_packet,
 };
 
-static int
-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+static int dispatch_ioctl(struct client *client,
+			  unsigned int cmd, void __user *arg)
 {
 	char buffer[256];
-	int retval;
+	int ret;
 
 	if (_IOC_TYPE(cmd) != '#' ||
 	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@
 			return -EFAULT;
 	}
 
-	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
-	if (retval < 0)
-		return retval;
+	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+	if (ret < 0)
+		return ret;
 
 	if (_IOC_DIR(cmd) & _IOC_READ) {
 		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@
 			return -EFAULT;
 	}
 
-	return retval;
+	return ret;
 }
 
-static long
-fw_device_op_ioctl(struct file *file,
-		   unsigned int cmd, unsigned long arg)
+static long fw_device_op_ioctl(struct file *file,
+			       unsigned int cmd, unsigned long arg)
 {
 	struct client *client = file->private_data;
 
@@ -929,9 +1335,8 @@
 }
 
 #ifdef CONFIG_COMPAT
-static long
-fw_device_op_compat_ioctl(struct file *file,
-			  unsigned int cmd, unsigned long arg)
+static long fw_device_op_compat_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg)
 {
 	struct client *client = file->private_data;
 
@@ -947,7 +1352,7 @@
 	struct client *client = file->private_data;
 	enum dma_data_direction direction;
 	unsigned long size;
-	int page_count, retval;
+	int page_count, ret;
 
 	if (fw_device_is_shutdown(client->device))
 		return -ENODEV;
@@ -973,48 +1378,57 @@
 	else
 		direction = DMA_FROM_DEVICE;
 
-	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
-				    page_count, direction);
-	if (retval < 0)
-		return retval;
+	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+				 page_count, direction);
+	if (ret < 0)
+		return ret;
 
-	retval = fw_iso_buffer_map(&client->buffer, vma);
-	if (retval < 0)
+	ret = fw_iso_buffer_map(&client->buffer, vma);
+	if (ret < 0)
 		fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-	return retval;
+	return ret;
+}
+
+static int shutdown_resource(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
+	struct client *client = data;
+
+	r->release(client, r);
+	client_put(client);
+
+	return 0;
 }
 
 static int fw_device_op_release(struct inode *inode, struct file *file)
 {
 	struct client *client = file->private_data;
 	struct event *e, *next_e;
-	struct client_resource *r, *next_r;
-	unsigned long flags;
 
-	if (client->buffer.pages)
-		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+	mutex_lock(&client->device->client_list_mutex);
+	list_del(&client->link);
+	mutex_unlock(&client->device->client_list_mutex);
 
 	if (client->iso_context)
 		fw_iso_context_destroy(client->iso_context);
 
-	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
-		r->release(client, r);
+	if (client->buffer.pages)
+		fw_iso_buffer_destroy(&client->buffer, client->device->card);
 
-	/*
-	 * FIXME: We should wait for the async tasklets to stop
-	 * running before freeing the memory.
-	 */
+	/* Freeze client->resource_idr and client->event_list */
+	spin_lock_irq(&client->lock);
+	client->in_shutdown = true;
+	spin_unlock_irq(&client->lock);
+
+	idr_for_each(&client->resource_idr, shutdown_resource, client);
+	idr_remove_all(&client->resource_idr);
+	idr_destroy(&client->resource_idr);
 
 	list_for_each_entry_safe(e, next_e, &client->event_list, link)
 		kfree(e);
 
-	spin_lock_irqsave(&client->device->card->lock, flags);
-	list_del(&client->link);
-	spin_unlock_irqrestore(&client->device->card->lock, flags);
-
-	fw_device_put(client->device);
-	kfree(client);
+	client_put(client);
 
 	return 0;
 }
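
For reference, a minimal userspace sketch of the iso resource allocation ABI added above.  This is an illustration only, assuming the FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl number and struct fw_cdev_allocate_iso_resource layout from the linux/firewire-cdev.h changes in this series; /dev/fw1 is a hypothetical device node.

/*
 * Sketch: ask the kernel to allocate one isochronous channel.
 * Assumes linux/firewire-cdev.h as extended by this series.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_allocate_iso_resource req = {
		.closure   = 0,
		.channels  = 1ULL << 5,	/* try channel 5 only */
		.bandwidth = 0,		/* channel only, no bandwidth */
	};
	int fd = open("/dev/fw1", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	/*
	 * Kicks off the delayed allocation; the result arrives later as
	 * an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event via read().
	 */
	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
		perror("FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE");

	return 0;
}

The handle returned in req.handle can later be passed back through FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE, which resolves to release_iso_resource() above.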
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index bf53acb..a47e212 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -18,22 +18,26 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kthread.h>
-#include <linux/device.h>
+#include <linux/ctype.h>
 #include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
 #include <linux/idr.h>
 #include <linux/jiffies.h>
-#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
 #include <asm/system.h>
-#include <linux/ctype.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
 #include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
 
 void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
 {
@@ -132,8 +136,7 @@
 			vendor, model, specifier_id, version);
 }
 
-static int
-fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
 	struct fw_unit *unit = fw_unit(dev);
 	char modalias[64];
@@ -152,27 +155,6 @@
 };
 EXPORT_SYMBOL(fw_bus_type);
 
-static void fw_device_release(struct device *dev)
-{
-	struct fw_device *device = fw_device(dev);
-	struct fw_card *card = device->card;
-	unsigned long flags;
-
-	/*
-	 * Take the card lock so we don't set this to NULL while a
-	 * FW_NODE_UPDATED callback is being handled or while the
-	 * bus manager work looks at this node.
-	 */
-	spin_lock_irqsave(&card->lock, flags);
-	device->node->data = NULL;
-	spin_unlock_irqrestore(&card->lock, flags);
-
-	fw_node_put(device->node);
-	kfree(device->config_rom);
-	kfree(device);
-	fw_card_put(card);
-}
-
 int fw_device_enable_phys_dma(struct fw_device *device)
 {
 	int generation = device->generation;
@@ -191,8 +173,8 @@
 	u32 key;
 };
 
-static ssize_t
-show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_immediate(struct device *dev,
+			      struct device_attribute *dattr, char *buf)
 {
 	struct config_rom_attribute *attr =
 		container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +205,8 @@
 #define IMMEDIATE_ATTR(name, key)				\
 	{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
 
-static ssize_t
-show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
+static ssize_t show_text_leaf(struct device *dev,
+			      struct device_attribute *dattr, char *buf)
 {
 	struct config_rom_attribute *attr =
 		container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +275,9 @@
 	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
 };
 
-static void
-init_fw_attribute_group(struct device *dev,
-			struct device_attribute *attrs,
-			struct fw_attribute_group *group)
+static void init_fw_attribute_group(struct device *dev,
+				    struct device_attribute *attrs,
+				    struct fw_attribute_group *group)
 {
 	struct device_attribute *attr;
 	int i, j;
@@ -319,9 +300,8 @@
 	dev->groups = group->groups;
 }
 
-static ssize_t
-modalias_show(struct device *dev,
-	      struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
 {
 	struct fw_unit *unit = fw_unit(dev);
 	int length;
@@ -332,9 +312,8 @@
 	return length + 1;
 }
 
-static ssize_t
-rom_index_show(struct device *dev,
-	       struct device_attribute *attr, char *buf)
+static ssize_t rom_index_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct fw_device *device = fw_device(dev->parent);
 	struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +328,8 @@
 	__ATTR_NULL,
 };
 
-static ssize_t
-config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t config_rom_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
 	struct fw_device *device = fw_device(dev);
 	size_t length;
@@ -363,8 +342,8 @@
 	return length;
 }
 
-static ssize_t
-guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t guid_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
 {
 	struct fw_device *device = fw_device(dev);
 	int ret;
@@ -383,8 +362,8 @@
 	__ATTR_NULL,
 };
 
-static int
-read_rom(struct fw_device *device, int generation, int index, u32 *data)
+static int read_rom(struct fw_device *device,
+		    int generation, int index, u32 *data)
 {
 	int rcode;
 
@@ -539,7 +518,7 @@
 
 	kfree(old_rom);
 	ret = 0;
-	device->cmc = rom[2] & 1 << 30;
+	device->cmc = rom[2] >> 30 & 1;
  out:
 	kfree(rom);
 
@@ -679,11 +658,53 @@
 	fw_device_put(device);
 }
 
+static void fw_device_release(struct device *dev)
+{
+	struct fw_device *device = fw_device(dev);
+	struct fw_card *card = device->card;
+	unsigned long flags;
+
+	/*
+	 * Take the card lock so we don't set this to NULL while a
+	 * FW_NODE_UPDATED callback is being handled or while the
+	 * bus manager work looks at this node.
+	 */
+	spin_lock_irqsave(&card->lock, flags);
+	device->node->data = NULL;
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	fw_node_put(device->node);
+	kfree(device->config_rom);
+	kfree(device);
+	fw_card_put(card);
+}
+
 static struct device_type fw_device_type = {
-	.release	= fw_device_release,
+	.release = fw_device_release,
 };
 
-static void fw_device_update(struct work_struct *work);
+static int update_unit(struct device *dev, void *data)
+{
+	struct fw_unit *unit = fw_unit(dev);
+	struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+		down(&dev->sem);
+		driver->update(unit);
+		up(&dev->sem);
+	}
+
+	return 0;
+}
+
+static void fw_device_update(struct work_struct *work)
+{
+	struct fw_device *device =
+		container_of(work, struct fw_device, work.work);
+
+	fw_device_cdev_update(device);
+	device_for_each_child(&device->device, NULL, update_unit);
+}
 
 /*
  * If a device was pending for deletion because its node went away but its
@@ -735,12 +756,50 @@
 	return match;
 }
 
+enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
+
+void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
+{
+	struct fw_card *card = device->card;
+	__be32 data;
+	int rcode;
+
+	if (!card->broadcast_channel_allocated)
+		return;
+
+	if (device->bc_implemented == BC_UNKNOWN) {
+		rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
+				device->node_id, generation, device->max_speed,
+				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+				&data, 4);
+		switch (rcode) {
+		case RCODE_COMPLETE:
+			if (data & cpu_to_be32(1 << 31)) {
+				device->bc_implemented = BC_IMPLEMENTED;
+				break;
+			}
+			/* else fall through to case address error */
+		case RCODE_ADDRESS_ERROR:
+			device->bc_implemented = BC_UNIMPLEMENTED;
+		}
+	}
+
+	if (device->bc_implemented == BC_IMPLEMENTED) {
+		data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
+				   BROADCAST_CHANNEL_VALID);
+		fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+				device->node_id, generation, device->max_speed,
+				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+				&data, 4);
+	}
+}
+
 static void fw_device_init(struct work_struct *work)
 {
 	struct fw_device *device =
 		container_of(work, struct fw_device, work.work);
 	struct device *revived_dev;
-	int minor, err;
+	int minor, ret;
 
 	/*
 	 * All failure paths here set node->data to NULL, so that we
@@ -776,12 +835,12 @@
 
 	fw_device_get(device);
 	down_write(&fw_device_rwsem);
-	err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
 	      idr_get_new(&fw_device_idr, device, &minor) :
 	      -ENOMEM;
 	up_write(&fw_device_rwsem);
 
-	if (err < 0)
+	if (ret < 0)
 		goto error;
 
 	device->device.bus = &fw_bus_type;
@@ -828,6 +887,8 @@
 				  device->config_rom[3], device->config_rom[4],
 				  1 << device->max_speed);
 		device->config_rom_retries = 0;
+
+		fw_device_set_broadcast_channel(device, device->generation);
 	}
 
 	/*
@@ -851,29 +912,6 @@
 	put_device(&device->device);	/* our reference */
 }
 
-static int update_unit(struct device *dev, void *data)
-{
-	struct fw_unit *unit = fw_unit(dev);
-	struct fw_driver *driver = (struct fw_driver *)dev->driver;
-
-	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
-		down(&dev->sem);
-		driver->update(unit);
-		up(&dev->sem);
-	}
-
-	return 0;
-}
-
-static void fw_device_update(struct work_struct *work)
-{
-	struct fw_device *device =
-		container_of(work, struct fw_device, work.work);
-
-	fw_device_cdev_update(device);
-	device_for_each_child(&device->device, NULL, update_unit);
-}
-
 enum {
 	REREAD_BIB_ERROR,
 	REREAD_BIB_GONE,
@@ -894,7 +932,7 @@
 		if (i == 0 && q == 0)
 			return REREAD_BIB_GONE;
 
-		if (i > device->config_rom_length || q != device->config_rom[i])
+		if (q != device->config_rom[i])
 			return REREAD_BIB_CHANGED;
 	}
 
@@ -1004,6 +1042,7 @@
 		device->node = fw_node_get(node);
 		device->node_id = node->node_id;
 		device->generation = card->generation;
+		mutex_init(&device->client_list_mutex);
 		INIT_LIST_HEAD(&device->client_list);
 
 		/*
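
The cmc change above (rom[2] >> 30 & 1 instead of rom[2] & 1 << 30) pairs with fw_device.cmc becoming a one-bit bitfield in fw-device.h below.  A standalone sketch of why the old expression would break once the field is one bit wide:

struct s { unsigned cmc:1; };

unsigned demo(unsigned quadlet)	/* quadlet stands in for rom[2] */
{
	struct s d;

	d.cmc = quadlet & 1 << 30;	/* 1-bit field keeps only bit 0: always 0 */
	d.cmc = quadlet >> 30 & 1;	/* shift first: correctly extracts bit 30 */

	return d.cmc;
}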
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 8ef6ec2..9758893 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -19,10 +19,17 @@
 #ifndef __fw_device_h
 #define __fw_device_h
 
+#include <linux/device.h>
 #include <linux/fs.h>
-#include <linux/cdev.h>
 #include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
 #include <asm/atomic.h>
 
 enum fw_device_state {
@@ -38,6 +45,9 @@
 	struct attribute *attrs[11];
 };
 
+struct fw_node;
+struct fw_card;
+
 /*
  * Note, fw_device.generation always has to be read before fw_device.node_id.
  * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
@@ -61,13 +71,18 @@
 	int node_id;
 	int generation;
 	unsigned max_speed;
-	bool cmc;
 	struct fw_card *card;
 	struct device device;
+
+	struct mutex client_list_mutex;
 	struct list_head client_list;
+
 	u32 *config_rom;
 	size_t config_rom_length;
 	int config_rom_retries;
+	unsigned cmc:1;
+	unsigned bc_implemented:2;
+
 	struct delayed_work work;
 	struct fw_attribute_group attribute_group;
 };
@@ -96,6 +111,7 @@
 
 struct fw_device *fw_device_get_by_devt(dev_t devt);
 int fw_device_enable_phys_dma(struct fw_device *device);
+void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
 
 void fw_device_cdev_update(struct fw_device *device);
 void fw_device_cdev_remove(struct fw_device *device);
@@ -176,8 +192,7 @@
 	const struct fw_device_id *id_table;
 };
 
-static inline struct fw_driver *
-fw_driver(struct device_driver *drv)
+static inline struct fw_driver *fw_driver(struct device_driver *drv)
 {
 	return container_of(drv, struct fw_driver, driver);
 }
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index e14c03d..2baf100 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
 /*
- * Isochronous IO functionality
+ * Isochronous I/O functionality:
+ *   - Isochronous DMA context management
+ *   - Isochronous bus resource management (channels, bandwidth), client side
  *
  * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
  *
@@ -18,21 +20,25 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 
-#include "fw-transaction.h"
 #include "fw-topology.h"
-#include "fw-device.h"
+#include "fw-transaction.h"
 
-int
-fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-		   int page_count, enum dma_data_direction direction)
+/*
+ * Isochronous DMA context management
+ */
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction)
 {
-	int i, j, retval = -ENOMEM;
+	int i, j;
 	dma_addr_t address;
 
 	buffer->page_count = page_count;
@@ -69,19 +75,21 @@
 	kfree(buffer->pages);
  out:
 	buffer->pages = NULL;
-	return retval;
+
+	return -ENOMEM;
 }
 
 int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
 {
 	unsigned long uaddr;
-	int i, retval;
+	int i, err;
 
 	uaddr = vma->vm_start;
 	for (i = 0; i < buffer->page_count; i++) {
-		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
-		if (retval)
-			return retval;
+		err = vm_insert_page(vma, uaddr, buffer->pages[i]);
+		if (err)
+			return err;
+
 		uaddr += PAGE_SIZE;
 	}
 
@@ -105,14 +113,14 @@
 	buffer->pages = NULL;
 }
 
-struct fw_iso_context *
-fw_iso_context_create(struct fw_card *card, int type,
-		      int channel, int speed, size_t header_size,
-		      fw_iso_callback_t callback, void *callback_data)
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+		int type, int channel, int speed, size_t header_size,
+		fw_iso_callback_t callback, void *callback_data)
 {
 	struct fw_iso_context *ctx;
 
-	ctx = card->driver->allocate_iso_context(card, type, header_size);
+	ctx = card->driver->allocate_iso_context(card,
+						 type, channel, header_size);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -134,25 +142,186 @@
 	card->driver->free_iso_context(ctx);
 }
 
-int
-fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
+int fw_iso_context_start(struct fw_iso_context *ctx,
+			 int cycle, int sync, int tags)
 {
 	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
 }
 
-int
-fw_iso_context_queue(struct fw_iso_context *ctx,
-		     struct fw_iso_packet *packet,
-		     struct fw_iso_buffer *buffer,
-		     unsigned long payload)
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload)
 {
 	struct fw_card *card = ctx->card;
 
 	return card->driver->queue_iso(ctx, packet, buffer, payload);
 }
 
-int
-fw_iso_context_stop(struct fw_iso_context *ctx)
+int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
 }
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+			    int bandwidth, bool allocate)
+{
+	__be32 data[2];
+	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+	/*
+	 * On a 1394a IRM with low contention, try < 1 is enough.
+	 * On a 1394-1995 IRM, we need at least try < 2.
+	 * Let's just do try < 5.
+	 */
+	for (try = 0; try < 5; try++) {
+		new = allocate ? old - bandwidth : old + bandwidth;
+		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+			break;
+
+		data[0] = cpu_to_be32(old);
+		data[1] = cpu_to_be32(new);
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+				irm_id, generation, SCODE_100,
+				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+				data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all bandwidth. */
+			return allocate ? -EAGAIN : bandwidth;
+
+		case RCODE_COMPLETE:
+			if (be32_to_cpup(data) == old)
+				return bandwidth;
+
+			old = be32_to_cpup(data);
+			/* Fall through. */
+		}
+	}
+
+	return -EIO;
+}
+
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+			  u32 channels_mask, u64 offset, bool allocate)
+{
+	__be32 data[2], c, all, old;
+	int i, retry = 5;
+
+	old = all = allocate ? cpu_to_be32(~0) : 0;
+
+	for (i = 0; i < 32; i++) {
+		if (!(channels_mask & 1 << i))
+			continue;
+
+		c = cpu_to_be32(1 << (31 - i));
+		if ((old & c) != (all & c))
+			continue;
+
+		data[0] = old;
+		data[1] = old ^ c;
+		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+					   irm_id, generation, SCODE_100,
+					   offset, data, sizeof(data))) {
+		case RCODE_GENERATION:
+			/* A generation change frees all channels. */
+			return allocate ? -EAGAIN : i;
+
+		case RCODE_COMPLETE:
+			if (data[0] == old)
+				return i;
+
+			old = data[0];
+
+			/* Is the IRM 1394a-2000 compliant? */
+			if ((data[0] & c) == (data[1] & c))
+				continue;
+
+			/* 1394-1995 IRM, fall through to retry. */
+		default:
+			if (retry--)
+				i--;
+		}
+	}
+
+	return -EIO;
+}
+
+static void deallocate_channel(struct fw_card *card, int irm_id,
+			       int generation, int channel)
+{
+	u32 mask;
+	u64 offset;
+
+	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
+	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
+				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+
+	manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ *
+ * Allocates or deallocates at most one channel out of channels_mask.
+ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
+ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
+ * channel 0 and LSB for channel 63.)
+ * Allocates or deallocates as many bandwidth allocation units as specified.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel allocation fails, no bandwidth will be allocated either.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * But deallocations of channel and bandwidth are tried independently
+ * of each other's success.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+			    u64 channels_mask, int *channel, int *bandwidth,
+			    bool allocate)
+{
+	u32 channels_hi = channels_mask;	/* channels 31...0 */
+	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
+	int irm_id, ret, c = -EINVAL;
+
+	spin_lock_irq(&card->lock);
+	irm_id = card->irm_node->node_id;
+	spin_unlock_irq(&card->lock);
+
+	if (channels_hi)
+		c = manage_channel(card, irm_id, generation, channels_hi,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+	if (channels_lo && c < 0) {
+		c = manage_channel(card, irm_id, generation, channels_lo,
+		    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+		if (c >= 0)
+			c += 32;
+	}
+	*channel = c;
+
+	if (allocate && channels_mask != 0 && c < 0)
+		*bandwidth = 0;
+
+	if (*bandwidth == 0)
+		return;
+
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+	if (ret < 0)
+		*bandwidth = 0;
+
+	if (allocate && ret < 0 && c >= 0) {
+		deallocate_channel(card, irm_id, generation, c);
+		*channel = ret;
+	}
+}
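
A small sketch (not part of the patch) of the mask convention documented in the fw_iso_resource_manage() comment above:

#include <stdint.h>

/* Request any one of channels 0..3: bit 0 of the mask is channel 0. */
static const uint64_t channels_mask = 0xfULL;

/*
 * manage_channel() maps caller bit i to IRM register bit (31 - i):
 * channel 0 is the MSB of CHANNELS_AVAILABLE_HI, channel 31 its LSB,
 * and channels 32..63 sit in CHANNELS_AVAILABLE_LO the same way.
 */
static uint32_t irm_bit_for_channel(int channel)
{
	int i = channel & 31;	/* position within one 32-channel register */

	return UINT32_C(1) << (31 - i);
}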
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828..1180d0b 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@
 
 	u32 it_context_mask;
 	struct iso_context *it_context_list;
+	u64 ir_context_channels;
 	u32 ir_context_mask;
 	struct iso_context *ir_context_list;
 };
@@ -441,9 +442,8 @@
 	reg_read(ohci, OHCI1394_Version);
 }
 
-static int
-ohci_update_phy_reg(struct fw_card *card, int addr,
-		    int clear_bits, int set_bits)
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+			       int clear_bits, int set_bits)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	u32 val, old;
@@ -658,8 +658,8 @@
 	}
 }
 
-static int
-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx,
+			   struct fw_ohci *ohci, u32 regs)
 {
 	struct ar_buffer ab;
 
@@ -690,8 +690,7 @@
 	flush_writes(ctx->ohci);
 }
 
-static struct descriptor *
-find_branch_descriptor(struct descriptor *d, int z)
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
 	int b, key;
 
@@ -751,8 +750,7 @@
  * Allocate a new buffer and add it to the list of free buffers for this
  * context.  Must be called with ohci->lock held.
  */
-static int
-context_add_buffer(struct context *ctx)
+static int context_add_buffer(struct context *ctx)
 {
 	struct descriptor_buffer *desc;
 	dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@
 	return 0;
 }
 
-static int
-context_init(struct context *ctx, struct fw_ohci *ohci,
-	     u32 regs, descriptor_callback_t callback)
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+			u32 regs, descriptor_callback_t callback)
 {
 	ctx->ohci = ohci;
 	ctx->regs = regs;
@@ -814,8 +811,7 @@
 	return 0;
 }
 
-static void
-context_release(struct context *ctx)
+static void context_release(struct context *ctx)
 {
 	struct fw_card *card = &ctx->ohci->card;
 	struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@
 }
 
 /* Must be called with ohci->lock held */
-static struct descriptor *
-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+static struct descriptor *context_get_descriptors(struct context *ctx,
+						  int z, dma_addr_t *d_bus)
 {
 	struct descriptor *d = NULL;
 	struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@
  * Must always be called with the ohci->lock held to ensure proper
  * generation handling and locking around packet queue manipulation.
  */
-static int
-at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+static int at_context_queue_packet(struct context *ctx,
+				   struct fw_packet *packet)
 {
 	struct fw_ohci *ohci = ctx->ohci;
 	dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@
 	 */
 
 	header = (__le32 *) &d[1];
-	if (packet->header_length > 8) {
+	switch (packet->header_length) {
+	case 16:
+	case 12:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@
 			header[3] = (__force __le32) packet->header[3];
 
 		d[0].req_count = cpu_to_le16(packet->header_length);
-	} else {
+		break;
+
+	case 8:
 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32(packet->header[0]);
 		header[2] = cpu_to_le32(packet->header[1]);
 		d[0].req_count = cpu_to_le16(12);
+		break;
+
+	case 4:
+		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+					(packet->speed << 16));
+		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+		d[0].req_count = cpu_to_le16(8);
+		break;
+
+	default:
+		/* BUG(); */
+		packet->ack = RCODE_SEND_ERROR;
+		return -1;
 	}
 
 	driver_data = (struct driver_data *) &d[3];
@@ -1095,8 +1108,8 @@
 #define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
 #define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
 
-static void
-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_rom(struct fw_ohci *ohci,
+			     struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
 	int tcode, length, i;
@@ -1122,8 +1135,8 @@
 	fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+static void handle_local_lock(struct fw_ohci *ohci,
+			      struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
 	int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@
 	fw_core_handle_response(&ohci->card, &response);
 }
 
-static void
-handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
 	u64 offset;
 	u32 csr;
@@ -1205,11 +1217,10 @@
 	}
 }
 
-static void
-at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 {
 	unsigned long flags;
-	int retval;
+	int ret;
 
 	spin_lock_irqsave(&ctx->ohci->lock, flags);
 
@@ -1220,10 +1231,10 @@
 		return;
 	}
 
-	retval = at_context_queue_packet(ctx, packet);
+	ret = at_context_queue_packet(ctx, packet);
 	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
 
-	if (retval < 0)
+	if (ret < 0)
 		packet->callback(packet, &ctx->ohci->card, packet->ack);
 
 }
@@ -1590,12 +1601,12 @@
 	return 0;
 }
 
-static int
-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+static int ohci_set_config_rom(struct fw_card *card,
+			       u32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci;
 	unsigned long flags;
-	int retval = -EBUSY;
+	int ret = -EBUSY;
 	__be32 *next_config_rom;
 	dma_addr_t uninitialized_var(next_config_rom_bus);
 
@@ -1649,7 +1660,7 @@
 
 		reg_write(ohci, OHCI1394_ConfigROMmap,
 			  ohci->next_config_rom_bus);
-		retval = 0;
+		ret = 0;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@
 	 * controller could need to access it before the bus reset
 	 * takes effect.
 	 */
-	if (retval == 0)
+	if (ret == 0)
 		fw_core_initiate_bus_reset(&ohci->card, 1);
 	else
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  next_config_rom, next_config_rom_bus);
 
-	return retval;
+	return ret;
 }
 
 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct context *ctx = &ohci->at_request_ctx;
 	struct driver_data *driver_data = packet->driver_data;
-	int retval = -ENOENT;
+	int ret = -ENOENT;
 
 	tasklet_disable(&ctx->tasklet);
 
@@ -1704,23 +1715,22 @@
 	driver_data->packet = NULL;
 	packet->ack = RCODE_CANCELLED;
 	packet->callback(packet, &ohci->card, packet->ack);
-	retval = 0;
-
+	ret = 0;
  out:
 	tasklet_enable(&ctx->tasklet);
 
-	return retval;
+	return ret;
 }
 
-static int
-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+static int ohci_enable_phys_dma(struct fw_card *card,
+				int node_id, int generation)
 {
 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
 	return 0;
 #else
 	struct fw_ohci *ohci = fw_ohci(card);
 	unsigned long flags;
-	int n, retval = 0;
+	int n, ret = 0;
 
 	/*
 	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@
 	spin_lock_irqsave(&ohci->lock, flags);
 
 	if (ohci->generation != generation) {
-		retval = -ESTALE;
+		ret = -ESTALE;
 		goto out;
 	}
 
@@ -1748,12 +1758,12 @@
 	flush_writes(ohci);
  out:
 	spin_unlock_irqrestore(&ohci->lock, flags);
-	return retval;
+
+	return ret;
 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
-static u64
-ohci_get_bus_time(struct fw_card *card)
+static u64 ohci_get_bus_time(struct fw_card *card)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	u32 cycle_time;
@@ -1765,6 +1775,28 @@
 	return bus_time;
 }
 
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+	int i = ctx->header_length;
+
+	if (i + ctx->base.header_size > PAGE_SIZE)
+		return;
+
+	/*
+	 * The iso header is byteswapped to little endian by
+	 * the controller, but the remaining header quadlets
+	 * are big endian.  We want to present all the headers
+	 * as big endian, so we have to swap the first quadlet.
+	 */
+	if (ctx->base.header_size > 0)
+		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+	if (ctx->base.header_size > 4)
+		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+	if (ctx->base.header_size > 8)
+		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+	ctx->header_length += ctx->base.header_size;
+}
+
 static int handle_ir_dualbuffer_packet(struct context *context,
 				       struct descriptor *d,
 				       struct descriptor *last)
@@ -1775,7 +1807,6 @@
 	__le32 *ir_header;
 	size_t header_length;
 	void *p, *end;
-	int i;
 
 	if (db->first_res_count != 0 && db->second_res_count != 0) {
 		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@
 	header_length = le16_to_cpu(db->first_req_count) -
 		le16_to_cpu(db->first_res_count);
 
-	i = ctx->header_length;
 	p = db + 1;
 	end = p + header_length;
-	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
-		/*
-		 * The iso header is byteswapped to little endian by
-		 * the controller, but the remaining header quadlets
-		 * are big endian.  We want to present all the headers
-		 * as big endian, so we have to swap the first
-		 * quadlet.
-		 */
-		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		i += ctx->base.header_size;
+	while (p < end) {
+		copy_iso_headers(ctx, p);
 		ctx->excess_bytes +=
 			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-		p += ctx->base.header_size + 4;
+		p += max(ctx->base.header_size, (size_t)8);
 	}
-	ctx->header_length = i;
 
 	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
 		le16_to_cpu(db->second_res_count);
@@ -1832,7 +1852,6 @@
 	struct descriptor *pd;
 	__le32 *ir_header;
 	void *p;
-	int i;
 
 	for (pd = d; pd <= last; pd++) {
 		if (pd->transfer_status)
@@ -1842,21 +1861,8 @@
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
 
-	i   = ctx->header_length;
-	p   = last + 1;
-
-	if (ctx->base.header_size > 0 &&
-			i + ctx->base.header_size <= PAGE_SIZE) {
-		/*
-		 * The iso header is byteswapped to little endian by
-		 * the controller, but the remaining header quadlets
-		 * are big endian.  We want to present all the headers
-		 * as big endian, so we have to swap the first quadlet.
-		 */
-		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
-		ctx->header_length += ctx->base.header_size;
-	}
+	p = last + 1;
+	copy_iso_headers(ctx, p);
 
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@
 	return 1;
 }
 
-static struct fw_iso_context *
-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+				int type, int channel, size_t header_size)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct iso_context *ctx, *list;
 	descriptor_callback_t callback;
+	u64 *channels, dont_care = ~0ULL;
 	u32 *mask, regs;
 	unsigned long flags;
-	int index, retval = -ENOMEM;
+	int index, ret = -ENOMEM;
 
 	if (type == FW_ISO_CONTEXT_TRANSMIT) {
+		channels = &dont_care;
 		mask = &ohci->it_context_mask;
 		list = ohci->it_context_list;
 		callback = handle_it_packet;
 	} else {
+		channels = &ohci->ir_context_channels;
 		mask = &ohci->ir_context_mask;
 		list = ohci->ir_context_list;
 		if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@
 	}
 
 	spin_lock_irqsave(&ohci->lock, flags);
-	index = ffs(*mask) - 1;
-	if (index >= 0)
+	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+	if (index >= 0) {
+		*channels &= ~(1ULL << channel);
 		*mask &= ~(1 << index);
+	}
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	if (index < 0)
@@ -1932,8 +1943,8 @@
 	if (ctx->header == NULL)
 		goto out;
 
-	retval = context_init(&ctx->context, ohci, regs, callback);
-	if (retval < 0)
+	ret = context_init(&ctx->context, ohci, regs, callback);
+	if (ret < 0)
 		goto out_with_header;
 
 	return &ctx->base;
@@ -1945,7 +1956,7 @@
 	*mask |= 1 << index;
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
-	return ERR_PTR(retval);
+	return ERR_PTR(ret);
 }
 
 static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@
 	} else {
 		index = ctx - ohci->ir_context_list;
 		ohci->ir_context_mask |= 1 << index;
+		ohci->ir_context_channels |= 1ULL << base->channel;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int
-ohci_queue_iso_transmit(struct fw_iso_context *base,
-			struct fw_iso_packet *packet,
-			struct fw_iso_buffer *buffer,
-			unsigned long payload)
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+				   struct fw_iso_packet *packet,
+				   struct fw_iso_buffer *buffer,
+				   unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@
 	return 0;
 }
 
-static int
-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-				  struct fw_iso_packet *packet,
-				  struct fw_iso_buffer *buffer,
-				  unsigned long payload)
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+					     struct fw_iso_packet *packet,
+					     struct fw_iso_buffer *buffer,
+					     unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@
 	z = 2;
 
 	/*
-	 * The OHCI controller puts the status word in the header
-	 * buffer too, so we need 4 extra bytes per packet.
+	 * The OHCI controller puts the isochronous header and trailer in the
+	 * buffer, so we need at least 8 bytes.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * (ctx->base.header_size + 4);
+	header_size = packet_count * max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2183,8 @@
 		db = (struct db_descriptor *) d;
 		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+		db->first_size =
+		    cpu_to_le16(max(ctx->base.header_size, (size_t)8));
 		if (p->skip && rest == p->payload_length) {
 			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 			db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@
 	return 0;
 }
 
-static int
-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-					 struct fw_iso_packet *packet,
-					 struct fw_iso_buffer *buffer,
-					 unsigned long payload)
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+					struct fw_iso_packet *packet,
+					struct fw_iso_buffer *buffer,
+					unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@
 	int page, offset, packet_count, header_size, payload_per_buffer;
 
 	/*
-	 * The OHCI controller puts the status word in the
-	 * buffer too, so we need 4 extra bytes per packet.
+	 * The OHCI controller puts the isochronous header and trailer in the
+	 * buffer, so we need at least 8 bytes.
 	 */
 	packet_count = p->header_length / ctx->base.header_size;
-	header_size  = ctx->base.header_size + 4;
+	header_size  = max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@
 	return 0;
 }
 
-static int
-ohci_queue_iso(struct fw_iso_context *base,
-	       struct fw_iso_packet *packet,
-	       struct fw_iso_buffer *buffer,
-	       unsigned long payload)
+static int ohci_queue_iso(struct fw_iso_context *base,
+			  struct fw_iso_packet *packet,
+			  struct fw_iso_buffer *buffer,
+			  unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	unsigned long flags;
-	int retval;
+	int ret;
 
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
+		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
 	else if (ctx->context.ohci->use_dualbuffer)
-		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
-							 buffer, payload);
+		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
+							buffer, payload);
 	else
-		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-								buffer,
-								payload);
+		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+							buffer, payload);
 	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
-	return retval;
+	return ret;
 }
 
 static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-static int __devinit
-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit pci_probe(struct pci_dev *dev,
+			       const struct pci_device_id *ent)
 {
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+	ohci->ir_context_channels = ~0ULL;
 	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
 	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@
 		reg_read(ohci, OHCI1394_GUIDLo);
 
 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
-	if (err < 0)
+	if (err)
 		goto fail_self_id;
 
 	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
 		  dev_name(&dev->dev), version >> 16, version & 0xff);
+
 	return 0;
 
  fail_self_id:
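
A standalone sketch of the per-packet header stride rule that the max(ctx->base.header_size, (size_t)8) sites above implement (the function name is illustrative):

#include <stddef.h>

/*
 * The controller deposits the isochronous header plus trailer (8 bytes)
 * per packet even when the client asked for fewer header bytes, so the
 * stride through the header buffer is at least 8.
 */
static size_t ir_header_stride(size_t client_header_size)
{
	return client_header_size > 8 ? client_header_size : 8;
}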
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index c71c441..2bcf515 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -392,20 +392,18 @@
 	}
 };
 
-static void
-free_orb(struct kref *kref)
+static void free_orb(struct kref *kref)
 {
 	struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
 
 	kfree(orb);
 }
 
-static void
-sbp2_status_write(struct fw_card *card, struct fw_request *request,
-		  int tcode, int destination, int source,
-		  int generation, int speed,
-		  unsigned long long offset,
-		  void *payload, size_t length, void *callback_data)
+static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
+			      int tcode, int destination, int source,
+			      int generation, int speed,
+			      unsigned long long offset,
+			      void *payload, size_t length, void *callback_data)
 {
 	struct sbp2_logical_unit *lu = callback_data;
 	struct sbp2_orb *orb;
@@ -451,9 +449,8 @@
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
 
-static void
-complete_transaction(struct fw_card *card, int rcode,
-		     void *payload, size_t length, void *data)
+static void complete_transaction(struct fw_card *card, int rcode,
+				 void *payload, size_t length, void *data)
 {
 	struct sbp2_orb *orb = data;
 	unsigned long flags;
@@ -482,9 +479,8 @@
 	kref_put(&orb->kref, free_orb);
 }
 
-static void
-sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
-	      int node_id, int generation, u64 offset)
+static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+			  int node_id, int generation, u64 offset)
 {
 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	unsigned long flags;
@@ -531,8 +527,8 @@
 	return retval;
 }
 
-static void
-complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_management_orb(struct sbp2_orb *base_orb,
+				    struct sbp2_status *status)
 {
 	struct sbp2_management_orb *orb =
 		container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@
 	complete(&orb->done);
 }
 
-static int
-sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
-			 int generation, int function, int lun_or_login_id,
-			 void *response)
+static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+				    int generation, int function,
+				    int lun_or_login_id, void *response)
 {
 	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@
 			   &d, sizeof(d));
 }
 
-static void
-complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
-				   void *payload, size_t length, void *data)
+static void complete_agent_reset_write_no_wait(struct fw_card *card,
+		int rcode, void *payload, size_t length, void *data)
 {
 	kfree(data);
 }
@@ -1299,8 +1293,7 @@
 				 sizeof(orb->page_table), DMA_TO_DEVICE);
 }
 
-static unsigned int
-sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
 {
 	int sam_status;
 
@@ -1337,8 +1330,8 @@
 	}
 }
 
-static void
-complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+static void complete_command_orb(struct sbp2_orb *base_orb,
+				 struct sbp2_status *status)
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@
 	orb->done(orb->cmd);
 }
 
-static int
-sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
-		     struct sbp2_logical_unit *lu)
+static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
+		struct fw_device *device, struct sbp2_logical_unit *lu)
 {
 	struct scatterlist *sg = scsi_sglist(orb->cmd);
 	int i, n;
@@ -1584,9 +1576,8 @@
  * This is the concatenation of target port identifier and logical unit
  * identifier as per SAM-2...SAM-4 annex A.
  */
-static ssize_t
-sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
-			    char *buf)
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct sbp2_logical_unit *lu;
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 8dd6703..d0deecc 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -314,9 +314,8 @@
 				   struct fw_node * node,
 				   struct fw_node * parent);
 
-static void
-for_each_fw_node(struct fw_card *card, struct fw_node *root,
-		 fw_node_callback_t callback)
+static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
+			     fw_node_callback_t callback)
 {
 	struct list_head list;
 	struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@
 		fw_node_put(node);
 }
 
-static void
-report_lost_node(struct fw_card *card,
-		 struct fw_node *node, struct fw_node *parent)
+static void report_lost_node(struct fw_card *card,
+			     struct fw_node *node, struct fw_node *parent)
 {
 	fw_node_event(card, node, FW_NODE_DESTROYED);
 	fw_node_put(node);
@@ -360,9 +358,8 @@
 	card->bm_retries = 0;
 }
 
-static void
-report_found_node(struct fw_card *card,
-		  struct fw_node *node, struct fw_node *parent)
+static void report_found_node(struct fw_card *card,
+			      struct fw_node *node, struct fw_node *parent)
 {
 	int b_path = (node->phy_speed == SCODE_BETA);
 
@@ -415,8 +412,7 @@
  * found, lost or updated.  Update the nodes in the card topology tree
  * as we go.
  */
-static void
-update_tree(struct fw_card *card, struct fw_node *root)
+static void update_tree(struct fw_card *card, struct fw_node *root)
 {
 	struct list_head list0, list1;
 	struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@
 	}
 }
 
-static void
-update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
+static void update_topology_map(struct fw_card *card,
+				u32 *self_ids, int self_id_count)
 {
 	int node_count;
 
@@ -510,10 +506,8 @@
 	fw_compute_block_crc(card->topology_map);
 }
 
-void
-fw_core_handle_bus_reset(struct fw_card *card,
-			 int node_id, int generation,
-			 int self_id_count, u32 * self_ids)
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
+			      int self_id_count, u32 *self_ids)
 {
 	struct fw_node *local_node;
 	unsigned long flags;
@@ -532,6 +526,7 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 
+	card->broadcast_channel_allocated = false;
 	card->node_id = node_id;
 	/*
 	 * Update node_id before generation to prevent anybody from using
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index addb9f8..3c497bb 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -19,6 +19,11 @@
 #ifndef __fw_topology_h
 #define __fw_topology_h
 
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+
 enum {
 	FW_NODE_CREATED,
 	FW_NODE_UPDATED,
@@ -51,26 +56,22 @@
 	struct fw_node *ports[0];
 };
 
-static inline struct fw_node *
-fw_node_get(struct fw_node *node)
+static inline struct fw_node *fw_node_get(struct fw_node *node)
 {
 	atomic_inc(&node->ref_count);
 
 	return node;
 }
 
-static inline void
-fw_node_put(struct fw_node *node)
+static inline void fw_node_put(struct fw_node *node)
 {
 	if (atomic_dec_and_test(&node->ref_count))
 		kfree(node);
 }
 
-void
-fw_destroy_nodes(struct fw_card *card);
+struct fw_card;
+void fw_destroy_nodes(struct fw_card *card);
 
-int
-fw_compute_block_crc(u32 *block);
-
+int fw_compute_block_crc(u32 *block);
 
 #endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 699ac04..283dac6 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -64,10 +64,8 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
 
-static int
-close_transaction(struct fw_transaction *transaction,
-		  struct fw_card *card, int rcode,
-		  u32 *payload, size_t length)
+static int close_transaction(struct fw_transaction *transaction,
+			     struct fw_card *card, int rcode)
 {
 	struct fw_transaction *t;
 	unsigned long flags;
@@ -83,7 +81,7 @@
 	spin_unlock_irqrestore(&card->lock, flags);
 
 	if (&t->link != &card->transaction_list) {
-		t->callback(card, rcode, payload, length, t->callback_data);
+		t->callback(card, rcode, NULL, 0, t->callback_data);
 		return 0;
 	}
 
@@ -94,9 +92,8 @@
  * Only valid for transactions that are potentially pending (ie have
  * been sent).
  */
-int
-fw_cancel_transaction(struct fw_card *card,
-		      struct fw_transaction *transaction)
+int fw_cancel_transaction(struct fw_card *card,
+			  struct fw_transaction *transaction)
 {
 	/*
 	 * Cancel the packet transmission if it's still queued.  That
@@ -112,20 +109,19 @@
 	 * if the transaction is still pending and remove it in that case.
 	 */
 
-	return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
+	return close_transaction(transaction, card, RCODE_CANCELLED);
 }
 EXPORT_SYMBOL(fw_cancel_transaction);
 
-static void
-transmit_complete_callback(struct fw_packet *packet,
-			   struct fw_card *card, int status)
+static void transmit_complete_callback(struct fw_packet *packet,
+				       struct fw_card *card, int status)
 {
 	struct fw_transaction *t =
 	    container_of(packet, struct fw_transaction, packet);
 
 	switch (status) {
 	case ACK_COMPLETE:
-		close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
+		close_transaction(t, card, RCODE_COMPLETE);
 		break;
 	case ACK_PENDING:
 		t->timestamp = packet->timestamp;
@@ -133,31 +129,42 @@
 	case ACK_BUSY_X:
 	case ACK_BUSY_A:
 	case ACK_BUSY_B:
-		close_transaction(t, card, RCODE_BUSY, NULL, 0);
+		close_transaction(t, card, RCODE_BUSY);
 		break;
 	case ACK_DATA_ERROR:
-		close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
+		close_transaction(t, card, RCODE_DATA_ERROR);
 		break;
 	case ACK_TYPE_ERROR:
-		close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
+		close_transaction(t, card, RCODE_TYPE_ERROR);
 		break;
 	default:
 		/*
 		 * In this case the ack is really a juju specific
 		 * rcode, so just forward that to the callback.
 		 */
-		close_transaction(t, card, status, NULL, 0);
+		close_transaction(t, card, status);
 		break;
 	}
 }
 
-static void
-fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		int destination_id, int source_id, int generation, int speed,
 		unsigned long long offset, void *payload, size_t length)
 {
 	int ext_tcode;
 
+	if (tcode == TCODE_STREAM_DATA) {
+		packet->header[0] =
+			HEADER_DATA_LENGTH(length) |
+			destination_id |
+			HEADER_TCODE(TCODE_STREAM_DATA);
+		packet->header_length = 4;
+		packet->payload = payload;
+		packet->payload_length = length;
+
+		goto common;
+	}
+
 	if (tcode > 0x10) {
 		ext_tcode = tcode & ~0x10;
 		tcode = TCODE_LOCK_REQUEST;
@@ -204,7 +211,7 @@
 		packet->payload_length = 0;
 		break;
 	}
-
+ common:
 	packet->speed = speed;
 	packet->generation = generation;
 	packet->ack = 0;
@@ -246,13 +253,14 @@
  * @param callback function to be called when the transaction is completed
  * @param callback_data pointer to arbitrary data, which will be
  *   passed to the callback
+ *
+ * In case of asynchronous stream packets, i.e. TCODE_STREAM_DATA, the caller
+ * needs to synthesize @destination_id with fw_stream_packet_destination_id().
  */
-void
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
-		int tcode, int destination_id, int generation, int speed,
-		unsigned long long offset,
-		void *payload, size_t length,
-		fw_transaction_callback_t callback, void *callback_data)
+void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+		     int destination_id, int generation, int speed,
+		     unsigned long long offset, void *payload, size_t length,
+		     fw_transaction_callback_t callback, void *callback_data)
 {
 	unsigned long flags;
 	int tlabel;
@@ -322,16 +330,16 @@
  * Returns the RCODE.
  */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-		int generation, int speed, unsigned long long offset,
-		void *data, size_t length)
+		       int generation, int speed, unsigned long long offset,
+		       void *payload, size_t length)
 {
 	struct transaction_callback_data d;
 	struct fw_transaction t;
 
 	init_completion(&d.done);
-	d.payload = data;
+	d.payload = payload;
 	fw_send_request(card, &t, tcode, destination_id, generation, speed,
-			offset, data, length, transaction_callback, &d);
+			offset, payload, length, transaction_callback, &d);
 	wait_for_completion(&d.done);
 
 	return d.rcode;
@@ -399,9 +407,8 @@
 	}
 }
 
-static struct fw_address_handler *
-lookup_overlapping_address_handler(struct list_head *list,
-				   unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_overlapping_address_handler(
+	struct list_head *list, unsigned long long offset, size_t length)
 {
 	struct fw_address_handler *handler;
 
@@ -414,9 +421,8 @@
 	return NULL;
 }
 
-static struct fw_address_handler *
-lookup_enclosing_address_handler(struct list_head *list,
-				 unsigned long long offset, size_t length)
+static struct fw_address_handler *lookup_enclosing_address_handler(
+	struct list_head *list, unsigned long long offset, size_t length)
 {
 	struct fw_address_handler *handler;
 
@@ -449,36 +455,44 @@
 #endif  /*  0  */
 
 /**
- * Allocate a range of addresses in the node space of the OHCI
- * controller.  When a request is received that falls within the
- * specified address range, the specified callback is invoked.  The
- * parameters passed to the callback give the details of the
- * particular request.
+ * fw_core_add_address_handler - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
+ *
+ * region->start, ->end, and handler->length have to be quadlet-aligned.
+ *
+ * When a request is received that falls within the specified address range,
+ * the specified callback is invoked.  The parameters passed to the callback
+ * give the details of the particular request.
  *
  * Return value:  0 on success, non-zero otherwise.
  * The start offset of the handler's address region is determined by
  * fw_core_add_address_handler() and is returned in handler->offset.
- * The offset is quadlet-aligned.
  */
-int
-fw_core_add_address_handler(struct fw_address_handler *handler,
-			    const struct fw_address_region *region)
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+				const struct fw_address_region *region)
 {
 	struct fw_address_handler *other;
 	unsigned long flags;
 	int ret = -EBUSY;
 
+	if (region->start & 0xffff000000000003ULL ||
+	    region->end   & 0xffff000000000003ULL ||
+	    region->start >= region->end ||
+	    handler->length & 3 ||
+	    handler->length == 0)
+		return -EINVAL;
+
 	spin_lock_irqsave(&address_handler_lock, flags);
 
-	handler->offset = roundup(region->start, 4);
+	handler->offset = region->start;
 	while (handler->offset + handler->length <= region->end) {
 		other =
 		    lookup_overlapping_address_handler(&address_handler_list,
 						       handler->offset,
 						       handler->length);
 		if (other != NULL) {
-			handler->offset =
-			    roundup(other->offset + other->length, 4);
+			handler->offset += other->length;
 		} else {
 			list_add_tail(&handler->link, &address_handler_list);
 			ret = 0;
@@ -493,12 +507,7 @@
 EXPORT_SYMBOL(fw_core_add_address_handler);
 
 /**
- * Deallocate a range of addresses allocated with fw_allocate.  This
- * will call the associated callback one last time with a the special
- * tcode TCODE_DEALLOCATE, to let the client destroy the registered
- * callback data.  For convenience, the callback parameters offset and
- * length are set to the start and the length respectively for the
- * deallocated region, payload is set to NULL.
+ * fw_core_remove_address_handler - unregister an address handler
  */
 void fw_core_remove_address_handler(struct fw_address_handler *handler)
 {
@@ -518,9 +527,8 @@
 	u32 data[0];
 };
 
-static void
-free_response_callback(struct fw_packet *packet,
-		       struct fw_card *card, int status)
+static void free_response_callback(struct fw_packet *packet,
+				   struct fw_card *card, int status)
 {
 	struct fw_request *request;
 
@@ -528,9 +536,8 @@
 	kfree(request);
 }
 
-void
-fw_fill_response(struct fw_packet *response, u32 *request_header,
-		 int rcode, void *payload, size_t length)
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+		      int rcode, void *payload, size_t length)
 {
 	int tcode, tlabel, extended_tcode, source, destination;
 
@@ -588,8 +595,7 @@
 }
 EXPORT_SYMBOL(fw_fill_response);
 
-static struct fw_request *
-allocate_request(struct fw_packet *p)
+static struct fw_request *allocate_request(struct fw_packet *p)
 {
 	struct fw_request *request;
 	u32 *data, length;
@@ -649,8 +655,8 @@
 	return request;
 }
 
-void
-fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
+void fw_send_response(struct fw_card *card,
+		      struct fw_request *request, int rcode)
 {
 	/* unified transaction or broadcast transaction: don't respond */
 	if (request->ack != ACK_PENDING ||
@@ -670,8 +676,7 @@
 }
 EXPORT_SYMBOL(fw_send_response);
 
-void
-fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_address_handler *handler;
 	struct fw_request *request;
@@ -719,8 +724,7 @@
 }
 EXPORT_SYMBOL(fw_core_handle_request);
 
-void
-fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 {
 	struct fw_transaction *t;
 	unsigned long flags;
@@ -793,12 +797,10 @@
 	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
 	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
 
-static void
-handle_topology_map(struct fw_card *card, struct fw_request *request,
-		    int tcode, int destination, int source,
-		    int generation, int speed,
-		    unsigned long long offset,
-		    void *payload, size_t length, void *callback_data)
+static void handle_topology_map(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source, int generation,
+		int speed, unsigned long long offset,
+		void *payload, size_t length, void *callback_data)
 {
 	int i, start, end;
 	__be32 *map;
@@ -832,12 +834,10 @@
 	{ .start = CSR_REGISTER_BASE,
 	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
 
-static void
-handle_registers(struct fw_card *card, struct fw_request *request,
-		 int tcode, int destination, int source,
-		 int generation, int speed,
-		 unsigned long long offset,
-		 void *payload, size_t length, void *callback_data)
+static void handle_registers(struct fw_card *card, struct fw_request *request,
+		int tcode, int destination, int source, int generation,
+		int speed, unsigned long long offset,
+		void *payload, size_t length, void *callback_data)
 {
 	int reg = offset & ~CSR_REGISTER_BASE;
 	unsigned long long bus_time;
@@ -939,11 +939,11 @@
 
 static int __init fw_core_init(void)
 {
-	int retval;
+	int ret;
 
-	retval = bus_register(&fw_bus_type);
-	if (retval < 0)
-		return retval;
+	ret = bus_register(&fw_bus_type);
+	if (ret < 0)
+		return ret;
 
 	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
 	if (fw_cdev_major < 0) {
@@ -951,19 +951,10 @@
 		return fw_cdev_major;
 	}
 
-	retval = fw_core_add_address_handler(&topology_map,
-					     &topology_map_region);
-	BUG_ON(retval < 0);
-
-	retval = fw_core_add_address_handler(&registers,
-					     &registers_region);
-	BUG_ON(retval < 0);
-
-	/* Add the vendor textual descriptor. */
-	retval = fw_core_add_descriptor(&vendor_id_descriptor);
-	BUG_ON(retval < 0);
-	retval = fw_core_add_descriptor(&model_id_descriptor);
-	BUG_ON(retval < 0);
+	fw_core_add_address_handler(&topology_map, &topology_map_region);
+	fw_core_add_address_handler(&registers, &registers_region);
+	fw_core_add_descriptor(&vendor_id_descriptor);
+	fw_core_add_descriptor(&model_id_descriptor);
 
 	return 0;
 }
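With the stricter argument checking added to fw_core_add_address_handler() above, misaligned or empty regions now fail at registration time instead of being silently rounded. A minimal registration sketch under the new rules; the region, length, and callback below are illustrative, not taken from this patch:

/* Hypothetical handler; region and length chosen to satisfy the new checks. */
static void example_request_callback(struct fw_card *card,
				     struct fw_request *request,
				     int tcode, int destination, int source,
				     int generation, int speed,
				     unsigned long long offset,
				     void *payload, size_t length,
				     void *callback_data)
{
	/* Complete every request so the initiator does not time out. */
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length           = 0x100,	/* non-zero and quadlet-aligned */
	.address_callback = example_request_callback,
};

static const struct fw_address_region example_region = {
	.start = 0x000100000000ULL,	/* quadlet-aligned, upper 16 bits clear */
	.end   = 0x000100001000ULL,
};

static int example_register(void)
{
	/* Returns -EINVAL for misaligned input, -EBUSY if no slot is free. */
	return fw_core_add_address_handler(&example_handler, &example_region);
}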
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 1d78e9c..dfa7990 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,14 +82,14 @@
 #define CSR_SPEED_MAP			0x2000
 #define CSR_SPEED_MAP_END		0x3000
 
+#define BANDWIDTH_AVAILABLE_INITIAL	4915
 #define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
 #define BROADCAST_CHANNEL_VALID		(1 << 30)
 
 #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
 #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
 
-static inline void
-fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
 {
 	u32    *dst = _dst;
 	__be32 *src = _src;
@@ -99,8 +99,7 @@
 		dst[i] = be32_to_cpu(src[i]);
 }
 
-static inline void
-fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
 {
 	fw_memcpy_from_be32(_dst, _src, size);
 }
@@ -125,8 +124,7 @@
 				     struct fw_card *card, int status);
 
 typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
-					  void *data,
-					  size_t length,
+					  void *data, size_t length,
 					  void *callback_data);
 
 /*
@@ -141,12 +139,6 @@
 				      void *data, size_t length,
 				      void *callback_data);
 
-typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
-					int node_id, int generation,
-					u32 *self_ids,
-					int self_id_count,
-					void *callback_data);
-
 struct fw_packet {
 	int speed;
 	int generation;
@@ -187,12 +179,6 @@
 	void *callback_data;
 };
 
-static inline struct fw_packet *
-fw_packet(struct list_head *l)
-{
-	return list_entry(l, struct fw_packet, link);
-}
-
 struct fw_address_handler {
 	u64 offset;
 	size_t length;
@@ -201,7 +187,6 @@
 	struct list_head link;
 };
 
-
 struct fw_address_region {
 	u64 start;
 	u64 end;
@@ -255,6 +240,7 @@
 	int bm_retries;
 	int bm_generation;
 
+	bool broadcast_channel_allocated;
 	u32 broadcast_channel;
 	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
 };
@@ -315,10 +301,8 @@
 struct fw_iso_context;
 
 typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
-				  u32 cycle,
-				  size_t header_length,
-				  void *header,
-				  void *data);
+				  u32 cycle, size_t header_length,
+				  void *header, void *data);
 
 /*
  * An iso buffer is just a set of pages mapped for DMA in the
@@ -344,36 +328,25 @@
 	void *callback_data;
 };
 
-int
-fw_iso_buffer_init(struct fw_iso_buffer *buffer,
-		   struct fw_card *card,
-		   int page_count,
-		   enum dma_data_direction direction);
-int
-fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void
-fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+		       int page_count, enum dma_data_direction direction);
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
 
-struct fw_iso_context *
-fw_iso_context_create(struct fw_card *card, int type,
-		      int channel, int speed, size_t header_size,
-		      fw_iso_callback_t callback, void *callback_data);
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+		int type, int channel, int speed, size_t header_size,
+		fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+			 struct fw_iso_packet *packet,
+			 struct fw_iso_buffer *buffer,
+			 unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+			 int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
 
-void
-fw_iso_context_destroy(struct fw_iso_context *ctx);
-
-int
-fw_iso_context_queue(struct fw_iso_context *ctx,
-		     struct fw_iso_packet *packet,
-		     struct fw_iso_buffer *buffer,
-		     unsigned long payload);
-
-int
-fw_iso_context_start(struct fw_iso_context *ctx,
-		     int cycle, int sync, int tags);
-
-int
-fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+		u64 channels_mask, int *channel, int *bandwidth, bool allocate);
 
 struct fw_card_driver {
 	/*
@@ -415,7 +388,7 @@
 
 	struct fw_iso_context *
 	(*allocate_iso_context)(struct fw_card *card,
-				int type, size_t header_size);
+				int type, int channel, size_t header_size);
 	void (*free_iso_context)(struct fw_iso_context *ctx);
 
 	int (*start_iso)(struct fw_iso_context *ctx,
@@ -429,54 +402,45 @@
 	int (*stop_iso)(struct fw_iso_context *ctx);
 };
 
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
 
-void
-fw_send_request(struct fw_card *card, struct fw_transaction *t,
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
 		int tcode, int destination_id, int generation, int speed,
-		unsigned long long offset, void *data, size_t length,
+		unsigned long long offset, void *payload, size_t length,
 		fw_transaction_callback_t callback, void *callback_data);
-
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-		       int generation, int speed, unsigned long long offset,
-		       void *data, size_t length);
-
 int fw_cancel_transaction(struct fw_card *card,
 			  struct fw_transaction *transaction);
-
 void fw_flush_transactions(struct fw_card *card);
-
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+		       int generation, int speed, unsigned long long offset,
+		       void *payload, size_t length);
 void fw_send_phy_config(struct fw_card *card,
 			int node_id, int generation, int gap_count);
 
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+	return tag << 14 | channel << 8 | sy;
+}
+
 /*
  * Called by the topology code to inform the device code of node
  * activity; found, lost, or updated nodes.
  */
-void
-fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
 
 /* API used by card level drivers */
 
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
-		   struct device *device);
-int
-fw_card_add(struct fw_card *card,
-	    u32 max_receive, u32 link_speed, u64 guid);
+void fw_card_initialize(struct fw_card *card,
+		const struct fw_card_driver *driver, struct device *device);
+int fw_card_add(struct fw_card *card,
+		u32 max_receive, u32 link_speed, u64 guid);
+void fw_core_remove_card(struct fw_card *card);
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
+		int generation, int self_id_count, u32 *self_ids);
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
 
-void
-fw_core_remove_card(struct fw_card *card);
-
-void
-fw_core_handle_bus_reset(struct fw_card *card,
-			 int node_id, int generation,
-			 int self_id_count, u32 *self_ids);
-void
-fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
-
-void
-fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+extern int fw_irm_set_broadcast_channel_register(struct device *dev,
+						 void *data);
 
 #endif /* __fw_transaction_h */
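The new kernel-doc note on fw_send_request() puts the burden of building a stream destination on the caller, and fw_stream_packet_destination_id() packs tag, channel, and sy the way the stream packet header expects. A hedged sketch of such a caller; the tag/channel/sy values and the completion callback are placeholders:

/* Illustrative only: queue one asynchronous stream packet. */
static void example_send_stream(struct fw_card *card, struct fw_transaction *t,
				int generation, int speed,
				void *payload, size_t length,
				fw_transaction_callback_t callback,
				void *callback_data)
{
	/* tag 3, channel 5, sy 0 are example values. */
	int destination_id = fw_stream_packet_destination_id(3, 5, 0);

	/* The offset argument is meaningless for stream packets; pass 0. */
	fw_send_request(card, t, TCODE_STREAM_DATA, destination_id,
			generation, speed, 0, payload, length,
			callback, callback_data);
}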
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4..4ec5061 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
+		drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
+		drm_info.o drm_debugfs.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 0000000..c77c6c6
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+static struct drm_info_list drm_debugfs_list[] = {
+	{"name", drm_name_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"queues", drm_queues_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node *node = inode->i_private;
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * drm_info_list entries in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+			     struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct drm_info_node *tmp;
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
+		tmp = drm_alloc(sizeof(struct drm_info_node),
+				_DRM_DRIVER);
+		if (tmp == NULL) {
+			ret = -1;
+			goto fail;
+		}
+
+		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+					  root, tmp, &drm_debugfs_fops);
+		if (!ent) {
+			DRM_ERROR("Cannot create /debugfs/dri/%s\n",
+				  files[i].name);
+			drm_free(tmp, sizeof(struct drm_info_node),
+				 _DRM_DRIVER);
+			ret = -1;
+			goto fail;
+		}
+
+		tmp->minor = minor;
+		tmp->dent = ent;
+		tmp->info_ent = &files[i];
+		list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+	}
+	return 0;
+
+fail:
+	drm_debugfs_remove_files(files, count, minor);
+	return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param minor device minor to initialize
+ * \param minor_id device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the device debugfs root entry "/debugfs/dri/%minor%/" and each
+ * entry in drm_debugfs_list as "/debugfs/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+		     struct dentry *root)
+{
+	struct drm_device *dev = minor->dev;
+	char name[64];
+	int ret;
+
+	INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+	sprintf(name, "%d", minor_id);
+	minor->debugfs_root = debugfs_create_dir(name, root);
+	if (!minor->debugfs_root) {
+		DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+		return -1;
+	}
+
+	ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret) {
+		debugfs_remove(minor->debugfs_root);
+		minor->debugfs_root = NULL;
+		DRM_ERROR("Failed to create core drm debugfs files\n");
+		return ret;
+	}
+
+	if (dev->driver->debugfs_init) {
+		ret = dev->driver->debugfs_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/debugfs/dri.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The minor of which we should remove the files
+ * \return always zero.
+ *
+ * Remove the given debugfs entries created by drm_debugfs_create_files().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+			     struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				debugfs_remove(tmp->dent);
+				list_del(pos);
+				drm_free(tmp, sizeof(struct drm_info_node),
+					 _DRM_DRIVER);
+			}
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor device minor to clean up.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+
+	if (!minor->debugfs_root)
+		return 0;
+
+	if (dev->driver->debugfs_cleanup)
+		dev->driver->debugfs_cleanup(minor);
+
+	drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+	debugfs_remove(minor->debugfs_root);
+	minor->debugfs_root = NULL;
+
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
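Drivers opt into this infrastructure through the debugfs_init and debugfs_cleanup hooks that drm_debugfs_init() and drm_debugfs_cleanup() invoke above. A minimal sketch of a driver-side hook; the file name and show function are hypothetical:

/* Hypothetical driver debugfs entry built on the helpers above. */
static int example_status_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;

	seq_printf(m, "minor %d\n", node->minor->index);
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{"status", example_status_info, 0},
};

static int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list),
					minor->debugfs_root, minor);
}

static void example_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(example_debugfs_list,
				 ARRAY_SIZE(example_debugfs_list), minor);
}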
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23..ed32edb 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm_core.h"
 
 static int drm_version(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 
@@ -178,7 +180,7 @@
 
 	/* Clear AGP information */
 	if (drm_core_has_AGP(dev) && dev->agp &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+			!drm_core_check_feature(dev, DRIVER_MODESET)) {
 		struct drm_agp_mem *entry, *tempe;
 
 		/* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@
 		goto err_p3;
 	}
 
+	drm_debugfs_root = debugfs_create_dir("dri", NULL);
+	if (!drm_debugfs_root) {
+		DRM_ERROR("Cannot create /debugfs/dri\n");
+		ret = -1;
+		goto err_p3;
+	}
+
 	drm_mem_init();
 
 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@
 static void __exit drm_core_exit(void)
 {
 	remove_proc_entry("dri", NULL);
+	debugfs_remove(drm_debugfs_root);
 	drm_sysfs_destroy();
 
 	unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 0000000..fc98952
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+	struct drm_master *master = minor->master;
+
+	if (!master)
+		return 0;
+
+	if (master->unique) {
+		seq_printf(m, "%s %s %s\n",
+			   dev->driver->pci_driver.name,
+			   pci_name(dev->pdev), master->unique);
+	} else {
+		seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+			   pci_name(dev->pdev));
+	}
+
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
+
+	/* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS,
+	   _DRM_SHM, _DRM_AGP, _DRM_SCATTER_GATHER and
+	   _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+	const char *type;
+	int i;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "slot	 offset	      size type flags	 address mtrr\n\n");
+	i = 0;
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+
+		seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+			   i,
+			   map->offset,
+			   map->size, type, map->flags,
+			   (unsigned long) r_list->user_token);
+		if (map->mtrr < 0)
+			seq_printf(m, "none\n");
+		else
+			seq_printf(m, "%4d\n", map->mtrr);
+		i++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ */
+int drm_queues_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+	struct drm_queue *q;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "  ctx/flags   use   fin"
+		   "   blk/rw/rwf  wait    flushed	   queued"
+		   "      locks\n\n");
+	for (i = 0; i < dev->queue_count; i++) {
+		q = dev->queuelist[i];
+		atomic_inc(&q->use_count);
+		seq_printf(m,   "%5d/0x%03x %5d %5d"
+			   " %5d/%c%c/%c%c%c %5Zd\n",
+			   i,
+			   q->flags,
+			   atomic_read(&q->use_count),
+			   atomic_read(&q->finalization),
+			   atomic_read(&q->block_count),
+			   atomic_read(&q->block_read) ? 'r' : '-',
+			   atomic_read(&q->block_write) ? 'w' : '-',
+			   waitqueue_active(&q->read_queue) ? 'r' : '-',
+			   waitqueue_active(&q->write_queue) ? 'w' : '-',
+			   waitqueue_active(&q->flush_queue) ? 'f' : '-',
+			   DRM_BUFCOUNT(&q->waitlist));
+		atomic_dec(&q->use_count);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_device_dma *dma;
+	int i, seg_pages;
+
+	mutex_lock(&dev->struct_mutex);
+	dma = dev->dma;
+	if (!dma) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	seq_printf(m, " o     size count  free	 segs pages    kB\n\n");
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].buf_count) {
+			seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+			seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+				   i,
+				   dma->bufs[i].buf_size,
+				   dma->bufs[i].buf_count,
+				   atomic_read(&dma->bufs[i].freelist.count),
+				   dma->bufs[i].seg_count,
+				   seg_pages,
+				   seg_pages * PAGE_SIZE / 1024);
+		}
+	}
+	seq_printf(m, "\n");
+	for (i = 0; i < dma->buf_count; i++) {
+		if (i && !(i % 32))
+			seq_printf(m, "\n");
+		seq_printf(m, " %d", dma->buflist[i]->list);
+	}
+	seq_printf(m, "\n");
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int crtc;
+
+	mutex_lock(&dev->struct_mutex);
+	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+		seq_printf(m, "CRTC %d enable:     %d\n",
+			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+		seq_printf(m, "CRTC %d counter:    %d\n",
+			   crtc, drm_vblank_count(dev, crtc));
+		seq_printf(m, "CRTC %d last wait:  %d\n",
+			   crtc, dev->last_vblank_wait[crtc]);
+		seq_printf(m, "CRTC %d in modeset: %d\n",
+			   crtc, dev->vblank_inmodeset[crtc]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_file *priv;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "a dev	pid    uid	magic	  ioctls\n\n");
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+			   priv->authenticated ? 'y' : 'n',
+			   priv->minor->index,
+			   priv->pid,
+			   priv->uid, priv->magic, priv->ioctl_count);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct seq_file *m = data;
+
+	seq_printf(m, "%6d %8zd %7d %8d\n",
+		   obj->name, obj->size,
+		   atomic_read(&obj->handlecount.refcount),
+		   atomic_read(&obj->refcount.refcount));
+	return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "  name     size handles refcount\n");
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+	return 0;
+}
+
+int drm_gem_object_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
+	seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
+	seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
+	seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
+	seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+	seq_printf(m, "%d gtt total\n", dev->gtt_total);
+	return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_vma_entry *pt;
+	struct vm_area_struct *vma;
+#if defined(__i386__)
+	unsigned int pgprot;
+#endif
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+		   atomic_read(&dev->vma_count),
+		   high_memory, virt_to_phys(high_memory));
+
+	list_for_each_entry(pt, &dev->vmalist, head) {
+		vma = pt->vma;
+		if (!vma)
+			continue;
+		seq_printf(m,
+			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid, vma->vm_start, vma->vm_end,
+			   vma->vm_flags & VM_READ ? 'r' : '-',
+			   vma->vm_flags & VM_WRITE ? 'w' : '-',
+			   vma->vm_flags & VM_EXEC ? 'x' : '-',
+			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
+			   vma->vm_flags & VM_IO ? 'i' : '-',
+			   vma->vm_pgoff);
+
+#if defined(__i386__)
+		pgprot = pgprot_val(vma->vm_page_prot);
+		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+			   pgprot & _PAGE_PRESENT ? 'p' : '-',
+			   pgprot & _PAGE_RW ? 'w' : 'r',
+			   pgprot & _PAGE_USER ? 'u' : 's',
+			   pgprot & _PAGE_PWT ? 't' : 'b',
+			   pgprot & _PAGE_PCD ? 'u' : 'c',
+			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
+			   pgprot & _PAGE_DIRTY ? 'd' : '-',
+			   pgprot & _PAGE_PSE ? 'm' : 'k',
+			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+		seq_printf(m, "\n");
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+#endif
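drm_gem_name_info() drives drm_gem_one_name_info() through idr_for_each(), which hands the callback each (id, pointer) pair plus an opaque third argument, here the seq_file. A standalone sketch of that traversal pattern, with hypothetical names:

/* Illustrative idr_for_each() walk, mirroring the GEM name dump above. */
#include <linux/idr.h>
#include <linux/seq_file.h>

static int example_print_entry(int id, void *ptr, void *data)
{
	struct seq_file *m = data;

	seq_printf(m, "id %d -> object %p\n", id, ptr);
	return 0;	/* returning non-zero stops the walk early */
}

static void example_dump_idr(struct idr *idr, struct seq_file *m)
{
	idr_for_each(idr, example_print_entry, m);
}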
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f..9b3c5af 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,58 +37,105 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/seq_file.h>
 #include "drmP.h"
 
-static int drm_name_info(char *buf, char **start, off_t offset,
-			 int request, int *eof, void *data);
-static int drm_vm_info(char *buf, char **start, off_t offset,
-		       int request, int *eof, void *data);
-static int drm_clients_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data);
-static int drm_queues_info(char *buf, char **start, off_t offset,
-			   int request, int *eof, void *data);
-static int drm_bufs_info(char *buf, char **start, off_t offset,
-			 int request, int *eof, void *data);
-static int drm_vblank_info(char *buf, char **start, off_t offset,
-			   int request, int *eof, void *data);
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data);
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data);
-#if DRM_DEBUG_CODE
-static int drm_vma_info(char *buf, char **start, off_t offset,
-			int request, int *eof, void *data);
-#endif
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
 
 /**
  * Proc file list.
  */
-static struct drm_proc_list {
-	const char *name;	/**< file name */
-	int (*f) (char *, char **, off_t, int, int *, void *);		/**< proc callback*/
-	u32 driver_features; /**< Required driver features for this entry */
-} drm_proc_list[] = {
+static struct drm_info_list drm_proc_list[] = {
 	{"name", drm_name_info, 0},
-	{"mem", drm_mem_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
 	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
-	{"vblank", drm_vblank_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
 	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
-	{"vma", drm_vma_info},
+	{"vma", drm_vma_info, 0},
 #endif
 };
-
 #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node *node = PDE(inode)->data;
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 /**
- * Initialize the DRI proc filesystem for a device.
+ * Initialize a given set of proc files for a device
  *
- * \param dev DRM device.
- * \param minor device minor number.
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI proc dir entry.
+ * \param minor device minor number
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of proc files represented by an array of
+ * drm_info_list entries in the given root directory.
+ */
+int drm_proc_create_files(struct drm_info_list *files, int count,
+			  struct proc_dir_entry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct proc_dir_entry *ent;
+	struct drm_info_node *tmp;
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
+		tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
+		if (tmp == NULL) {
+			ret = -1;
+			goto fail;
+		}
+
+		ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/%s\n",
+				  files[i].name);
+			drm_free(tmp, sizeof(struct drm_info_node),
+				 _DRM_DRIVER);
+			ret = -1;
+			goto fail;
+		}
+
+		ent->proc_fops = &drm_proc_fops;
+		ent->data = tmp;
+		tmp->minor = minor;
+		tmp->info_ent = &files[i];
+		list_add(&(tmp->list), &(minor->proc_nodes.list));
+	}
+	return 0;
+
+fail:
+	for (i = 0; i < count; i++)
+		remove_proc_entry(files[i].name, minor->proc_root);
+	return ret;
+}
+
+/**
+ * Initialize the DRI proc filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
  * \param root DRI proc dir entry.
  * \param dev_root resulting DRI device proc dir entry.
  * \return root entry pointer on success, or NULL on failure.
@@ -101,34 +148,24 @@
 		  struct proc_dir_entry *root)
 {
 	struct drm_device *dev = minor->dev;
-	struct proc_dir_entry *ent;
-	int i, j, ret;
 	char name[64];
+	int ret;
 
+	INIT_LIST_HEAD(&minor->proc_nodes.list);
 	sprintf(name, "%d", minor_id);
-	minor->dev_root = proc_mkdir(name, root);
-	if (!minor->dev_root) {
+	minor->proc_root = proc_mkdir(name, root);
+	if (!minor->proc_root) {
 		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
 		return -1;
 	}
 
-	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
-		u32 features = drm_proc_list[i].driver_features;
-
-		if (features != 0 &&
-		    (dev->driver->driver_features & features) != features)
-			continue;
-
-		ent = create_proc_entry(drm_proc_list[i].name,
-					S_IFREG | S_IRUGO, minor->dev_root);
-		if (!ent) {
-			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
-				  name, drm_proc_list[i].name);
-			ret = -1;
-			goto fail;
-		}
-		ent->read_proc = drm_proc_list[i].f;
-		ent->data = minor;
+	ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+				    minor->proc_root, minor);
+	if (ret) {
+		remove_proc_entry(name, root);
+		minor->proc_root = NULL;
+		DRM_ERROR("Failed to create core drm proc files\n");
+		return ret;
 	}
 
 	if (dev->driver->proc_init) {
@@ -136,19 +173,32 @@
 		if (ret) {
 			DRM_ERROR("DRM: Driver failed to initialize "
 				  "/proc/dri.\n");
-			goto fail;
+			return ret;
 		}
 	}
-
 	return 0;
- fail:
+}
 
-	for (j = 0; j < i; j++)
-		remove_proc_entry(drm_proc_list[i].name,
-				  minor->dev_root);
-	remove_proc_entry(name, root);
-	minor->dev_root = NULL;
-	return ret;
+int drm_proc_remove_files(struct drm_info_list *files, int count,
+			  struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				remove_proc_entry(files[i].name,
+						  minor->proc_root);
+				list_del(pos);
+				drm_free(tmp, sizeof(struct drm_info_node),
+					 _DRM_DRIVER);
+			}
+		}
+	}
+	return 0;
 }
 
 /**
@@ -164,570 +214,19 @@
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
 	struct drm_device *dev = minor->dev;
-	int i;
 	char name[64];
 
-	if (!root || !minor->dev_root)
+	if (!root || !minor->proc_root)
 		return 0;
 
 	if (dev->driver->proc_cleanup)
 		dev->driver->proc_cleanup(minor);
 
-	for (i = 0; i < DRM_PROC_ENTRIES; i++)
-		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
+	drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
 	sprintf(name, "%d", minor->index);
 	remove_proc_entry(name, root);
 
 	return 0;
 }
 
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints the device name together with the bus id if available.
- */
-static int drm_name_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_master *master = minor->master;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	if (!master)
-		return 0;
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	if (master->unique) {
-		DRM_PROC_PRINT("%s %s %s\n",
-			       dev->driver->pci_driver.name,
-			       pci_name(dev->pdev), master->unique);
-	} else {
-		DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
-			       pci_name(dev->pdev));
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-static int drm__vm_info(char *buf, char **start, off_t offset, int request,
-			int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_map *map;
-	struct drm_map_list *r_list;
-
-	/* Hardcoded from _DRM_FRAME_BUFFER,
-	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
-	const char *type;
-	int i;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("slot	 offset	      size type flags	 "
-		       "address mtrr\n\n");
-	i = 0;
-	list_for_each_entry(r_list, &dev->maplist, head) {
-		map = r_list->map;
-		if (!map)
-			continue;
-		if (map->type < 0 || map->type > 5)
-			type = "??";
-		else
-			type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
-			       i,
-			       map->offset,
-			       map->size, type, map->flags,
-			       (unsigned long) r_list->user_token);
-		if (map->mtrr < 0) {
-			DRM_PROC_PRINT("none\n");
-		} else {
-			DRM_PROC_PRINT("%4d\n", map->mtrr);
-		}
-		i++;
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vm_info(char *buf, char **start, off_t offset, int request,
-		       int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vm_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../queues" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__queues_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	int i;
-	struct drm_queue *q;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("  ctx/flags   use   fin"
-		       "   blk/rw/rwf  wait    flushed	   queued"
-		       "      locks\n\n");
-	for (i = 0; i < dev->queue_count; i++) {
-		q = dev->queuelist[i];
-		atomic_inc(&q->use_count);
-		DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
-				   "%5d/0x%03x %5d %5d"
-				   " %5d/%c%c/%c%c%c %5Zd\n",
-				   i,
-				   q->flags,
-				   atomic_read(&q->use_count),
-				   atomic_read(&q->finalization),
-				   atomic_read(&q->block_count),
-				   atomic_read(&q->block_read) ? 'r' : '-',
-				   atomic_read(&q->block_write) ? 'w' : '-',
-				   waitqueue_active(&q->read_queue) ? 'r' : '-',
-				   waitqueue_active(&q->
-						    write_queue) ? 'w' : '-',
-				   waitqueue_active(&q->
-						    flush_queue) ? 'f' : '-',
-				   DRM_BUFCOUNT(&q->waitlist));
-		atomic_dec(&q->use_count);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_queues_info(char *buf, char **start, off_t offset, int request,
-			   int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__queues_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../bufs" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
-			  int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_device_dma *dma = dev->dma;
-	int i;
-
-	if (!dma || offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT(" o     size count  free	 segs pages    kB\n\n");
-	for (i = 0; i <= DRM_MAX_ORDER; i++) {
-		if (dma->bufs[i].buf_count)
-			DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
-				       i,
-				       dma->bufs[i].buf_size,
-				       dma->bufs[i].buf_count,
-				       atomic_read(&dma->bufs[i]
-						   .freelist.count),
-				       dma->bufs[i].seg_count,
-				       dma->bufs[i].seg_count
-				       * (1 << dma->bufs[i].page_order),
-				       (dma->bufs[i].seg_count
-					* (1 << dma->bufs[i].page_order))
-				       * PAGE_SIZE / 1024);
-	}
-	DRM_PROC_PRINT("\n");
-	for (i = 0; i < dma->buf_count; i++) {
-		if (i && !(i % 32))
-			DRM_PROC_PRINT("\n");
-		DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
-	}
-	DRM_PROC_PRINT("\n");
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__bufs_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../vblank" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
-			  int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	int crtc;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-		DRM_PROC_PRINT("CRTC %d enable:     %d\n",
-			       crtc, atomic_read(&dev->vblank_refcount[crtc]));
-		DRM_PROC_PRINT("CRTC %d counter:    %d\n",
-			       crtc, drm_vblank_count(dev, crtc));
-		DRM_PROC_PRINT("CRTC %d last wait:  %d\n",
-			       crtc, dev->last_vblank_wait[crtc]);
-		DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
-			       crtc, dev->vblank_inmodeset[crtc]);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vblank_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param request requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- */
-static int drm__clients_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_file *priv;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("a dev	pid    uid	magic	  ioctls\n\n");
-	list_for_each_entry(priv, &dev->filelist, lhead) {
-		DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
-			       priv->authenticated ? 'y' : 'n',
-			       priv->minor->index,
-			       priv->pid,
-			       priv->uid, priv->magic, priv->ioctl_count);
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-/**
- * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
- */
-static int drm_clients_info(char *buf, char **start, off_t offset,
-			    int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__clients_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-struct drm_gem_name_info_data {
-       int                     len;
-       char                    *buf;
-       int                     eof;
-};
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
-	struct drm_gem_object *obj = ptr;
-	struct drm_gem_name_info_data   *nid = data;
-
-	DRM_INFO("name %d size %zd\n", obj->name, obj->size);
-	if (nid->eof)
-		return 0;
-
-	nid->len += sprintf(&nid->buf[nid->len],
-			    "%6d %8zd %7d %8d\n",
-			    obj->name, obj->size,
-			    atomic_read(&obj->handlecount.refcount),
-			    atomic_read(&obj->refcount.refcount));
-	if (nid->len > DRM_PROC_LIMIT) {
-		nid->eof = 1;
-		return 0;
-	}
-	return 0;
-}
-
-static int drm_gem_name_info(char *buf, char **start, off_t offset,
-			     int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	struct drm_gem_name_info_data nid;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	nid.len = sprintf(buf, "  name     size handles refcount\n");
-	nid.buf = buf;
-	nid.eof = 0;
-	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
-
-	*start = &buf[offset];
-	*eof = 0;
-	if (nid.len > request + offset)
-		return request;
-	*eof = 1;
-	return nid.len - offset;
-}
-
-static int drm_gem_object_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
-	DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
-	DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
-	DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
-	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-#if DRM_DEBUG_CODE
-
-static int drm__vma_info(char *buf, char **start, off_t offset, int request,
-			 int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int len = 0;
-	struct drm_vma_entry *pt;
-	struct vm_area_struct *vma;
-#if defined(__i386__)
-	unsigned int pgprot;
-#endif
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-
-	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
-		       atomic_read(&dev->vma_count),
-		       high_memory, virt_to_phys(high_memory));
-	list_for_each_entry(pt, &dev->vmalist, head) {
-		if (!(vma = pt->vma))
-			continue;
-		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			       pt->pid,
-			       vma->vm_start,
-			       vma->vm_end,
-			       vma->vm_flags & VM_READ ? 'r' : '-',
-			       vma->vm_flags & VM_WRITE ? 'w' : '-',
-			       vma->vm_flags & VM_EXEC ? 'x' : '-',
-			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
-			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
-			       vma->vm_flags & VM_IO ? 'i' : '-',
-			       vma->vm_pgoff);
-
-#if defined(__i386__)
-		pgprot = pgprot_val(vma->vm_page_prot);
-		DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
-			       pgprot & _PAGE_PRESENT ? 'p' : '-',
-			       pgprot & _PAGE_RW ? 'w' : 'r',
-			       pgprot & _PAGE_USER ? 'u' : 's',
-			       pgprot & _PAGE_PWT ? 't' : 'b',
-			       pgprot & _PAGE_PCD ? 'u' : 'c',
-			       pgprot & _PAGE_ACCESSED ? 'a' : '-',
-			       pgprot & _PAGE_DIRTY ? 'd' : '-',
-			       pgprot & _PAGE_PSE ? 'm' : 'k',
-			       pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
-		DRM_PROC_PRINT("\n");
-	}
-
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int drm_vma_info(char *buf, char **start, off_t offset, int request,
-			int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm__vma_info(buf, start, offset, request, eof, data);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-#endif
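The conversion replaces the old read_proc offset arithmetic (DRM_PROC_LIMIT, *start, *eof bookkeeping) with seq_file, which does the buffering and partial-read handling itself. A generic sketch of the single_open pattern the new code relies on; all names are illustrative:

/* Minimal single_open-based proc file, following drm_proc_open() above. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *unused)
{
	/* seq_file takes care of offsets and EOF; just print. */
	seq_printf(m, "example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data carries per-entry context, as in drm_proc_open(). */
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};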
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b..48f33be 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@
 
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
@@ -313,7 +314,15 @@
 			goto err_mem;
 		}
 	} else
-		new_minor->dev_root = NULL;
+		new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+	if (ret) {
+		DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+		goto err_g2;
+	}
+#endif
 
 	ret = drm_sysfs_device_add(new_minor);
 	if (ret) {
@@ -451,6 +460,10 @@
 
 	if (minor->type == DRM_MINOR_LEGACY)
 		drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_cleanup(minor);
+#endif
+
 	drm_sysfs_device_remove(minor);
 
 	idr_remove(&drm_minors_idr, minor->index);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba3..51c5a05 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@
           i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
-	  i915_gem_proc.o \
+	  i915_gem_debugfs.o \
 	  i915_gem_tiling.o \
 	  intel_display.o \
 	  intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e..a818b37 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 	u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@
 		if (ring->space >= n)
 			return 0;
 
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		if (dev->primary->master) {
+			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		}
+
 
 		if (ring->head != last_head)
 			i = 0;
@@ -356,7 +359,7 @@
 	return ret;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -370,8 +373,7 @@
 	for (i = 0; i < dwords;) {
 		int cmd, sz;
 
-		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
-			return -EINVAL;
+		cmd = buffer[i];
 
 		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
 			return -EINVAL;
@@ -379,11 +381,7 @@
 		OUT_RING(cmd);
 
 		while (++i, --sz) {
-			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
-							 sizeof(cmd))) {
-				return -EINVAL;
-			}
-			OUT_RING(cmd);
+			OUT_RING(buffer[i]);
 		}
 	}
 
@@ -397,17 +395,13 @@
 
 int
 i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect __user *boxes,
+	      struct drm_clip_rect *boxes,
 	      int i, int DR1, int DR4)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_clip_rect box;
+	struct drm_clip_rect box = boxes[i];
 	RING_LOCALS;
 
-	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
-		return -EFAULT;
-	}
-
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
 			  box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
-				   drm_i915_cmdbuffer_t * cmd)
+				   drm_i915_cmdbuffer_t *cmd,
+				   struct drm_clip_rect *cliprects,
+				   void *cmdbuf)
 {
 	int nbox = cmd->num_cliprects;
 	int i = 0, count, ret;
@@ -476,13 +472,13 @@
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			ret = i915_emit_box(dev, cmd->cliprects, i,
+			ret = i915_emit_box(dev, cliprects, i,
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
 		}
 
-		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
 		if (ret)
 			return ret;
 	}
@@ -492,10 +488,10 @@
 }
 
 static int i915_dispatch_batchbuffer(struct drm_device * dev,
-				     drm_i915_batchbuffer_t * batch)
+				     drm_i915_batchbuffer_t * batch,
+				     struct drm_clip_rect *cliprects)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_clip_rect __user *boxes = batch->cliprects;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
 	RING_LOCALS;
@@ -511,7 +507,7 @@
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, boxes, i,
+			int ret = i915_emit_box(dev, cliprects, i,
 						batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
@@ -626,6 +622,7 @@
 	    master_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
 	int ret;
+	struct drm_clip_rect *cliprects = NULL;
 
 	if (!dev_priv->allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
-						       batch->num_cliprects *
-						       sizeof(struct drm_clip_rect)))
-		return -EFAULT;
+	if (batch->num_cliprects < 0)
+		return -EINVAL;
+
+	if (batch->num_cliprects) {
+		cliprects = drm_calloc(batch->num_cliprects,
+				       sizeof(struct drm_clip_rect),
+				       DRM_MEM_DRIVER);
+		if (cliprects == NULL)
+			return -ENOMEM;
+
+		ret = copy_from_user(cliprects, batch->cliprects,
+				     batch->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0)
+			goto fail_free;
+	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_batchbuffer(dev, batch);
+	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+	drm_free(cliprects,
+		 batch->num_cliprects * sizeof(struct drm_clip_rect),
+		 DRM_MEM_DRIVER);
+
 	return ret;
 }
 
@@ -659,6 +674,8 @@
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    master_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
+	struct drm_clip_rect *cliprects = NULL;
+	void *batch_data;
 	int ret;
 
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	if (cmdbuf->num_cliprects &&
-	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-				cmdbuf->num_cliprects *
-				sizeof(struct drm_clip_rect))) {
-		DRM_ERROR("Fault accessing cliprects\n");
-		return -EFAULT;
+	if (cmdbuf->num_cliprects < 0)
+		return -EINVAL;
+
+	batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+	if (batch_data == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+	if (ret != 0)
+		goto fail_batch_free;
+
+	if (cmdbuf->num_cliprects) {
+		cliprects = drm_calloc(cmdbuf->num_cliprects,
+				       sizeof(struct drm_clip_rect),
+				       DRM_MEM_DRIVER);
+		if (cliprects == NULL)
+			goto fail_batch_free;
+
+		ret = copy_from_user(cliprects, cmdbuf->cliprects,
+				     cmdbuf->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0)
+			goto fail_clip_free;
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
-		return ret;
+		goto fail_clip_free;
 	}
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-	return 0;
+
+fail_clip_free:
+	drm_free(cliprects,
+		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+		 DRM_MEM_DRIVER);
+fail_batch_free:
+	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+
+	return ret;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
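Both ioctls above now follow the same shape: sanity-check the user-supplied count, snapshot the user arrays into kernel memory with copy_from_user(), and only then take struct_mutex and emit to the ring, so nothing can fault while the lock is held. A condensed sketch of the copy-in step, using the drm memory helpers this patch already relies on (the foo_ name is hypothetical):

static int foo_copy_cliprects(struct drm_clip_rect __user *user_rects,
			      int num, struct drm_clip_rect **out)
{
	struct drm_clip_rect *rects;

	*out = NULL;
	if (num < 0)
		return -EINVAL;	/* count comes straight from userspace */
	if (num == 0)
		return 0;

	rects = drm_calloc(num, sizeof(*rects), DRM_MEM_DRIVER);
	if (rects == NULL)
		return -ENOMEM;

	if (copy_from_user(rects, user_rects, num * sizeof(*rects))) {
		drm_free(rects, num * sizeof(*rects), DRM_MEM_DRIVER);
		return -EFAULT;
	}

	*out = rects;	/* caller frees on every exit path, as above */
	return 0;
}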
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0..dcb91f5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@
 	.get_reg_ofs = drm_core_get_reg_ofs,
 	.master_create = i915_master_create,
 	.master_destroy = i915_master_destroy,
-	.proc_init = i915_gem_proc_init,
-	.proc_cleanup = i915_gem_proc_cleanup,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = i915_gem_debugfs_init,
+	.debugfs_cleanup = i915_gem_debugfs_cleanup,
+#endif
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
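With the proc hooks gone from the vtable, a driver that wants GEM debug files supplies the two debugfs callbacks instead, guarded so kernels without CONFIG_DEBUG_FS still build. A minimal sketch, with foo_debugfs_init/foo_debugfs_cleanup standing in for driver code using the drm_minor-based signatures introduced here:

static struct drm_driver foo_driver = {
	/* ... remaining ops elided ... */
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = foo_debugfs_init,
	.debugfs_cleanup = foo_debugfs_cleanup,
#endif
};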
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc986..c1685d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
-	struct page **page_list;
+	struct page **pages;
+	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-			 struct drm_clip_rect __user *boxes,
+			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
 
 /* i915_irq.c */
@@ -604,8 +605,6 @@
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-int i915_gem_proc_init(struct drm_minor *minor);
-void i915_gem_proc_cleanup(struct drm_minor *minor);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@
 			  const char *where, uint32_t mark);
 void i915_dump_lru(struct drm_device *dev, const char *where);
 
+/* i915_debugfs.c */
+int i915_gem_debugfs_init(struct drm_minor *minor);
+void i915_gem_debugfs_cleanup(struct drm_minor *minor);
+
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@
 		     (dev)->pci_device == 0x2E22 || \
 		     IS_GM45(dev))
 
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
 			(dev)->pci_device == 0x29B2 ||	\
-			(dev)->pci_device == 0x29D2)
+			(dev)->pci_device == 0x29D2 ||  \
+			(IS_IGD(dev)))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+			IS_IGD(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4..b52cba0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -136,6 +136,224 @@
 	return 0;
 }
 
+static inline int
+fast_shmem_read(struct page **pages,
+		loff_t page_base, int page_offset,
+		char __user *data,
+		int length)
+{
+	char __iomem *vaddr;
+	int ret;
+
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	if (vaddr == NULL)
+		return -ENOMEM;
+	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	kunmap_atomic(vaddr, KM_USER0);
+
+	return ret;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+		int dst_offset,
+		struct page *src_page,
+		int src_offset,
+		int length)
+{
+	char *dst_vaddr, *src_vaddr;
+
+	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+	if (dst_vaddr == NULL)
+		return -ENOMEM;
+
+	src_vaddr = kmap_atomic(src_page, KM_USER1);
+	if (src_vaddr == NULL) {
+		kunmap_atomic(dst_vaddr, KM_USER0);
+		return -ENOMEM;
+	}
+
+	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+	kunmap_atomic(src_vaddr, KM_USER1);
+	kunmap_atomic(dst_vaddr, KM_USER0);
+
+	return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_to_user directly
+ * from the backing pages of the object to the user's address space.  On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			  struct drm_i915_gem_pread *args,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length;
+	int ret;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within the object
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = (offset & ~(PAGE_SIZE-1));
+		page_offset = offset & (PAGE_SIZE-1);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		ret = fast_shmem_read(obj_priv->pages,
+				      page_base, page_offset,
+				      user_data, page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which uses get_user_pages to pin
+ * the user pages backing the destination buffer, so we can copy out of the
+ * object's backing pages via kmap_atomic while holding the struct mutex,
+ * without taking page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			  struct drm_i915_gem_pread *args,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct mm_struct *mm = current->mm;
+	struct page **user_pages;
+	ssize_t remain;
+	loff_t offset, pinned_pages, i;
+	loff_t first_data_page, last_data_page, num_pages;
+	int shmem_page_index, shmem_page_offset;
+	int data_page_index,  data_page_offset;
+	int page_length;
+	int ret;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, yet we want to hold it while
+	 * dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 1, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto fail_put_user_pages;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+							args->size);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * shmem_page_index = page number within shmem file
+		 * shmem_page_offset = offset within page in shmem file
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_index = offset / PAGE_SIZE;
+		shmem_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_shmem_copy(user_pages[data_page_index],
+				      data_page_offset,
+				      obj_priv->pages[shmem_page_index],
+				      shmem_page_offset,
+				      page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		data_ptr += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+	for (i = 0; i < pinned_pages; i++) {
+		SetPageDirty(user_pages[i]);
+		page_cache_release(user_pages[i]);
+	}
+	kfree(user_pages);
+
+	return ret;
+}
+
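The trick the slow path uses recurs in every fallback added by this patch: pin the user pages with get_user_pages() before taking struct_mutex, then copy through kmap_atomic(), which cannot fault. Reduced to a helper, it looks roughly like this (a sketch against the get_user_pages() signature of this kernel; pass write = 1 when the kernel will store into the user buffer, as pread does):

static int foo_pin_user_pages(unsigned long uaddr, size_t len, int write,
			      struct page ***pages_out, int *num_out)
{
	struct mm_struct *mm = current->mm;
	int num = ((uaddr + len - 1) / PAGE_SIZE) - (uaddr / PAGE_SIZE) + 1;
	struct page **pages;
	int pinned, i;

	pages = kcalloc(num, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, uaddr, num,
				write, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	if (pinned < num) {
		/* partial pin: drop what we got and report the fault */
		for (i = 0; i < pinned; i++)
			page_cache_release(pages[i]);
		kfree(pages);
		return -EFAULT;
	}

	*pages_out = pages;
	*num_out = num;
	return 0;
}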
 /**
  * Reads data from the object referenced by handle.
  *
@@ -148,8 +366,6 @@
 	struct drm_i915_gem_pread *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	ssize_t read;
-	loff_t offset;
 	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
-	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-							args->size);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	offset = args->offset;
-
-	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-			args->size, &offset);
-	if (read != args->size) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		if (read < 0)
-			return read;
-		else
-			return -EINVAL;
-	}
+	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+	if (ret != 0)
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
 	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@
  */
 
 static inline int
-slow_user_write(struct io_mapping *mapping,
-		loff_t page_base, int page_offset,
-		char __user *user_data,
-		int length)
+slow_kernel_write(struct io_mapping *mapping,
+		  loff_t gtt_base, int gtt_offset,
+		  struct page *user_page, int user_offset,
+		  int length)
 {
-	char __iomem *vaddr;
+	char *src_vaddr, *dst_vaddr;
 	unsigned long unwritten;
 
-	vaddr = io_mapping_map_wc(mapping, page_base);
-	if (vaddr == NULL)
-		return -EFAULT;
-	unwritten = __copy_from_user(vaddr + page_offset,
-				     user_data, length);
-	io_mapping_unmap(vaddr);
+	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+	src_vaddr = kmap_atomic(user_page, KM_USER1);
+	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+						      src_vaddr + user_offset,
+						      length);
+	kunmap_atomic(src_vaddr, KM_USER1);
+	io_mapping_unmap_atomic(dst_vaddr);
 	if (unwritten)
 		return -EFAULT;
 	return 0;
 }
 
+static inline int
+fast_shmem_write(struct page **pages,
+		 loff_t page_base, int page_offset,
+		 char __user *data,
+		 int length)
+{
+	char __iomem *vaddr;
+
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	if (vaddr == NULL)
+		return -ENOMEM;
+	__copy_from_user_inatomic(vaddr + page_offset, data, length);
+	kunmap_atomic(vaddr, KM_USER0);
+
+	return 0;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
 static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-		    struct drm_i915_gem_pwrite *args,
-		    struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@
 
 	obj_priv = obj->driver_private;
 	offset = obj_priv->gtt_offset + args->offset;
-	obj_priv->dirty = 1;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -292,16 +509,11 @@
 				       page_offset, user_data, page_length);
 
 		/* If we get a fault while copying data, then (presumably) our
-		 * source page isn't available. In this case, use the
-		 * non-atomic function
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
 		 */
-		if (ret) {
-			ret = slow_user_write (dev_priv->mm.gtt_mapping,
-					       page_base, page_offset,
-					       user_data, page_length);
-			if (ret)
-				goto fail;
-		}
+		if (ret)
+			goto fail;
 
 		remain -= page_length;
 		user_data += page_length;
@@ -315,39 +527,284 @@
 	return ret;
 }
 
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
 static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-		      struct drm_i915_gem_pwrite *args,
-		      struct drm_file *file_priv)
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file_priv)
 {
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t gtt_page_base, offset;
+	loff_t first_data_page, last_data_page, num_pages;
+	loff_t pinned_pages, i;
+	struct page **user_pages;
+	struct mm_struct *mm = current->mm;
+	int gtt_page_offset, data_page_offset, data_page_index, page_length;
 	int ret;
-	loff_t offset;
-	ssize_t written;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, and all of the pwrite implementations
+	 * want to hold it while dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 0, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto out_unpin_pages;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(obj, 0);
+	if (ret)
+		goto out_unlock;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	if (ret)
+		goto out_unpin_object;
+
+	obj_priv = obj->driver_private;
+	offset = obj_priv->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * gtt_page_base = page offset within aperture
+		 * gtt_page_offset = offset within page in aperture
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		gtt_page_base = offset & PAGE_MASK;
+		gtt_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((gtt_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - gtt_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+					gtt_page_base, gtt_page_offset,
+					user_pages[data_page_index],
+					data_page_offset,
+					page_length);
+
+		/* slow_kernel_write() copies between pages we have already
+		 * pinned, so it should not fault; if it fails anyway, give
+		 * up rather than retry.
+		 */
+		if (ret)
+			goto out_unpin_object;
+
+		remain -= page_length;
+		offset += page_length;
+		data_ptr += page_length;
+	}
+
+out_unpin_object:
+	i915_gem_object_unpin(obj);
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+	for (i = 0; i < pinned_pages; i++)
+		page_cache_release(user_pages[i]);
+	kfree(user_pages);
+
+	return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+			   struct drm_i915_gem_pwrite *args,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length;
+	int ret;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
 
 	mutex_lock(&dev->struct_mutex);
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret != 0)
+		goto fail_put_pages;
 
+	obj_priv = obj->driver_private;
 	offset = args->offset;
+	obj_priv->dirty = 1;
 
-	written = vfs_write(obj->filp,
-			    (char __user *)(uintptr_t) args->data_ptr,
-			    args->size, &offset);
-	if (written != args->size) {
-		mutex_unlock(&dev->struct_mutex);
-		if (written < 0)
-			return written;
-		else
-			return -EINVAL;
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within the object
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = (offset & ~(PAGE_SIZE-1));
+		page_offset = offset & (PAGE_SIZE-1);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		ret = fast_shmem_write(obj_priv->pages,
+				       page_base, page_offset,
+				       user_data, page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
 	}
 
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
+}
+
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
+static int
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+			   struct drm_i915_gem_pwrite *args,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct mm_struct *mm = current->mm;
+	struct page **user_pages;
+	ssize_t remain;
+	loff_t offset, pinned_pages, i;
+	loff_t first_data_page, last_data_page, num_pages;
+	int shmem_page_index, shmem_page_offset;
+	int data_page_index,  data_page_offset;
+	int page_length;
+	int ret;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, and all of the pwrite implementations
+	 * want to hold it while dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (user_pages == NULL)
+		return -ENOMEM;
+
+	down_read(&mm->mmap_sem);
+	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+				      num_pages, 0, 0, user_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (pinned_pages < num_pages) {
+		ret = -EFAULT;
+		goto fail_put_user_pages;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret != 0)
+		goto fail_unlock;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret != 0)
+		goto fail_put_pages;
+
+	obj_priv = obj->driver_private;
+	offset = args->offset;
+	obj_priv->dirty = 1;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * shmem_page_index = page number within shmem file
+		 * shmem_page_offset = offset within page in shmem file
+		 * data_page_index = page number in get_user_pages return
+		 * data_page_offset = offset within data_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_index = offset / PAGE_SIZE;
+		shmem_page_offset = offset & ~PAGE_MASK;
+		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		data_page_offset = data_ptr & ~PAGE_MASK;
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+		if ((data_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - data_page_offset;
+
+		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+				      shmem_page_offset,
+				      user_pages[data_page_index],
+				      data_page_offset,
+				      page_length);
+		if (ret)
+			goto fail_put_pages;
+
+		remain -= page_length;
+		data_ptr += page_length;
+		offset += page_length;
+	}
+
+fail_put_pages:
+	i915_gem_object_put_pages(obj);
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+	for (i = 0; i < pinned_pages; i++)
+		page_cache_release(user_pages[i]);
+	kfree(user_pages);
+
+	return ret;
 }
 
 /**
@@ -388,10 +845,19 @@
 	if (obj_priv->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 dev->gtt_total != 0)
-		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-	else
-		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+		 dev->gtt_total != 0) {
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+		if (ret == -EFAULT) {
+			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+						       file_priv);
+		}
+	} else {
+		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+		if (ret == -EFAULT) {
+			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+							 file_priv);
+		}
+	}
 
 #if WATCH_PWRITE
 	if (ret)
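Note the contract the pwrite dispatch relies on: the fast paths use only the *_inatomic user-access variants, reporting -EFAULT for a non-resident user page instead of sleeping, and only that error selects the pinned-page fallback; any other failure is returned as-is. In outline (foo_ names hypothetical):

	ret = foo_pwrite_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT)	/* user page not resident: pin and retry */
		ret = foo_pwrite_slow(dev, obj, args, file_priv);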
@@ -816,29 +1282,30 @@
 }
 
 static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count = obj->size / PAGE_SIZE;
 	int i;
 
-	if (obj_priv->page_list == NULL)
+	BUG_ON(obj_priv->pages_refcount == 0);
+
+	if (--obj_priv->pages_refcount != 0)
 		return;
 
-
 	for (i = 0; i < page_count; i++)
-		if (obj_priv->page_list[i] != NULL) {
+		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
-				set_page_dirty(obj_priv->page_list[i]);
-			mark_page_accessed(obj_priv->page_list[i]);
-			page_cache_release(obj_priv->page_list[i]);
+				set_page_dirty(obj_priv->pages[i]);
+			mark_page_accessed(obj_priv->pages[i]);
+			page_cache_release(obj_priv->pages[i]);
 		}
 	obj_priv->dirty = 0;
 
-	drm_free(obj_priv->page_list,
+	drm_free(obj_priv->pages,
 		 page_count * sizeof(struct page *),
 		 DRM_MEM_DRIVER);
-	obj_priv->page_list = NULL;
+	obj_priv->pages = NULL;
 }
 
 static void
@@ -1290,7 +1757,7 @@
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	i915_gem_object_free_page_list(obj);
+	i915_gem_object_put_pages(obj);
 
 	if (obj_priv->gtt_space) {
 		atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@
 }
 
 static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -1418,18 +1885,19 @@
 	struct page *page;
 	int ret;
 
-	if (obj_priv->page_list)
+	if (obj_priv->pages_refcount++ != 0)
 		return 0;
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
 	page_count = obj->size / PAGE_SIZE;
-	BUG_ON(obj_priv->page_list != NULL);
-	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-					 DRM_MEM_DRIVER);
-	if (obj_priv->page_list == NULL) {
+	BUG_ON(obj_priv->pages != NULL);
+	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+				     DRM_MEM_DRIVER);
+	if (obj_priv->pages == NULL) {
 		DRM_ERROR("Faled to allocate page list\n");
+		obj_priv->pages_refcount--;
 		return -ENOMEM;
 	}
 
@@ -1440,10 +1908,10 @@
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			DRM_ERROR("read_mapping_page failed: %d\n", ret);
-			i915_gem_object_free_page_list(obj);
+			i915_gem_object_put_pages(obj);
 			return ret;
 		}
-		obj_priv->page_list[i] = page;
+		obj_priv->pages[i] = page;
 	}
 	return 0;
 }
@@ -1766,7 +2234,7 @@
 	DRM_INFO("Binding object of size %d at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@
 	 * into the GTT.
 	 */
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
-					       obj_priv->page_list,
+					       obj_priv->pages,
 					       page_count,
 					       obj_priv->gtt_offset,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
-		i915_gem_object_free_page_list(obj);
+		i915_gem_object_put_pages(obj);
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
 		return -ENOMEM;
@@ -1810,10 +2278,10 @@
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj_priv->page_list == NULL)
+	if (obj_priv->pages == NULL)
 		return;
 
-	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
-	struct drm_device *dev = obj->dev;
 	int ret;
 
 	i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@
 	/* Flush the CPU cache if it's still invalid. */
 	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
-		drm_agp_chipset_flush(dev);
 
 		obj->read_domains |= I915_GEM_DOMAIN_CPU;
 	}
@@ -2144,7 +2610,6 @@
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@
 		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
 			if (obj_priv->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->page_list + i, 1);
+			drm_clflush_pages(obj_priv->pages + i, 1);
 		}
-		drm_agp_chipset_flush(dev);
 	}
 
 	/* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@
 		if (obj_priv->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->page_list + i, 1);
+		drm_clflush_pages(obj_priv->pages + i, 1);
 
 		obj_priv->page_cpu_valid[i] = 1;
 	}
@@ -2245,12 +2709,11 @@
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry)
+				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_relocation_entry *relocs)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry reloc;
-	struct drm_i915_gem_relocation_entry __user *relocs;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@
 
 	entry->offset = obj_priv->gtt_offset;
 
-	relocs = (struct drm_i915_gem_relocation_entry __user *)
-		 (uintptr_t) entry->relocs_ptr;
 	/* Apply the relocations, using the GTT aperture to avoid cache
 	 * flushing requirements.
 	 */
 	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
 		struct drm_gem_object *target_obj;
 		struct drm_i915_gem_object *target_obj_priv;
 		uint32_t reloc_val, reloc_offset;
 		uint32_t __iomem *reloc_entry;
 
-		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
-
 		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-						   reloc.target_handle);
+						   reloc->target_handle);
 		if (target_obj == NULL) {
 			i915_gem_object_unpin(obj);
 			return -EBADF;
@@ -2292,53 +2748,53 @@
 		 */
 		if (target_obj_priv->gtt_space == NULL) {
 			DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
+				  reloc->target_handle);
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 
-		if (reloc.offset > obj->size - 4) {
+		if (reloc->offset > obj->size - 4) {
 			DRM_ERROR("Relocation beyond object bounds: "
 				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->size);
+				  obj, reloc->target_handle,
+				  (int) reloc->offset, (int) obj->size);
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
-		if (reloc.offset & 3) {
+		if (reloc->offset & 3) {
 			DRM_ERROR("Relocation not 4-byte aligned: "
 				  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
+				  obj, reloc->target_handle,
+				  (int) reloc->offset);
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
 			DRM_ERROR("reloc with read/write CPU domains: "
 				  "obj %p target %d offset %d "
 				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
+				  obj, reloc->target_handle,
+				  (int) reloc->offset,
+				  reloc->read_domains,
+				  reloc->write_domain);
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
+		if (reloc->write_domain && target_obj->pending_write_domain &&
+		    reloc->write_domain != target_obj->pending_write_domain) {
 			DRM_ERROR("Write domain conflict: "
 				  "obj %p target %d offset %d "
 				  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
+				  obj, reloc->target_handle,
+				  (int) reloc->offset,
+				  reloc->write_domain,
 				  target_obj->pending_write_domain);
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@
 			 "presumed %08x delta %08x\n",
 			 __func__,
 			 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
+			 (int) reloc->offset,
+			 (int) reloc->target_handle,
+			 (int) reloc->read_domains,
+			 (int) reloc->write_domain,
 			 (int) target_obj_priv->gtt_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
+			 (int) reloc->presumed_offset,
+			 reloc->delta);
 #endif
 
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+		target_obj->pending_read_domains |= reloc->read_domains;
+		target_obj->pending_write_domain |= reloc->write_domain;
 
 		/* If the relocation already has the right value in it, no
 		 * more work needs to be done.
 		 */
-		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
 			drm_gem_object_unreference(target_obj);
 			continue;
 		}
@@ -2381,32 +2837,26 @@
 		/* Map the page containing the relocation we're going to
 		 * perform.
 		 */
-		reloc_offset = obj_priv->gtt_offset + reloc.offset;
+		reloc_offset = obj_priv->gtt_offset + reloc->offset;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      (reloc_offset &
 						       ~(PAGE_SIZE - 1)));
 		reloc_entry = (uint32_t __iomem *)(reloc_page +
 						   (reloc_offset & (PAGE_SIZE - 1)));
-		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
 
 #if WATCH_BUF
 		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-			  obj, (unsigned int) reloc.offset,
+			  obj, (unsigned int) reloc->offset,
 			  readl(reloc_entry), reloc_val);
 #endif
 		writel(reloc_val, reloc_entry);
 		io_mapping_unmap_atomic(reloc_page);
 
-		/* Write the updated presumed offset for this entry back out
-		 * to the user.
+		/* The updated presumed offset for this entry will be
+		 * copied back out to the user.
 		 */
-		reloc.presumed_offset = target_obj_priv->gtt_offset;
-		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
-		if (ret != 0) {
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
+		reloc->presumed_offset = target_obj_priv->gtt_offset;
 
 		drm_gem_object_unreference(target_obj);
 	}
@@ -2423,11 +2873,10 @@
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
 			      struct drm_i915_gem_execbuffer *exec,
+			      struct drm_clip_rect *cliprects,
 			      uint64_t exec_offset)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
-					     (uintptr_t) exec->cliprects_ptr;
 	int nbox = exec->num_cliprects;
 	int i = 0, count;
 	uint32_t	exec_start, exec_len;
@@ -2448,7 +2897,7 @@
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, boxes, i,
+			int ret = i915_emit_box(dev, cliprects, i,
 						exec->DR1, exec->DR4);
 			if (ret)
 				return ret;
@@ -2504,6 +2953,75 @@
 	return ret;
 }
 
+static int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+			      uint32_t buffer_count,
+			      struct drm_i915_gem_relocation_entry **relocs)
+{
+	uint32_t reloc_count = 0, reloc_index = 0, i;
+	int ret;
+
+	*relocs = NULL;
+	for (i = 0; i < buffer_count; i++) {
+		if (reloc_count + exec_list[i].relocation_count < reloc_count)
+			return -EINVAL;
+		reloc_count += exec_list[i].relocation_count;
+	}
+
+	*relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+	if (*relocs == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < buffer_count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		ret = copy_from_user(&(*relocs)[reloc_index],
+				     user_relocs,
+				     exec_list[i].relocation_count *
+				     sizeof(**relocs));
+		if (ret != 0) {
+			drm_free(*relocs, reloc_count * sizeof(**relocs),
+				 DRM_MEM_DRIVER);
+			*relocs = NULL;
+			return ret;
+		}
+
+		reloc_index += exec_list[i].relocation_count;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+			    uint32_t buffer_count,
+			    struct drm_i915_gem_relocation_entry *relocs)
+{
+	uint32_t reloc_count = 0, i;
+	int ret = 0;
+
+	for (i = 0; i < buffer_count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (ret == 0) {
+			ret = copy_to_user(user_relocs,
+					   &relocs[reloc_count],
+					   exec_list[i].relocation_count *
+					   sizeof(*relocs));
+		}
+
+		reloc_count += exec_list[i].relocation_count;
+	}
+
+	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+
+	return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
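One detail in i915_gem_get_relocs_from_user() deserves a call-out: relocation_count values are user-controlled, so the first loop checks the running total for u32 wraparound before it sizes a single allocation. The idiom in isolation (an unsigned sum that decreases has overflowed):

	uint32_t total = 0, i;

	for (i = 0; i < buffer_count; i++) {
		if (total + exec_list[i].relocation_count < total)
			return -EINVAL;	/* u32 sum wrapped */
		total += exec_list[i].relocation_count;
	}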
@@ -2515,9 +3033,11 @@
 	struct drm_gem_object **object_list = NULL;
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret, i, pinned = 0;
+	struct drm_clip_rect *cliprects = NULL;
+	struct drm_i915_gem_relocation_entry *relocs;
+	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
-	uint32_t seqno, flush_domains;
+	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries;
 
 #if WATCH_EXEC
@@ -2551,6 +3071,28 @@
 		goto pre_mutex_err;
 	}
 
+	if (args->num_cliprects != 0) {
+		cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
+				       DRM_MEM_DRIVER);
+		if (cliprects == NULL)
+			goto pre_mutex_err;
+
+		ret = copy_from_user(cliprects,
+				     (struct drm_clip_rect __user *)
+				     (uintptr_t) args->cliprects_ptr,
+				     sizeof(*cliprects) * args->num_cliprects);
+		if (ret != 0) {
+			DRM_ERROR("copy %d cliprects failed: %d\n",
+				  args->num_cliprects, ret);
+			goto pre_mutex_err;
+		}
+	}
+
+	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+					    &relocs);
+	if (ret != 0)
+		goto pre_mutex_err;
+
 	mutex_lock(&dev->struct_mutex);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@
 	/* Pin and relocate */
 	for (pin_tries = 0; ; pin_tries++) {
 		ret = 0;
+		reloc_index = 0;
+
 		for (i = 0; i < args->buffer_count; i++) {
 			object_list[i]->pending_read_domains = 0;
 			object_list[i]->pending_write_domain = 0;
 			ret = i915_gem_object_pin_and_relocate(object_list[i],
 							       file_priv,
-							       &exec_list[i]);
+							       &exec_list[i],
+							       &relocs[reloc_index]);
 			if (ret)
 				break;
 			pinned = i + 1;
+			reloc_index += exec_list[i].relocation_count;
 		}
 		/* success */
 		if (ret == 0)
@@ -2687,7 +3233,7 @@
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -2751,11 +3297,27 @@
 				  args->buffer_count, ret);
 	}
 
+	/* Copy the updated relocations out regardless of current error
+	 * state.  Failure to update the relocs would mean that the next
+	 * time userland calls execbuf, it would do so with presumed offset
+	 * state that didn't match the actual object state.
+	 */
+	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+					   relocs);
+	if (ret2 != 0) {
+		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+		if (ret == 0)
+			ret = ret2;
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
 	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
+	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
+		 DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -3192,7 +3754,7 @@
 
 	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
 
-	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@
 	obj = dev_priv->hws_obj;
 	obj_priv = obj->driver_private;
 
-	kunmap(obj_priv->page_list[0]);
+	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret)
 		goto out;
 
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
 		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);
 		kunmap_atomic(dst, KM_USER0);
 	}
-	drm_clflush_pages(obj_priv->page_list, page_count);
+	drm_clflush_pages(obj_priv->pages, page_count);
 	drm_agp_chipset_flush(dev);
 out:
 	obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_page_list(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
@@ -3590,7 +4152,7 @@
 	page_count = obj->size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
 		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
 		memcpy(dst, src, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 0000000..455ec97
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define DRM_I915_RING_DEBUG 1
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define ACTIVE_LIST	1
+#define FLUSHING_LIST	2
+#define INACTIVE_LIST	3
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+{
+	if (obj_priv->user_pin_count > 0)
+		return "P";
+	else if (obj_priv->pin_count > 0)
+		return "p";
+	else
+		return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+{
+	switch (obj_priv->tiling_mode) {
+	default:
+	case I915_TILING_NONE: return " ";
+	case I915_TILING_X: return "X";
+	case I915_TILING_Y: return "Y";
+	}
+}
+
+static int i915_gem_object_list_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
+	struct list_head *head;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+
+	switch (list) {
+	case ACTIVE_LIST:
+		seq_printf(m, "Active:\n");
+		head = &dev_priv->mm.active_list;
+		break;
+	case INACTIVE_LIST:
+		seq_printf(m, "Inactive:\n");
+		head = &dev_priv->mm.inactive_list;
+		break;
+	case FLUSHING_LIST:
+		seq_printf(m, "Flushing:\n");
+		head = &dev_priv->mm.flushing_list;
+		break;
+	default:
+		DRM_INFO("Oops, unexpected list\n");
+		return 0;
+	}
+
+	list_for_each_entry(obj_priv, head, list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+
+		seq_printf(m, "    %p: %s %08x %08x %d",
+			   obj,
+			   get_pin_flag(obj_priv),
+			   obj->read_domains, obj->write_domain,
+			   obj_priv->last_rendering_seqno);
+
+		if (obj->name)
+			seq_printf(m, " (name: %d)", obj->name);
+		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_gem_request_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *gem_request;
+
+	seq_printf(m, "Request:\n");
+	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+		seq_printf(m, "    %d @ %d\n",
+			   gem_request->seqno,
+			   (int) (jiffies - gem_request->emitted_jiffies));
+	}
+	return 0;
+}
+
+static int i915_gem_seqno_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->hw_status_page != NULL) {
+		seq_printf(m, "Current sequence: %d\n",
+			   i915_get_gem_seqno(dev));
+	} else {
+		seq_printf(m, "Current sequence: hws uninitialized\n");
+	}
+	seq_printf(m, "Waiter sequence:  %d\n",
+			dev_priv->mm.waiting_gem_seqno);
+	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+	return 0;
+}
+
+
+static int i915_interrupt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "Interrupt enable:    %08x\n",
+		   I915_READ(IER));
+	seq_printf(m, "Interrupt identity:  %08x\n",
+		   I915_READ(IIR));
+	seq_printf(m, "Interrupt mask:      %08x\n",
+		   I915_READ(IMR));
+	seq_printf(m, "Pipe A stat:         %08x\n",
+		   I915_READ(PIPEASTAT));
+	seq_printf(m, "Pipe B stat:         %08x\n",
+		   I915_READ(PIPEBSTAT));
+	seq_printf(m, "Interrupts received: %d\n",
+		   atomic_read(&dev_priv->irq_received));
+	if (dev_priv->hw_status_page != NULL) {
+		seq_printf(m, "Current sequence:    %d\n",
+			   i915_get_gem_seqno(dev));
+	} else {
+		seq_printf(m, "Current sequence:    hws uninitialized\n");
+	}
+	seq_printf(m, "Waiter sequence:     %d\n",
+		   dev_priv->mm.waiting_gem_seqno);
+	seq_printf(m, "IRQ sequence:        %d\n",
+		   dev_priv->mm.irq_gem_seqno);
+	return 0;
+}
+
+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+		if (obj == NULL) {
+			seq_printf(m, "Fenced object[%2d] = unused\n", i);
+		} else {
+			struct drm_i915_gem_object *obj_priv;
+
+			obj_priv = obj->driver_private;
+			seq_printf(m, "Fenced object[%2d] = %p: %s "
+				   "%08x %08zx %08x %s %08x %08x %d",
+				   i, obj, get_pin_flag(obj_priv),
+				   obj_priv->gtt_offset,
+				   obj->size, obj_priv->stride,
+				   get_tiling_flag(obj_priv),
+				   obj->read_domains, obj->write_domain,
+				   obj_priv->last_rendering_seqno);
+			if (obj->name)
+				seq_printf(m, " (name: %d)", obj->name);
+			seq_printf(m, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int i915_hws_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+	volatile u32 *hws;
+
+	hws = (volatile u32 *)dev_priv->hw_status_page;
+	if (hws == NULL)
+		return 0;
+
+	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			   i * 4,
+			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+	}
+	return 0;
+}
+
+static struct drm_info_list i915_gem_debugfs_list[] = {
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_request", i915_gem_request_info, 0},
+	{"i915_gem_seqno", i915_gem_seqno_info, 0},
+	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+	{"i915_gem_interrupt", i915_interrupt_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0},
+};
+#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
+
+int i915_gem_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(i915_gem_debugfs_list,
+					I915_GEM_DEBUGFS_ENTRIES,
+					minor->debugfs_root, minor);
+}
+
+void i915_gem_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(i915_gem_debugfs_list,
+				 I915_GEM_DEBUGFS_ENTRIES, minor);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
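Adding another file to the new interface is mechanical: write a seq_file show callback that recovers the device from the drm_info_node, then append an entry to i915_gem_debugfs_list. A hypothetical example (i915_irq_count_info is not part of this patch; it reuses only fields the patch already prints elsewhere):

static int i915_irq_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	return 0;
}

/* and in i915_gem_debugfs_list[]:
 *	{"i915_irq_count", i915_irq_count_info, 0},
 */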
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de..0000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@anholt.net>
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-static int i915_gem_active_info(char *buf, char **start, off_t offset,
-				int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("Active:\n");
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-			    list)
-	{
-		struct drm_gem_object *obj = obj_priv->obj;
-		if (obj->name) {
-			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-				       obj, obj->name,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		} else {
-			DRM_PROC_PRINT("       %p: %08x %08x %d\n",
-				       obj,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		}
-	}
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
-				  int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("Flushing:\n");
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-			    list)
-	{
-		struct drm_gem_object *obj = obj_priv->obj;
-		if (obj->name) {
-			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-				       obj, obj->name,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		} else {
-			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		}
-	}
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
-				  int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("Inactive:\n");
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
-			    list)
-	{
-		struct drm_gem_object *obj = obj_priv->obj;
-		if (obj->name) {
-			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
-				       obj, obj->name,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		} else {
-			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
-				       obj->read_domains, obj->write_domain,
-				       obj_priv->last_rendering_seqno);
-		}
-	}
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int i915_gem_request_info(char *buf, char **start, off_t offset,
-				 int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_request *gem_request;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
-			    list)
-	{
-		DRM_PROC_PRINT("    %d @ %d\n",
-			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies));
-	}
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	if (dev_priv->hw_status_page != NULL) {
-		DRM_PROC_PRINT("Current sequence: %d\n",
-			       i915_get_gem_seqno(dev));
-	} else {
-		DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
-	}
-	DRM_PROC_PRINT("Waiter sequence:  %d\n",
-		       dev_priv->mm.waiting_gem_seqno);
-	DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-
-static int i915_interrupt_info(char *buf, char **start, off_t offset,
-			       int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int len = 0;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	DRM_PROC_PRINT("Interrupt enable:    %08x\n",
-		       I915_READ(IER));
-	DRM_PROC_PRINT("Interrupt identity:  %08x\n",
-		       I915_READ(IIR));
-	DRM_PROC_PRINT("Interrupt mask:      %08x\n",
-		       I915_READ(IMR));
-	DRM_PROC_PRINT("Pipe A stat:         %08x\n",
-		       I915_READ(PIPEASTAT));
-	DRM_PROC_PRINT("Pipe B stat:         %08x\n",
-		       I915_READ(PIPEBSTAT));
-	DRM_PROC_PRINT("Interrupts received: %d\n",
-		       atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
-		DRM_PROC_PRINT("Current sequence:    %d\n",
-			       i915_get_gem_seqno(dev));
-	} else {
-		DRM_PROC_PRINT("Current sequence:    hws uninitialized\n");
-	}
-	DRM_PROC_PRINT("Waiter sequence:     %d\n",
-		       dev_priv->mm.waiting_gem_seqno);
-	DRM_PROC_PRINT("IRQ sequence:        %d\n",
-		       dev_priv->mm.irq_gem_seqno);
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static int i915_hws_info(char *buf, char **start, off_t offset,
-			 int request, int *eof, void *data)
-{
-	struct drm_minor *minor = (struct drm_minor *) data;
-	struct drm_device *dev = minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int len = 0, i;
-	volatile u32 *hws;
-
-	if (offset > DRM_PROC_LIMIT) {
-		*eof = 1;
-		return 0;
-	}
-
-	hws = (volatile u32 *)dev_priv->hw_status_page;
-	if (hws == NULL) {
-		*eof = 1;
-		return 0;
-	}
-
-	*start = &buf[offset];
-	*eof = 0;
-	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
-		DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			       i * 4,
-			       hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
-	}
-	if (len > request + offset)
-		return request;
-	*eof = 1;
-	return len - offset;
-}
-
-static struct drm_proc_list {
-	/** file name */
-	const char *name;
-	/** proc callback*/
-	int (*f) (char *, char **, off_t, int, int *, void *);
-} i915_gem_proc_list[] = {
-	{"i915_gem_active", i915_gem_active_info},
-	{"i915_gem_flushing", i915_gem_flushing_info},
-	{"i915_gem_inactive", i915_gem_inactive_info},
-	{"i915_gem_request", i915_gem_request_info},
-	{"i915_gem_seqno", i915_gem_seqno_info},
-	{"i915_gem_interrupt", i915_interrupt_info},
-	{"i915_gem_hws", i915_hws_info},
-};
-
-#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-
-int i915_gem_proc_init(struct drm_minor *minor)
-{
-	struct proc_dir_entry *ent;
-	int i, j;
-
-	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
-		ent = create_proc_entry(i915_gem_proc_list[i].name,
-					S_IFREG | S_IRUGO, minor->dev_root);
-		if (!ent) {
-			DRM_ERROR("Cannot create /proc/dri/.../%s\n",
-				  i915_gem_proc_list[i].name);
-			for (j = 0; j < i; j++)
-				remove_proc_entry(i915_gem_proc_list[i].name,
-						  minor->dev_root);
-			return -1;
-		}
-		ent->read_proc = i915_gem_proc_list[i].f;
-		ent->data = minor;
-	}
-	return 0;
-}
-
-void i915_gem_proc_cleanup(struct drm_minor *minor)
-{
-	int i;
-
-	if (!minor->dev_root)
-		return;
-
-	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
-		remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191..4cce1ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
-		   IS_GM45(dev)) {
+	} else if (IS_MOBILE(dev)) {
 		uint32_t dcc;
 
-		/* On 915-945 and GM965, channel interleave by the CPU is
-		 * determined by DCC.  The CPU will alternate based on bit 6
-		 * in interleaved mode, and the GPU will then also alternate
-		 * on bit 6, 9, and 10 for X, but the CPU may also optionally
-		 * alternate based on bit 17 (XOR not disabled and XOR
-		 * bit == 17).
+		/* On mobile 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU does any swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
 		 */
 		dcc = I915_READ(DCC);
 		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@
 			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 			break;
 		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-			if (IS_I915G(dev) || IS_I915GM(dev) ||
-			    dcc & DCC_CHANNEL_XOR_DISABLE) {
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
-				   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-				/* GM965/GM45 does either bit 11 or bit 17
-				 * swizzling.
-				 */
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
 			} else {
-				/* Bit 17 or perhaps other swizzling */
+				/* Bit 17 swizzling by the CPU in addition. */
 				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 			}
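
What "bit 6 swizzling on bits 9 and 10" amounts to, as a standalone sketch (an illustration under assumed semantics, not code from this patch): when the memory channels interleave this way, software touching a tiled buffer through the CPU has to flip bit 6 of an offset whenever the XOR of the controlling address bits is set.

/* Illustrative only: apply I915_BIT_6_SWIZZLE_9_10 to a byte offset. */
static unsigned long swizzle_bit6_9_10(unsigned long offset)
{
	unsigned long bit9  = (offset >> 9)  & 1;
	unsigned long bit10 = (offset >> 10) & 1;

	return offset ^ ((bit9 ^ bit10) << 6);
}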
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d8..377cc58 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
 #define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
 #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD	0x00ff8000 /* IGD */
 
 #define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
 #define I915_CRC_ERROR_ENABLE			(1UL<<29)
@@ -435,6 +436,7 @@
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
 #define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
 /* i830, required in DVO non-gang */
 #define   PLL_P2_DIVIDE_BY_4		(1 << 23)
 #define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
@@ -501,10 +503,12 @@
 #define FPB0	0x06048
 #define FPB1	0x0604c
 #define   FP_N_DIV_MASK		0x003f0000
+#define   FP_N_IGD_DIV_MASK	0x00ff0000
 #define   FP_N_DIV_SHIFT		16
 #define   FP_M1_DIV_MASK	0x00003f00
 #define   FP_M1_DIV_SHIFT		 8
 #define   FP_M2_DIV_MASK	0x0000003f
+#define   FP_M2_IGD_DIV_MASK	0x000000ff
 #define   FP_M2_DIV_SHIFT		 0
 #define DPLL_TEST	0x606c
 #define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
@@ -629,6 +633,22 @@
 #define   TV_HOTPLUG_INT_EN			(1 << 18)
 #define   CRT_HOTPLUG_INT_EN			(1 << 9)
 #define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+#define CRT_HOTPLUG_MASK			(0x3fc) /* Bits 9-2 */
+
 
 #define PORT_HOTPLUG_STAT	0x61114
 #define   HDMIB_HOTPLUG_INT_STATUS		(1 << 29)
@@ -856,7 +876,7 @@
  */
 # define TV_ENC_C0_FIX			(1 << 10)
 /** Bits that must be preserved by software */
-# define TV_CTL_SAVE			((3 << 8) | (3 << 6))
+# define TV_CTL_SAVE			((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
 # define TV_FUSE_STATE_MASK		(3 << 4)
 /** Read-only state that reports all features enabled */
 # define TV_FUSE_STATE_ENABLED		(0 << 4)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715a..de621aa 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@
 	u8 panel_type;
 	u8 rsvd1;
 	/* LVDS capabilities, stored in a dword */
-	u8 rsvd2:1;
-	u8 lvds_edid:1;
-	u8 pixel_dither:1;
-	u8 pfit_ratio_auto:1;
-	u8 pfit_gfx_mode_enhanced:1;
-	u8 pfit_text_mode_enhanced:1;
 	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
 	u8 rsvd4;
 } __attribute__((packed));
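
The reordering above matters because these bitfields overlay a byte parsed out of the video BIOS tables: on the little-endian machines this code targets, the first declared field is allocated from the least significant bit, so pfit_mode has to come first to land in bits 1:0. A standalone demonstration (userspace, not driver code):

#include <stdio.h>
#include <string.h>

struct lvds_caps {
	unsigned char pfit_mode:2;	/* occupies bits 1:0 on little-endian */
	unsigned char rest:6;
};

int main(void)
{
	struct lvds_caps caps;
	unsigned char raw = 0x03;	/* pfit_mode == 3 in the low two bits */

	memcpy(&caps, &raw, sizeof(raw));
	printf("pfit_mode = %d\n", caps.pfit_mode);	/* 3 on little-endian */
	return 0;
}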
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed34..2b6d443 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@
 static int intel_crt_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
+	struct drm_device *dev = connector->dev;
+	int max_clock = 0;
+
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	if (mode->clock > 400000 || mode->clock < 25000)
-		return MODE_CLOCK_RANGE;
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+
+	if (!IS_I9XX(dev))
+		max_clock = 350000;
+	else
+		max_clock = 400000;
+	if (mode->clock > max_clock)
+		return MODE_CLOCK_HIGH;
 
 	return MODE_OK;
 }
@@ -113,10 +123,13 @@
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
-	if (intel_crtc->pipe == 0)
+	if (intel_crtc->pipe == 0) {
 		adpa |= ADPA_PIPE_A_SELECT;
-	else
+		I915_WRITE(BCLRPAT_A, 0);
+	} else {
 		adpa |= ADPA_PIPE_B_SELECT;
+		I915_WRITE(BCLRPAT_B, 0);
+	}
 
 	I915_WRITE(ADPA, adpa);
 }
@@ -133,20 +146,39 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp;
+	u32 hotplug_en;
+	int i, tries = 0;
+	/*
+	 * On 4 series desktop chips, the CRT detect sequence needs to be
+	 * done twice to get a reliable result.
+	 */
 
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	if (IS_G4X(dev) && !IS_GM45(dev))
+		tries = 2;
+	else
+		tries = 1;
+	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+	hotplug_en &= ~(CRT_HOTPLUG_MASK);
+	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
-	temp = I915_READ(PORT_HOTPLUG_EN);
+	if (IS_GM45(dev))
+		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
 
-	I915_WRITE(PORT_HOTPLUG_EN,
-		   temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 
-	do {
-		if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
-			break;
-		msleep(1);
-	} while (time_after(timeout, jiffies));
+	for (i = 0; i < tries; i++) {
+		unsigned long timeout;
+		/* turn on the FORCE_DETECT */
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+		timeout = jiffies + msecs_to_jiffies(1000);
+		/* wait for FORCE_DETECT to go off */
+		do {
+			if (!(I915_READ(PORT_HOTPLUG_EN) &
+					CRT_HOTPLUG_FORCE_DETECT))
+				break;
+			msleep(1);
+		} while (time_after(timeout, jiffies));
+	}
 
 	if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
 	    CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a283427..d9c50ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@
 } intel_p2_t;
 
 #define INTEL_P2_NUM		      2
-
-typedef struct {
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
     intel_range_t   dot, vco, n, m, m1, m2, p, p1;
     intel_p2_t	    p2;
-} intel_limit_t;
+    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+		      int, int, intel_clock_t *);
+};
 
 #define I8XX_DOT_MIN		  25000
 #define I8XX_DOT_MAX		 350000
@@ -90,18 +92,32 @@
 #define I9XX_DOT_MAX		 400000
 #define I9XX_VCO_MIN		1400000
 #define I9XX_VCO_MAX		2800000
+#define IGD_VCO_MIN		1700000
+#define IGD_VCO_MAX		3500000
 #define I9XX_N_MIN		      1
 #define I9XX_N_MAX		      6
+/* IGD's N counter is a ring counter */
+#define IGD_N_MIN		      3
+#define IGD_N_MAX		      6
 #define I9XX_M_MIN		     70
 #define I9XX_M_MAX		    120
+#define IGD_M_MIN		      2
+#define IGD_M_MAX		    256
 #define I9XX_M1_MIN		     10
 #define I9XX_M1_MAX		     22
 #define I9XX_M2_MIN		      5
 #define I9XX_M2_MAX		      9
+/* IGD M1 is reserved, and must be 0 */
+#define IGD_M1_MIN		      0
+#define IGD_M1_MAX		      0
+#define IGD_M2_MIN		      0
+#define IGD_M2_MAX		      254
 #define I9XX_P_SDVO_DAC_MIN	      5
 #define I9XX_P_SDVO_DAC_MAX	     80
 #define I9XX_P_LVDS_MIN		      7
 #define I9XX_P_LVDS_MAX		     98
+#define IGD_P_LVDS_MIN		      7
+#define IGD_P_LVDS_MAX		     112
 #define I9XX_P1_MIN		      1
 #define I9XX_P1_MAX		      8
 #define I9XX_P2_SDVO_DAC_SLOW		     10
@@ -115,6 +131,97 @@
 #define INTEL_LIMIT_I8XX_LVDS	    1
 #define INTEL_LIMIT_I9XX_SDVO_DAC   2
 #define INTEL_LIMIT_I9XX_LVDS	    3
+#define INTEL_LIMIT_G4X_SDVO	    4
+#define INTEL_LIMIT_G4X_HDMI_DAC   5
+#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS   6
+#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS   7
+#define INTEL_LIMIT_IGD_SDVO_DAC    8
+#define INTEL_LIMIT_IGD_LVDS	    9
+
+/* The parameters below are for SDVO on the G4X platform */
+#define G4X_DOT_SDVO_MIN           25000
+#define G4X_DOT_SDVO_MAX           270000
+#define G4X_VCO_MIN                1750000
+#define G4X_VCO_MAX                3500000
+#define G4X_N_SDVO_MIN             1
+#define G4X_N_SDVO_MAX             4
+#define G4X_M_SDVO_MIN             104
+#define G4X_M_SDVO_MAX             138
+#define G4X_M1_SDVO_MIN            17
+#define G4X_M1_SDVO_MAX            23
+#define G4X_M2_SDVO_MIN            5
+#define G4X_M2_SDVO_MAX            11
+#define G4X_P_SDVO_MIN             10
+#define G4X_P_SDVO_MAX             30
+#define G4X_P1_SDVO_MIN            1
+#define G4X_P1_SDVO_MAX            3
+#define G4X_P2_SDVO_SLOW           10
+#define G4X_P2_SDVO_FAST           10
+#define G4X_P2_SDVO_LIMIT          270000
+
+/* The parameters below are for HDMI_DAC on the G4X platform */
+#define G4X_DOT_HDMI_DAC_MIN           22000
+#define G4X_DOT_HDMI_DAC_MAX           400000
+#define G4X_N_HDMI_DAC_MIN             1
+#define G4X_N_HDMI_DAC_MAX             4
+#define G4X_M_HDMI_DAC_MIN             104
+#define G4X_M_HDMI_DAC_MAX             138
+#define G4X_M1_HDMI_DAC_MIN            16
+#define G4X_M1_HDMI_DAC_MAX            23
+#define G4X_M2_HDMI_DAC_MIN            5
+#define G4X_M2_HDMI_DAC_MAX            11
+#define G4X_P_HDMI_DAC_MIN             5
+#define G4X_P_HDMI_DAC_MAX             80
+#define G4X_P1_HDMI_DAC_MIN            1
+#define G4X_P1_HDMI_DAC_MAX            8
+#define G4X_P2_HDMI_DAC_SLOW           10
+#define G4X_P2_HDMI_DAC_FAST           5
+#define G4X_P2_HDMI_DAC_LIMIT          165000
+
+/* The parameters below are for SINGLE_CHANNEL_LVDS on the G4X platform */
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN           20000
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX           115000
+#define G4X_N_SINGLE_CHANNEL_LVDS_MIN             1
+#define G4X_N_SINGLE_CHANNEL_LVDS_MAX             3
+#define G4X_M_SINGLE_CHANNEL_LVDS_MIN             104
+#define G4X_M_SINGLE_CHANNEL_LVDS_MAX             138
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN            17
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX            23
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN            5
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX            11
+#define G4X_P_SINGLE_CHANNEL_LVDS_MIN             28
+#define G4X_P_SINGLE_CHANNEL_LVDS_MAX             112
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN            2
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX            8
+#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW           14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST           14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT          0
+
+/* The parameters below are for DUAL_CHANNEL_LVDS on the G4X platform */
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN           80000
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX           224000
+#define G4X_N_DUAL_CHANNEL_LVDS_MIN             1
+#define G4X_N_DUAL_CHANNEL_LVDS_MAX             3
+#define G4X_M_DUAL_CHANNEL_LVDS_MIN             104
+#define G4X_M_DUAL_CHANNEL_LVDS_MAX             138
+#define G4X_M1_DUAL_CHANNEL_LVDS_MIN            17
+#define G4X_M1_DUAL_CHANNEL_LVDS_MAX            23
+#define G4X_M2_DUAL_CHANNEL_LVDS_MIN            5
+#define G4X_M2_DUAL_CHANNEL_LVDS_MAX            11
+#define G4X_P_DUAL_CHANNEL_LVDS_MIN             14
+#define G4X_P_DUAL_CHANNEL_LVDS_MAX             42
+#define G4X_P1_DUAL_CHANNEL_LVDS_MIN            2
+#define G4X_P1_DUAL_CHANNEL_LVDS_MAX            6
+#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW           7
+#define G4X_P2_DUAL_CHANNEL_LVDS_FAST           7
+#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT          0
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+		    int target, int refclk, intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *best_clock);
 
 static const intel_limit_t intel_limits[] = {
     { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@
         .p1  = { .min = I8XX_P1_MIN,		.max = I8XX_P1_MAX },
 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		 .p2_slow = I8XX_P2_SLOW,	.p2_fast = I8XX_P2_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I8XX_LVDS */
         .dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@
         .p1  = { .min = I8XX_P1_LVDS_MIN,	.max = I8XX_P1_LVDS_MAX },
 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
 		 .p2_slow = I8XX_P2_LVDS_SLOW,	.p2_fast = I8XX_P2_LVDS_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I9XX_SDVO_DAC */
         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@
         .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_I9XX_LVDS */
         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@
 	 */
 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_FAST },
+	.find_pll = intel_find_best_PLL,
     },
+    /* the parameters and functions below are for the G4X chipset family */
+    { /* INTEL_LIMIT_G4X_SDVO */
+	.dot = { .min = G4X_DOT_SDVO_MIN,	.max = G4X_DOT_SDVO_MAX },
+	.vco = { .min = G4X_VCO_MIN,	        .max = G4X_VCO_MAX},
+	.n   = { .min = G4X_N_SDVO_MIN,	        .max = G4X_N_SDVO_MAX },
+	.m   = { .min = G4X_M_SDVO_MIN,         .max = G4X_M_SDVO_MAX },
+	.m1  = { .min = G4X_M1_SDVO_MIN,	.max = G4X_M1_SDVO_MAX },
+	.m2  = { .min = G4X_M2_SDVO_MIN,	.max = G4X_M2_SDVO_MAX },
+	.p   = { .min = G4X_P_SDVO_MIN,         .max = G4X_P_SDVO_MAX },
+	.p1  = { .min = G4X_P1_SDVO_MIN,	.max = G4X_P1_SDVO_MAX},
+	.p2  = { .dot_limit = G4X_P2_SDVO_LIMIT,
+		 .p2_slow = G4X_P2_SDVO_SLOW,
+		 .p2_fast = G4X_P2_SDVO_FAST
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_HDMI_DAC */
+	.dot = { .min = G4X_DOT_HDMI_DAC_MIN,	.max = G4X_DOT_HDMI_DAC_MAX },
+	.vco = { .min = G4X_VCO_MIN,	        .max = G4X_VCO_MAX},
+	.n   = { .min = G4X_N_HDMI_DAC_MIN,	.max = G4X_N_HDMI_DAC_MAX },
+	.m   = { .min = G4X_M_HDMI_DAC_MIN,	.max = G4X_M_HDMI_DAC_MAX },
+	.m1  = { .min = G4X_M1_HDMI_DAC_MIN,	.max = G4X_M1_HDMI_DAC_MAX },
+	.m2  = { .min = G4X_M2_HDMI_DAC_MIN,	.max = G4X_M2_HDMI_DAC_MAX },
+	.p   = { .min = G4X_P_HDMI_DAC_MIN,	.max = G4X_P_HDMI_DAC_MAX },
+	.p1  = { .min = G4X_P1_HDMI_DAC_MIN,	.max = G4X_P1_HDMI_DAC_MAX},
+	.p2  = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
+		 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
+		 .p2_fast = G4X_P2_HDMI_DAC_FAST
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+	.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
+	.vco = { .min = G4X_VCO_MIN,
+		 .max = G4X_VCO_MAX },
+	.n   = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
+	.m   = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
+	.m1  = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
+	.m2  = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
+	.p   = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
+	.p1  = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
+		 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
+	.p2  = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
+		 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
+		 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+	.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
+	.vco = { .min = G4X_VCO_MIN,
+		 .max = G4X_VCO_MAX },
+	.n   = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
+	.m   = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
+	.m1  = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
+	.m2  = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
+	.p   = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
+	.p1  = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
+		 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
+	.p2  = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
+		 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
+		 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+    },
+    { /* INTEL_LIMIT_IGD_SDVO */
+        .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX},
+        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
+        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
+        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
+        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
+        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
+        .p   = { .min = I9XX_P_SDVO_DAC_MIN,    .max = I9XX_P_SDVO_DAC_MAX },
+        .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
+	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
+    },
+    { /* INTEL_LIMIT_IGD_LVDS */
+        .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
+        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
+        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
+        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
+        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
+        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
+        .p   = { .min = IGD_P_LVDS_MIN,	.max = IGD_P_LVDS_MAX },
+        .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
+	/* IGD only supports single-channel mode. */
+	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
+    },
+
 };
 
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const intel_limit_t *limit;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			/* LVDS with dual channel */
+			limit = &intel_limits
+					[INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+		else
+			/* LVDS with single channel */
+			limit = &intel_limits
+					[INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+		limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+		limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+	} else /* fall back for any other output type */
+		limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+
+	return limit;
+}
+
 static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	const intel_limit_t *limit;
 
-	if (IS_I9XX(dev)) {
+	if (IS_G4X(dev)) {
+		limit = intel_g4x_limit(crtc);
+	} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
 		else
 			limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+	} else if (IS_IGD(dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+		else
+			limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
 	} else {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@
 	return limit;
 }
 
-static void intel_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in IGD, n is a ring counter */
+static void igd_clock(int refclk, intel_clock_t *clock)
 {
+	clock->m = clock->m2 + 2;
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = refclk * clock->m / clock->n;
+	clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+	if (IS_IGD(dev)) {
+		igd_clock(refclk, clock);
+		return;
+	}
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 	clock->p = clock->p1 * clock->p2;
 	clock->vco = refclk * clock->m / (clock->n + 2);
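
For reference, the split above encodes two different divisor equations. A worked example with illustrative values (not taken from the patch):

/* i9xx: dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / (p1*p2).
 * E.g. refclk = 96000 kHz, m1 = 10, m2 = 8, n = 2, p1 = 2, p2 = 10:
 *   m = 5*12 + 10 = 70, vco = 96000*70/4 = 1680000, dot = 84000 kHz.
 * IGD instead uses m = m2 + 2 (m1 is reserved as 0) and divides by n
 * directly, since its N register is a ring counter rather than n+2.
 */
static int i9xx_dot_khz(int refclk, int n, int m1, int m2, int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);

	return refclk * m / (n + 2) / (p1 * p2);
}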
@@ -226,6 +487,7 @@
 static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
 {
 	const intel_limit_t *limit = intel_limit (crtc);
+	struct drm_device *dev = crtc->dev;
 
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@
 		INTELPllInvalid ("m2 out of range\n");
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid ("m1 out of range\n");
-	if (clock->m1 <= clock->m2)
+	if (clock->m1 <= clock->m2 && !IS_IGD(dev))
 		INTELPllInvalid ("m1 <= m2\n");
 	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
 		INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@
 	return true;
 }
 
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE.  The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
-				int refclk, intel_clock_t *best_clock)
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+		    int target, int refclk, intel_clock_t *best_clock)
+
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	intel_clock_t clock;
-	const intel_limit_t *limit = intel_limit(crtc);
 	int err = target;
 
 	if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@
 	memset (best_clock, 0, sizeof (*best_clock));
 
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-		for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
-			     clock.m2 <= limit->m2.max; clock.m2++) {
+		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+			/* m1 is always 0 in IGD */
+			if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+				break;
 			for (clock.n = limit->n.min; clock.n <= limit->n.max;
 			     clock.n++) {
 				for (clock.p1 = limit->p1.min;
 				     clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					intel_clock(refclk, &clock);
+					intel_clock(dev, refclk, &clock);
 
 					if (!intel_PLL_is_valid(crtc, &clock))
 						continue;
@@ -315,6 +575,63 @@
 	return (err != target);
 }
 
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	intel_clock_t clock;
+	int max_n;
+	bool found;
+	/* approximately equals target * 0.00488 */
+	int err_most = (target >> 8) + (target >> 10);
+	found = false;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	max_n = limit->n.max;
+	/* based on hardware requirement, prefer smaller n for precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		/* based on hardware requirement, prefer larger m1, m2, p1 */
+		for (clock.m1 = limit->m1.max;
+		     clock.m1 >= limit->m1.min; clock.m1--) {
+			for (clock.m2 = limit->m2.max;
+			     clock.m2 >= limit->m2.min; clock.m2--) {
+				for (clock.p1 = limit->p1.max;
+				     clock.p1 >= limit->p1.min; clock.p1--) {
+					int this_err;
+
+					intel_clock(dev, refclk, &clock);
+					if (!intel_PLL_is_valid(crtc, &clock))
+						continue;
+					this_err = abs(clock.dot - target);
+					if (this_err < err_most) {
+						*best_clock = clock;
+						err_most = this_err;
+						max_n = clock.n;
+						found = true;
+					}
+				}
+			}
+		}
+	}
+
+	return found;
+}
+
 void
 intel_wait_for_vblank(struct drm_device *dev)
 {
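
A note on the tolerance in intel_g4x_find_best_PLL() above: (target >> 8) + (target >> 10) is target * (1/256 + 1/1024) = 5*target/1024, i.e. roughly 0.488% of the target, which is where the 0.00488 in the comment comes from. A quick standalone check with an assumed target:

#include <assert.h>

int main(void)
{
	int target = 108000;	/* kHz; illustrative value */
	int err_most = (target >> 8) + (target >> 10);

	assert(err_most == 421 + 105);	/* 526, ~0.49% of 108000 */
	return 0;
}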
@@ -634,7 +951,7 @@
 		return 400000;
 	else if (IS_I915G(dev))
 		return 333000;
-	else if (IS_I945GM(dev) || IS_845G(dev))
+	else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
 		return 200000;
 	else if (IS_I915GM(dev)) {
 		u16 gcfgc = 0;
@@ -733,6 +1050,7 @@
 	bool is_crt = false, is_lvds = false, is_tv = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	const intel_limit_t *limit;
 	int ret;
 
 	drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@
 		refclk = 48000;
 	}
 
-	ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc);
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+	if (IS_IGD(dev))
+		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+	else
+		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 
 	dpll = DPLL_VGA_MODE_DIS;
 	if (IS_I9XX(dev)) {
@@ -799,7 +1126,10 @@
 		}
 
 		/* compute bitmask from p1 value */
-		dpll |= (1 << (clock.p1 - 1)) << 16;
+		if (IS_IGD(dev))
+			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+		else
+			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 		switch (clock.p2) {
 		case 5:
 			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@
 		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
-	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+	if (IS_IGD(dev)) {
+		clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+		clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	} else {
+		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	}
+
 	if (IS_I9XX(dev)) {
-		clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+		if (IS_IGD(dev))
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
+				DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+		else
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
 
 		switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@
 		}
 
 		/* XXX: Handle the 100Mhz refclk */
-		intel_clock(96000, &clock);
+		intel_clock(dev, 96000, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -1313,9 +1653,9 @@
 			if ((dpll & PLL_REF_INPUT_MASK) ==
 			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 				/* XXX: might not be 66MHz */
-				intel_clock(66000, &clock);
+				intel_clock(dev, 66000, &clock);
 			} else
-				intel_clock(48000, &clock);
+				intel_clock(dev, 48000, &clock);
 		} else {
 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
 				clock.p1 = 2;
@@ -1328,7 +1668,7 @@
 			else
 				clock.p2 = 2;
 
-			intel_clock(48000, &clock);
+			intel_clock(dev, 48000, &clock);
 		}
 	}
 
@@ -1474,13 +1814,21 @@
 
 	if (IS_I9XX(dev)) {
 		int found;
+		u32 reg;
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
 			found = intel_sdvo_init(dev, SDVOB);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
 				intel_hdmi_init(dev, SDVOB);
 		}
-		if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
+
+		/* Before G4X, SDVOC doesn't have its own detect register */
+		if (IS_G4X(dev))
+			reg = SDVOC;
+		else
+			reg = SDVOB;
+
+		if (I915_READ(reg) & SDVO_DETECTED) {
 			found = intel_sdvo_init(dev, SDVOC);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
 				intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af..6619f26 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@
 		pfit_control = 0;
 
 	if (!IS_I965G(dev)) {
-		if (dev_priv->panel_wants_dither)
+		if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
 			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 	}
 	else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d6..ceca947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@
  */
 static const struct color_conversion ntsc_m_csc_composite = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@
 };
 
 static const struct color_conversion ntsc_m_csc_svideo = {
-	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@
 
 static const struct color_conversion ntsc_j_csc_composite = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
-	.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
-	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+	.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@
 
 static const struct color_conversion ntsc_j_csc_svideo = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
-	.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
-	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+	.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
 };
 
 static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@
 
 static const struct color_conversion pal_csc_composite = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
-	.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
-	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+	.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
 };
 
 static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@
 
 static const struct color_conversion pal_csc_svideo = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
-	.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
-	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+	.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
 };
 
 static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@
 
 static const struct color_conversion pal_m_csc_composite = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@
 };
 
 static const struct color_conversion pal_m_csc_svideo = {
-	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@
 
 static const struct color_conversion pal_n_csc_composite = {
 	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
-	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
-	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
 };
 
 static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@
 };
 
 static const struct color_conversion pal_n_csc_svideo = {
-	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
-	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
-	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
 };
 
 static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@
  * Component connections
  */
 static const struct color_conversion sdtv_csc_yprpb = {
-	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
-	.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
-	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+	.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
 };
 
 static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@
 };
 
 static const struct color_conversion hdtv_csc_yprpb = {
-	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
-	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
-	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
 };
 
 static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@
 static const struct tv_mode tv_modes[] = {
 	{
 		.name		= "NTSC-M",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 29970,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
@@ -442,8 +442,8 @@
 		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
 
 		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
-		.dda1_inc	=    136,
-		.dda2_inc	=   7624,	    .dda2_size		=  20013,
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
 		.dda3_inc	=      0,	    .dda3_size		=      0,
 		.sc_reset	= TV_SC_RESET_EVERY_4,
 		.pal_burst	= false,
@@ -457,7 +457,7 @@
 	},
 	{
 		.name		= "NTSC-443",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 29970,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
@@ -485,10 +485,10 @@
 
 		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
 		.dda1_inc       =    168,
-		.dda2_inc       =  18557,       .dda2_size      =  20625,
-		.dda3_inc       =      0,       .dda3_size      =      0,
-		.sc_reset   = TV_SC_RESET_EVERY_8,
-		.pal_burst  = true,
+		.dda2_inc       =   4093,       .dda2_size      =  27456,
+		.dda3_inc       =    310,       .dda3_size      =    525,
+		.sc_reset   = TV_SC_RESET_NEVER,
+		.pal_burst  = false,
 
 		.composite_levels = &ntsc_m_levels_composite,
 		.composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@
 	},
 	{
 		.name		= "NTSC-J",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 29970,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
@@ -527,8 +527,8 @@
 		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
 
 		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
-		.dda1_inc	=    136,
-		.dda2_inc	=   7624,	    .dda2_size		=  20013,
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
 		.dda3_inc	=      0,	    .dda3_size		=      0,
 		.sc_reset	= TV_SC_RESET_EVERY_4,
 		.pal_burst	= false,
@@ -542,7 +542,7 @@
 	},
 	{
 		.name		= "PAL-M",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 29970,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
@@ -570,11 +570,11 @@
 		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
 
 		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
-		.dda1_inc	=    136,
-		.dda2_inc	=    7624,	    .dda2_size		=  20013,
+		.dda1_inc	=    135,
+		.dda2_inc	=  16704,	    .dda2_size		=  27456,
 		.dda3_inc	=      0,	    .dda3_size		=      0,
-		.sc_reset	= TV_SC_RESET_EVERY_4,
-		.pal_burst  = false,
+		.sc_reset	= TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
 
 		.composite_levels = &pal_m_levels_composite,
 		.composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@
 	{
 		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
 		.name	    = "PAL-N",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 25000,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
@@ -615,9 +615,9 @@
 
 
 		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
-		.dda1_inc       =    168,
-		.dda2_inc       =  18557,       .dda2_size      =  20625,
-		.dda3_inc       =      0,       .dda3_size      =      0,
+		.dda1_inc       =    135,
+		.dda2_inc       =  23578,       .dda2_size      =  27648,
+		.dda3_inc       =    134,       .dda3_size      =    625,
 		.sc_reset   = TV_SC_RESET_EVERY_8,
 		.pal_burst  = true,
 
@@ -631,12 +631,12 @@
 	{
 		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
 		.name	    = "PAL",
-		.clock		= 107520,
+		.clock		= 108000,
 		.refresh	= 25000,
 		.oversample	= TV_OVERSAMPLE_8X,
 		.component_only = 0,
 
-		.hsync_end	= 64,		    .hblank_end		= 128,
+		.hsync_end	= 64,		    .hblank_end		= 142,
 		.hblank_start	= 844,	    .htotal		= 863,
 
 		.progressive	= false,    .trilevel_sync = false,
@@ -659,8 +659,8 @@
 
 		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
 		.dda1_inc       =    168,
-		.dda2_inc       =  18557,       .dda2_size      =  20625,
-		.dda3_inc       =      0,       .dda3_size      =      0,
+		.dda2_inc       =   4122,       .dda2_size      =  27648,
+		.dda3_inc       =     67,       .dda3_size      =    625,
 		.sc_reset   = TV_SC_RESET_EVERY_8,
 		.pal_burst  = true,
 
@@ -689,7 +689,7 @@
 		.veq_ena        = false,
 
 		.vi_end_f1      = 44,               .vi_end_f2          = 44,
-		.nbr_end        = 496,
+		.nbr_end        = 479,
 
 		.burst_ena      = false,
 
@@ -713,7 +713,7 @@
 		.veq_ena        = false,
 
 		.vi_end_f1      = 44,               .vi_end_f2          = 44,
-		.nbr_end        = 496,
+		.nbr_end        = 479,
 
 		.burst_ena      = false,
 
@@ -876,7 +876,7 @@
 		.component_only = 1,
 
 		.hsync_end      = 88,               .hblank_end         = 235,
-		.hblank_start   = 2155,             .htotal             = 2200,
+		.hblank_start   = 2155,             .htotal             = 2201,
 
 		.progressive    = false, 	    .trilevel_sync = true,
 
@@ -1082,7 +1082,7 @@
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
 
 	/* Ensure TV refresh is close to desired refresh */
-	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
 		return MODE_OK;
 	return MODE_CLOCK_RANGE;
 }
@@ -1135,7 +1135,8 @@
 	if (!tv_mode)
 		return;	/* can't happen (mode_prepare prevents this) */
 
-	tv_ctl = 0;
+	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl &= TV_CTL_SAVE;
 
 	switch (tv_priv->type) {
 	default:
@@ -1215,7 +1216,6 @@
 	/* dda1 implies valid video levels */
 	if (tv_mode->dda1_inc) {
 		scctl1 |= TV_SC_DDA1_EN;
-		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	}
 
 	if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@
 		scctl1 |= TV_SC_DDA3_EN;
 
 	scctl1 |= tv_mode->sc_reset;
+	scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
 	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@
 			   color_conversion->av);
 	}
 
-	I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+	if (IS_I965G(dev))
+		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+	else
+		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
 	if (video_levels)
 		I915_WRITE(TV_CLR_LEVEL,
 			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@
 		tv_dac = I915_READ(TV_DAC);
 		I915_WRITE(TV_DAC, save_tv_dac);
 		I915_WRITE(TV_CTL, save_tv_ctl);
+		intel_wait_for_vblank(dev);
 	}
 	/*
 	 *  A B C
@@ -1451,7 +1457,7 @@
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-	if (encoder->crtc) {
+	if (encoder->crtc && encoder->crtc->enabled) {
 		type = intel_tv_detect_type(encoder->crtc, intel_output);
 	} else {
 		crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@
 			type = -1;
 	}
 
+	tv_priv->type = type;
+
 	if (type < 0)
 		return connector_status_disconnected;
 
@@ -1495,7 +1503,8 @@
 	struct drm_display_mode *mode_ptr;
 	struct intel_output *intel_output = to_intel_output(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
-	int j;
+	int j, count = 0;
+	u64 tmp;
 
 	for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
 	     j++) {
@@ -1510,8 +1519,9 @@
 					&& !tv_mode->component_only))
 			continue;
 
-		mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
-				      DRM_MEM_DRIVER);
+		mode_ptr = drm_mode_create(connector->dev);
+		if (!mode_ptr)
+			continue;
 		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
 
 		mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@
 			mode_ptr->vsync_end = mode_ptr->vsync_start  + 1;
 		mode_ptr->vtotal = vactive_s + 33;
 
-		mode_ptr->clock = (int) (tv_mode->refresh *
-					 mode_ptr->vtotal *
-					 mode_ptr->htotal / 1000) / 1000;
+		tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+		tmp *= mode_ptr->htotal;
+		tmp = div_u64(tmp, 1000000);
+		mode_ptr->clock = (int) tmp;
 
 		mode_ptr->type = DRM_MODE_TYPE_DRIVER;
 		drm_mode_probed_add(connector, mode_ptr);
+		count++;
 	}
 
-	return 0;
+	return count;
 }
 
 static void
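
The mode clock rewrite above is an overflow fix: with assumed HDTV figures (refresh = 29970 in mHz, vtotal on the order of 1125, htotal on the order of 2201) the old 32-bit expression would need a product of roughly 7.4e10, well past UINT_MAX, so the computation is widened to u64 and divided with div_u64(), a plain 64-by-32 '/' being unavailable on 32-bit kernels. A sketch of the widened form (helper name assumed):

#include <linux/math64.h>

/* Illustrative helper mirroring the widened computation above. */
static int mode_clock_khz(u32 refresh_mhz, u32 vtotal, u32 htotal)
{
	u64 tmp = (u64)refresh_mhz * vtotal;	/* widen before multiplying */

	tmp *= htotal;		/* e.g. 29970 * 1125 * 2201 ~= 7.4e10 */
	return (int)div_u64(tmp, 1000000);
}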
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 31400c8..d696f69 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@
 	.host_reset =	host_reset,
 };
 
-const static struct hpsb_address_ops map_ops = {
+static const struct hpsb_address_ops map_ops = {
 	.read = read_maps,
 };
 
-const static struct hpsb_address_ops fcp_ops = {
+static const struct hpsb_address_ops fcp_ops = {
 	.write = write_fcp,
 };
 
-const static struct hpsb_address_ops reg_ops = {
+static const struct hpsb_address_ops reg_ops = {
 	.read = read_regs,
 	.write = write_regs,
 	.lock = lock_regs,
 	.lock64 = lock64_regs,
 };
 
-const static struct hpsb_address_ops config_rom_ops = {
+static const struct hpsb_address_ops config_rom_ops = {
 	.read = read_config_rom,
 };
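
The `const static` -> `static const` conversions in this and the following ieee1394 files change nothing at runtime: both orders are legal C, but placing the storage-class specifier anywhere but first in a declaration has been an obsolescent feature since C89, and gcc flags it under -Wextra (-Wold-style-declaration). For example:

const static int old_order = 1;	/* legal, but flagged as old-style */
static const int new_order = 1;	/* the form the patch converts to */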
 
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cb15bfa..823a629 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2171,7 +2171,7 @@
  * Export information about protocols/devices supported by this driver.
  */
 #ifdef MODULE
-static struct ieee1394_device_id dv1394_id_table[] = {
+static const struct ieee1394_device_id dv1394_id_table[] = {
 	{
 		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
 		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 1a919df..4ca1035 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -181,7 +181,7 @@
 static void ether1394_host_reset(struct hpsb_host *host);
 
 /* Function for incoming 1394 packets */
-const static struct hpsb_address_ops addr_ops = {
+static const struct hpsb_address_ops addr_ops = {
 	.write =	ether1394_write,
 };
 
@@ -438,7 +438,7 @@
 	return eth1394_new_node(hi, ud);
 }
 
-static struct ieee1394_device_id eth1394_id_table[] = {
+static const struct ieee1394_device_id eth1394_id_table[] = {
 	{
 		.match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
 				IEEE1394_MATCH_VERSION),
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 600e391..4bc4435 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -478,7 +478,7 @@
 	return retval;
 }
 
-const static struct hpsb_address_ops dummy_ops;
+static const struct hpsb_address_ops dummy_ops;
 
 /* dummy address spaces as lower and upper bounds of the host's a.s. list */
 static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 53aada5..a6d55be 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -484,7 +484,7 @@
 static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
 {
 	struct hpsb_protocol_driver *driver;
-	struct ieee1394_device_id *id;
+	const struct ieee1394_device_id *id;
 	int length = 0;
 	char *scratch = buf;
 
@@ -658,7 +658,7 @@
 {
 	struct hpsb_protocol_driver *driver;
 	struct unit_directory *ud;
-	struct ieee1394_device_id *id;
+	const struct ieee1394_device_id *id;
 
 	/* We only match unit directories */
 	if (dev->platform_data != &nodemgr_ud_platform_data)
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index ee5acdb..749b271 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -125,7 +125,7 @@
 	 * probe function below can implement further protocol
 	 * dependent or vendor dependent checking.
 	 */
-	struct ieee1394_device_id *id_table;
+	const struct ieee1394_device_id *id_table;
 
 	/*
 	 * The update function is called when the node has just
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bad66c6..da5f882 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@
 static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
 		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
 		      u16 flags);
-const static struct hpsb_address_ops arm_ops = {
+static const struct hpsb_address_ops arm_ops = {
 	.read = arm_read,
 	.write = arm_write,
 	.lock = arm_lock,
@@ -369,6 +369,7 @@
 {
 	struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
 	struct raw1394_request __user *r;
+
 	r = compat_alloc_user_space(sizeof(struct raw1394_request));
 
 #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@@ -378,7 +379,8 @@
 	    C(tag) ||
 	    C(sendb) ||
 	    C(recvb))
-		return ERR_PTR(-EFAULT);
+		return (__force const char __user *)ERR_PTR(-EFAULT);
+
 	return (const char __user *)r;
 }
 #undef C
@@ -389,6 +391,7 @@
 raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
 {
 	struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
+
 	if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
 	    P(type) ||
 	    P(error) ||
@@ -400,6 +403,7 @@
 	    P(sendb) ||
 	    P(recvb))
 		return -EFAULT;
+
 	return sizeof(struct compat_raw1394_req);
 }
 #undef P
@@ -2249,8 +2253,8 @@
    	    sizeof(struct compat_raw1394_req) !=
 			sizeof(struct raw1394_request)) {
 		buffer = raw1394_compat_write(buffer);
-		if (IS_ERR(buffer))
-			return PTR_ERR(buffer);
+		if (IS_ERR((__force void *)buffer))
+			return PTR_ERR((__force void *)buffer);
 	} else
 #endif
 	if (count != sizeof(struct raw1394_request)) {
@@ -2978,7 +2982,7 @@
  * Export information about protocols/devices supported by this driver.
  */
 #ifdef MODULE
-static struct ieee1394_device_id raw1394_id_table[] = {
+static const struct ieee1394_device_id raw1394_id_table[] = {
 	{
 	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
 	 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
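
The __force casts in raw1394_compat_write() and its caller exist for
sparse's address-space checking: ERR_PTR() yields a plain kernel pointer,
so encoding an errno in a `const char __user *` return value needs an
explicit cast in each direction. The general shape of the pattern, as a
sketch (compat_copy is an illustrative stand-in, <linux/err.h> assumed):

	static const char __user *compat_copy(void __user *r, int err)
	{
		if (err)	/* smuggle the errno through the pointer */
			return (__force const char __user *)ERR_PTR(-EFAULT);
		return (const char __user *)r;
	}

	/* caller strips the __user qualifier again for IS_ERR/PTR_ERR */
	buf = compat_copy(r, err);
	if (IS_ERR((__force void *)buf))
		return PTR_ERR((__force void *)buf);
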
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f3fd865..a51ab23 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@
 	.host_reset	= sbp2_host_reset,
 };
 
-const static struct hpsb_address_ops sbp2_ops = {
+static const struct hpsb_address_ops sbp2_ops = {
 	.write		= sbp2_handle_status_write
 };
 
@@ -275,7 +275,7 @@
 static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
 				    size_t, u16);
 
-const static struct hpsb_address_ops sbp2_physdma_ops = {
+static const struct hpsb_address_ops sbp2_physdma_ops = {
 	.read		= sbp2_handle_physdma_read,
 	.write		= sbp2_handle_physdma_write,
 };
@@ -285,7 +285,7 @@
 /*
  * Interface to driver core and IEEE 1394 core
  */
-static struct ieee1394_device_id sbp2_id_table[] = {
+static const struct ieee1394_device_id sbp2_id_table[] = {
 	{
 	 .match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
 	 .specifier_id	= SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@@ -1413,8 +1413,7 @@
 			  "(firmware_revision 0x%06x, vendor_id 0x%06x,"
 			  " model_id 0x%06x)",
 			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
-			  workarounds, firmware_revision,
-			  ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+			  workarounds, firmware_revision, ud->vendor_id,
 			  model);
 
 	/* We would need one SCSI host template for each target to adjust
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 679a918..d287ba7 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1294,7 +1294,7 @@
  * Export information about protocols/devices supported by this driver.
  */
 #ifdef MODULE
-static struct ieee1394_device_id video1394_id_table[] = {
+static const struct ieee1394_device_id video1394_id_table[] = {
 	{
 		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
 		.specifier_id	= CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index b55d9cc..32526f1 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -115,7 +115,7 @@
 }
 
 static const char *debug_fcp_opcode(unsigned int opcode,
-				    const u8 *data, size_t length)
+				    const u8 *data, int length)
 {
 	switch (opcode) {
 	case AVC_OPCODE_VENDOR:			break;
@@ -135,13 +135,14 @@
 	case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL:	return "RegisterRC";
 	case SFE_VENDOR_OPCODE_LNB_CONTROL:		return "LNBControl";
 	case SFE_VENDOR_OPCODE_TUNE_QPSK:		return "TuneQPSK";
+	case SFE_VENDOR_OPCODE_TUNE_QPSK2:		return "TuneQPSK2";
 	case SFE_VENDOR_OPCODE_HOST2CA:			return "Host2CA";
 	case SFE_VENDOR_OPCODE_CA2HOST:			return "CA2Host";
 	}
 	return "Vendor";
 }
 
-static void debug_fcp(const u8 *data, size_t length)
+static void debug_fcp(const u8 *data, int length)
 {
 	unsigned int subunit_type, subunit_id, op;
 	const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@@ -266,7 +267,10 @@
 	c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
 	c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
 	c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
-	c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
+	if (fdtv->type == FIREDTV_DVB_S2)
+		c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
+	else
+		c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
 
 	c->operand[4] = (params->frequency >> 24) & 0xff;
 	c->operand[5] = (params->frequency >> 16) & 0xff;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 00d46e1..92285d0 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -81,13 +81,16 @@
 
 	/* go */
 	sb->s_flags |= MS_ACTIVE;
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+
+	return 0;
 
 	/* new mountpoint for an already mounted superblock */
 already_mounted:
 	DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
 	      mtd->index, mtd->name);
-	ret = simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	ret = 0;
 	goto out_put;
 
 out_error:
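
simple_set_mnt() now returns void -- attaching a superblock to a vfsmount
cannot fail -- so ->get_sb() implementations return 0 themselves instead of
forwarding a meaningless value; mtdsuper here, and 9p, cifs and devpts
below, all convert to the same tail:

	sb->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, sb);	/* attach sb and its root to mnt */
	return 0;
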
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c295ba7..f0c7de7 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -41,8 +41,8 @@
 extern const struct address_space_operations v9fs_addr_operations;
 extern const struct file_operations v9fs_file_operations;
 extern const struct file_operations v9fs_dir_operations;
-extern struct dentry_operations v9fs_dentry_operations;
-extern struct dentry_operations v9fs_cached_dentry_operations;
+extern const struct dentry_operations v9fs_dentry_operations;
+extern const struct dentry_operations v9fs_cached_dentry_operations;
 
 struct inode *v9fs_get_inode(struct super_block *sb, int mode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 06dcc7c..d743252 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -104,12 +104,12 @@
 	}
 }
 
-struct dentry_operations v9fs_cached_dentry_operations = {
+const struct dentry_operations v9fs_cached_dentry_operations = {
 	.d_delete = v9fs_cached_dentry_delete,
 	.d_release = v9fs_dentry_release,
 };
 
-struct dentry_operations v9fs_dentry_operations = {
+const struct dentry_operations v9fs_dentry_operations = {
 	.d_delete = v9fs_dentry_delete,
 	.d_release = v9fs_dentry_release,
 };
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 93212e4..5f8ab8a 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,9 @@
 	p9stat_free(st);
 	kfree(st);
 
-P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n");
-	return simple_set_mnt(mnt, sb);
+P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+	simple_set_mnt(mnt, sb);
+	return 0;
 
 release_sb:
 	if (sb) {
diff --git a/fs/Kconfig b/fs/Kconfig
index 93945dd..cef8b18 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -56,61 +56,7 @@
 
 source "fs/notify/Kconfig"
 
-config QUOTA
-	bool "Quota support"
-	help
-	  If you say Y here, you will be able to set per user limits for disk
-	  usage (also called disk quotas). Currently, it works for the
-	  ext2, ext3, and reiserfs file system. ext3 also supports journalled
-	  quotas for which you don't need to run quotacheck(8) after an unclean
-	  shutdown.
-	  For further details, read the Quota mini-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>, or the documentation provided
-	  with the quota tools. Probably the quota support is only useful for
-	  multi user systems. If unsure, say N.
-
-config QUOTA_NETLINK_INTERFACE
-	bool "Report quota messages through netlink interface"
-	depends on QUOTA && NET
-	help
-	  If you say Y here, quota warnings (about exceeding softlimit, reaching
-	  hardlimit, etc.) will be reported through netlink interface. If unsure,
-	  say Y.
-
-config PRINT_QUOTA_WARNING
-	bool "Print quota warnings to console (OBSOLETE)"
-	depends on QUOTA
-	default y
-	help
-	  If you say Y here, quota warnings (about exceeding softlimit, reaching
-	  hardlimit, etc.) will be printed to the process' controlling terminal.
-	  Note that this behavior is currently deprecated and may go away in
-	  future. Please use notification via netlink socket instead.
-
-# Generic support for tree structured quota files. Seleted when needed.
-config QUOTA_TREE
-	 tristate
-
-config QFMT_V1
-	tristate "Old quota format support"
-	depends on QUOTA
-	help
-	  This quota format was (is) used by kernels earlier than 2.4.22. If
-	  you have quota working and you don't want to convert to new quota
-	  format say Y here.
-
-config QFMT_V2
-	tristate "Quota format v2 support"
-	depends on QUOTA
-	select QUOTA_TREE
-	help
-	  This quota format allows using quotas with 32-bit UIDs/GIDs. If you
-	  need this functionality say Y here.
-
-config QUOTACTL
-	bool
-	depends on XFS_QUOTA || QUOTA
-	default y
+source "fs/quota/Kconfig"
 
 source "fs/autofs/Kconfig"
 source "fs/autofs4/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dc20db3..6e82a30 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -51,11 +51,7 @@
 obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
 obj-$(CONFIG_GENERIC_ACL)	+= generic_acl.o
 
-obj-$(CONFIG_QUOTA)		+= dquot.o
-obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
-obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
-obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
-obj-$(CONFIG_QUOTACTL)		+= quota.o
+obj-y				+= quota/
 
 obj-$(CONFIG_PROC_FS)		+= proc/
 obj-y				+= partitions/
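
The quota build rules move wholesale under fs/quota/. The new Makefile is
not shown in this diff, but presumably it carries the removed entries over
unchanged, along the lines of:

	# fs/quota/Makefile (assumed; mirrors the removed fs/Makefile lines)
	obj-$(CONFIG_QUOTA)		+= dquot.o
	obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
	obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
	obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
	obj-$(CONFIG_QUOTACTL)		+= quota.o

The unconditional `obj-y += quota/` is the usual idiom for descending into
a subdirectory whose own Makefile decides what actually gets built.
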
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 8311575..e0a85db 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -86,7 +86,7 @@
 /* dir_*.c */
 extern const struct inode_operations adfs_dir_inode_operations;
 extern const struct file_operations adfs_dir_operations;
-extern struct dentry_operations adfs_dentry_operations;
+extern const struct dentry_operations adfs_dentry_operations;
 extern struct adfs_dir_ops adfs_f_dir_ops;
 extern struct adfs_dir_ops adfs_fplus_dir_ops;
 
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 85a30e9..e867ccf 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -263,7 +263,7 @@
 	return 0;
 }
 
-struct dentry_operations adfs_dentry_operations = {
+const struct dentry_operations adfs_dentry_operations = {
 	.d_hash		= adfs_hash,
 	.d_compare	= adfs_compare,
 };
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e9ec915..1a2d5e3 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -199,8 +199,7 @@
 extern const struct address_space_operations	 affs_aops;
 extern const struct address_space_operations	 affs_aops_ofs;
 
-extern struct dentry_operations	 affs_dentry_operations;
-extern struct dentry_operations	 affs_dentry_operations_intl;
+extern const struct dentry_operations	 affs_dentry_operations;
 
 static inline void
 affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 8055730..7d0f0a3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@
 		affs_lock_dir(dir);
 		affs_fix_dcache(dentry, link_ino);
 		retval = affs_remove_hash(dir, link_bh);
-		if (retval)
+		if (retval) {
+			affs_unlock_dir(dir);
 			goto done;
+		}
 		mark_buffer_dirty_inode(link_bh, inode);
 
 		memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
 		retval = affs_insert_hash(dir, bh);
-		if (retval)
+		if (retval) {
+			affs_unlock_dir(dir);
 			goto done;
+		}
 		mark_buffer_dirty_inode(bh, inode);
 
 		affs_unlock_dir(dir);
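
This hunk fixes a lock leak: both early exits jumped to `done` with the
directory still held by affs_lock_dir(). The patch drops the lock on each
error path; an equivalent shape funnels every exit through a single unlock
site (a sketch that omits the intermediate buffer updates):

	affs_lock_dir(dir);
	retval = affs_remove_hash(dir, link_bh);
	if (retval)
		goto out_unlock;
	retval = affs_insert_hash(dir, bh);
out_unlock:
	affs_unlock_dir(dir);
	/* fall through to the shared cleanup with retval set */
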
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index cfcf1b6..960d336 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -19,12 +19,12 @@
 static int	 affs_intl_hash_dentry(struct dentry *, struct qstr *);
 static int       affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
 
-struct dentry_operations affs_dentry_operations = {
+const struct dentry_operations affs_dentry_operations = {
 	.d_hash		= affs_hash_dentry,
 	.d_compare	= affs_compare_dentry,
 };
 
-static struct dentry_operations affs_intl_dentry_operations = {
+static const struct dentry_operations affs_intl_dentry_operations = {
 	.d_hash		= affs_intl_hash_dentry,
 	.d_compare	= affs_intl_compare_dentry,
 };
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 99cf390..9bd7577 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@
 	.setattr	= afs_setattr,
 };
 
-static struct dentry_operations afs_fs_dentry_operations = {
+static const struct dentry_operations afs_fs_dentry_operations = {
 	.d_revalidate	= afs_d_revalidate,
 	.d_delete	= afs_d_delete,
 	.d_release	= afs_d_release,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3bbdb9d..1dd96d4 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -48,7 +48,7 @@
 	.get_sb		= anon_inodefs_get_sb,
 	.kill_sb	= kill_anon_super,
 };
-static struct dentry_operations anon_inodefs_dentry_operations = {
+static const struct dentry_operations anon_inodefs_dentry_operations = {
 	.d_delete	= anon_inodefs_delete_dentry,
 };
 
diff --git a/fs/attr.c b/fs/attr.c
index f436019..9fe1b1b 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@
 		if (!error) {
 			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
-				error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+				error = vfs_dq_transfer(inode, attr) ?
+					-EDQUOT : 0;
 			if (!error)
 				error = inode_setattr(inode, attr);
 		}
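
DQUOT_TRANSFER() becomes vfs_dq_transfer() here; the same mechanical rename
(DQUOT_ALLOC_*/DQUOT_FREE_*/DQUOT_DROP to vfs_dq_*) runs through the ext2
and ext3 hunks below. The calling convention is unchanged: a non-zero
return means the quota operation failed, which callers map to -EDQUOT:

	if (vfs_dq_alloc_block(inode, count))	/* was DQUOT_ALLOC_BLOCK() */
		return -EDQUOT;
	/* ... and on the undo path ... */
	vfs_dq_free_block(inode, count);	/* was DQUOT_FREE_BLOCK() */
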
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8aacade..4a1401c 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -192,7 +192,7 @@
 	return 1;
 }
 
-static struct dentry_operations autofs_dentry_operations = {
+static const struct dentry_operations autofs_dentry_operations = {
 	.d_revalidate	= autofs_revalidate,
 };
 
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 716e12b..69c8142 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -310,7 +310,7 @@
 	return ino;
 }
 
-static struct dentry_operations autofs4_sb_dentry_operations = {
+static const struct dentry_operations autofs4_sb_dentry_operations = {
 	.d_release      = autofs4_dentry_release,
 };
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a41c2a..74b1469 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -349,13 +349,13 @@
 }
 
 /* For dentries of directories in the root dir */
-static struct dentry_operations autofs4_root_dentry_operations = {
+static const struct dentry_operations autofs4_root_dentry_operations = {
 	.d_revalidate	= autofs4_revalidate,
 	.d_release	= autofs4_dentry_release,
 };
 
 /* For other dentries */
-static struct dentry_operations autofs4_dentry_operations = {
+static const struct dentry_operations autofs4_dentry_operations = {
 	.d_revalidate	= autofs4_revalidate,
 	.d_release	= autofs4_dentry_release,
 };
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1eff..8c3c689 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/buffer_head.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/mount.h>
@@ -174,6 +175,151 @@
 				iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }
 
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping.  Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+	int ret = 0;
+
+	if (bdev)
+		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
+	return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device: filesystem data as well as the underlying block
+ * device.  Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+	struct super_block *sb = get_super(bdev);
+	if (sb) {
+		int res = fsync_super(sb);
+		drop_super(sb);
+		return res;
+	}
+	return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev  --  lock a filesystem and force it into a consistent state
+ * @bdev:	blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can actually unfreeze the frozen filesystem when multiple
+ * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
+ * counts down in thaw_bdev(). When it reaches 0, thaw_bdev() actually
+ * unfreezes the filesystem.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+	struct super_block *sb;
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (bdev->bd_fsfreeze_count > 0) {
+		bdev->bd_fsfreeze_count++;
+		sb = get_super(bdev);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return sb;
+	}
+	bdev->bd_fsfreeze_count++;
+
+	down(&bdev->bd_mount_sem);
+	sb = get_super(bdev);
+	if (sb && !(sb->s_flags & MS_RDONLY)) {
+		sb->s_frozen = SB_FREEZE_WRITE;
+		smp_wmb();
+
+		__fsync_super(sb);
+
+		sb->s_frozen = SB_FREEZE_TRANS;
+		smp_wmb();
+
+		sync_blockdev(sb->s_bdev);
+
+		if (sb->s_op->freeze_fs) {
+			error = sb->s_op->freeze_fs(sb);
+			if (error) {
+				printk(KERN_ERR
+					"VFS:Filesystem freeze failed\n");
+				sb->s_frozen = SB_UNFROZEN;
+				drop_super(sb);
+				up(&bdev->bd_mount_sem);
+				bdev->bd_fsfreeze_count--;
+				mutex_unlock(&bdev->bd_fsfreeze_mutex);
+				return ERR_PTR(error);
+			}
+		}
+	}
+
+	sync_blockdev(bdev);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev  -- unlock filesystem
+ * @bdev:	blockdevice to unlock
+ * @sb:		associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (!bdev->bd_fsfreeze_count) {
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return -EINVAL;
+	}
+
+	bdev->bd_fsfreeze_count--;
+	if (bdev->bd_fsfreeze_count > 0) {
+		if (sb)
+			drop_super(sb);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return 0;
+	}
+
+	if (sb) {
+		BUG_ON(sb->s_bdev != bdev);
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (sb->s_op->unfreeze_fs) {
+				error = sb->s_op->unfreeze_fs(sb);
+				if (error) {
+					printk(KERN_ERR
+						"VFS:Filesystem thaw failed\n");
+					sb->s_frozen = SB_FREEZE_TRANS;
+					bdev->bd_fsfreeze_count++;
+					mutex_unlock(&bdev->bd_fsfreeze_mutex);
+					return error;
+				}
+			}
+			sb->s_frozen = SB_UNFROZEN;
+			smp_wmb();
+			wake_up(&sb->s_wait_unfrozen);
+		}
+		drop_super(sb);
+	}
+
+	up(&bdev->bd_mount_sem);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(thaw_bdev);
+
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
 	return block_write_full_page(page, blkdev_get_block, wbc);
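
sync_blockdev(), fsync_bdev() and the freeze/thaw pair move verbatim from
fs/buffer.c into fs/block_dev.c (the matching removal is the next hunk).
For reference, a caller such as a snapshot implementation brackets its work
with the pair and hands freeze_bdev()'s return value back to thaw_bdev()
(take_snapshot is a hypothetical stand-in):

	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* NULL (no sb) or ERR_PTR on error */
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	take_snapshot(bdev);
	thaw_bdev(bdev, sb);		/* copes with sb == NULL */
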
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c7..a2fd743 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@
 }
 
 /*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping.  Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-	int ret = 0;
-
-	if (bdev)
-		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-	return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device.   Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-	struct super_block *sb = get_super(bdev);
-	if (sb) {
-		int res = fsync_super(sb);
-		drop_super(sb);
-		return res;
-	}
-	return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev  --  lock a filesystem and force it into a consistent state
- * @bdev:	blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-	struct super_block *sb;
-	int error = 0;
-
-	mutex_lock(&bdev->bd_fsfreeze_mutex);
-	if (bdev->bd_fsfreeze_count > 0) {
-		bdev->bd_fsfreeze_count++;
-		sb = get_super(bdev);
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return sb;
-	}
-	bdev->bd_fsfreeze_count++;
-
-	down(&bdev->bd_mount_sem);
-	sb = get_super(bdev);
-	if (sb && !(sb->s_flags & MS_RDONLY)) {
-		sb->s_frozen = SB_FREEZE_WRITE;
-		smp_wmb();
-
-		__fsync_super(sb);
-
-		sb->s_frozen = SB_FREEZE_TRANS;
-		smp_wmb();
-
-		sync_blockdev(sb->s_bdev);
-
-		if (sb->s_op->freeze_fs) {
-			error = sb->s_op->freeze_fs(sb);
-			if (error) {
-				printk(KERN_ERR
-					"VFS:Filesystem freeze failed\n");
-				sb->s_frozen = SB_UNFROZEN;
-				drop_super(sb);
-				up(&bdev->bd_mount_sem);
-				bdev->bd_fsfreeze_count--;
-				mutex_unlock(&bdev->bd_fsfreeze_mutex);
-				return ERR_PTR(error);
-			}
-		}
-	}
-
-	sync_blockdev(bdev);
-	mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev  -- unlock filesystem
- * @bdev:	blockdevice to unlock
- * @sb:		associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-	int error = 0;
-
-	mutex_lock(&bdev->bd_fsfreeze_mutex);
-	if (!bdev->bd_fsfreeze_count) {
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return -EINVAL;
-	}
-
-	bdev->bd_fsfreeze_count--;
-	if (bdev->bd_fsfreeze_count > 0) {
-		if (sb)
-			drop_super(sb);
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return 0;
-	}
-
-	if (sb) {
-		BUG_ON(sb->s_bdev != bdev);
-		if (!(sb->s_flags & MS_RDONLY)) {
-			if (sb->s_op->unfreeze_fs) {
-				error = sb->s_op->unfreeze_fs(sb);
-				if (error) {
-					printk(KERN_ERR
-						"VFS:Filesystem thaw failed\n");
-					sb->s_frozen = SB_FREEZE_TRANS;
-					bdev->bd_fsfreeze_count++;
-					mutex_unlock(&bdev->bd_fsfreeze_mutex);
-					return error;
-				}
-			}
-			sb->s_frozen = SB_UNFROZEN;
-			smp_wmb();
-			wake_up(&sb->s_wait_unfrozen);
-		}
-		drop_super(sb);
-	}
-
-	up(&bdev->bd_mount_sem);
-	mutex_unlock(&bdev->bd_fsfreeze_mutex);
-	return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers.  To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 13ea532..38491fd 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -606,7 +606,8 @@
 		return rc;
 	}
 	sb->s_flags |= MS_ACTIVE;
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 }
 
 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2b1d28a..77e190d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -78,8 +78,8 @@
 extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
 
 /* Functions related to dir entries */
-extern struct dentry_operations cifs_dentry_ops;
-extern struct dentry_operations cifs_ci_dentry_ops;
+extern const struct dentry_operations cifs_dentry_ops;
+extern const struct dentry_operations cifs_ci_dentry_ops;
 
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9b6f68..2f35ccc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -701,7 +701,7 @@
 	return rc;
 }     */
 
-struct dentry_operations cifs_dentry_ops = {
+const struct dentry_operations cifs_dentry_ops = {
 	.d_revalidate = cifs_d_revalidate,
 /* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
 };
@@ -739,7 +739,7 @@
 	return 1;
 }
 
-struct dentry_operations cifs_ci_dentry_ops = {
+const struct dentry_operations cifs_ci_dentry_ops = {
 	.d_revalidate = cifs_d_revalidate,
 	.d_hash = cifs_ci_hash,
 	.d_compare = cifs_ci_compare,
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 75b1fa9..4bb9d0a 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -59,7 +59,7 @@
 }
 #define CODA_EIO_ERROR ((void *) (coda_return_EIO))
 
-static struct dentry_operations coda_dentry_operations =
+static const struct dentry_operations coda_dentry_operations =
 {
 	.d_revalidate	= coda_dentry_revalidate,
 	.d_delete	= coda_dentry_delete,
diff --git a/fs/compat.c b/fs/compat.c
index 0949b43..5e374aa 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -378,6 +378,34 @@
 	return error;
 }
 
+/*
+ * This is a copy of sys_ustat, just dealing with a different structure
+ * layout. Given how simple this syscall is, that approach is more
+ * maintainable than the various conversion hacks.
+ */
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
+{
+	struct super_block *sb;
+	struct compat_ustat tmp;
+	struct kstatfs sbuf;
+	int err;
+
+	sb = user_get_super(new_decode_dev(dev));
+	if (!sb)
+		return -EINVAL;
+	err = vfs_statfs(sb->s_root, &sbuf);
+	drop_super(sb);
+	if (err)
+		return err;
+
+	memset(&tmp, 0, sizeof(struct compat_ustat));
+	tmp.f_tfree = sbuf.f_bfree;
+	tmp.f_tinode = sbuf.f_ffree;
+	if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
+		return -EFAULT;
+	return 0;
+}
+
 static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
 {
 	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
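
compat_sys_ustat() gives every 32-bit-compat architecture one shared
implementation; the ia64 syscall-table switch and the removal of the mips
wrapper in this same series rely on it. The memset() before copy_to_user()
is load-bearing: the struct contains char arrays and possibly padding that
would otherwise leak kernel stack contents to userspace. The same zero-fill
pattern applies to any stack struct copied out:

	struct compat_ustat tmp;

	memset(&tmp, 0, sizeof(tmp));	/* clear padding + unused fields */
	tmp.f_tfree = sbuf.f_bfree;
	tmp.f_tinode = sbuf.f_ffree;
	if (copy_to_user(u, &tmp, sizeof(tmp)))
		return -EFAULT;
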
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341..05373db 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@
 	return 1;
 }
 
-static struct dentry_operations configfs_dentry_ops = {
+static const struct dentry_operations configfs_dentry_ops = {
 	.d_iput		= configfs_d_iput,
 	/* simple_delete_dentry() isn't exported */
 	.d_delete	= configfs_d_delete,
diff --git a/fs/dcache.c b/fs/dcache.c
index 07e2d4a..90bbd7e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,15 +1247,18 @@
 	struct dentry *found;
 	struct dentry *new;
 
-	/* Does a dentry matching the name exist already? */
+	/*
+	 * First check if a dentry matching the name already exists;
+	 * if not, go ahead and create it now.
+	 */
 	found = d_hash_and_lookup(dentry->d_parent, name);
-	/* If not, create it now and return */
 	if (!found) {
 		new = d_alloc(dentry->d_parent, name);
 		if (!new) {
 			error = -ENOMEM;
 			goto err_out;
 		}
+
 		found = d_splice_alias(inode, new);
 		if (found) {
 			dput(new);
@@ -1263,61 +1266,46 @@
 		}
 		return new;
 	}
-	/* Matching dentry exists, check if it is negative. */
+
+	/*
+	 * If a matching dentry exists and it's not negative, use it.
+	 *
+	 * Decrement the reference count to balance the iget() done
+	 * earlier on.
+	 */
 	if (found->d_inode) {
 		if (unlikely(found->d_inode != inode)) {
 			/* This can't happen because bad inodes are unhashed. */
 			BUG_ON(!is_bad_inode(inode));
 			BUG_ON(!is_bad_inode(found->d_inode));
 		}
-		/*
-		 * Already have the inode and the dentry attached, decrement
-		 * the reference count to balance the iget() done
-		 * earlier on.  We found the dentry using d_lookup() so it
-		 * cannot be disconnected and thus we do not need to worry
-		 * about any NFS/disconnectedness issues here.
-		 */
 		iput(inode);
 		return found;
 	}
+
 	/*
 	 * Negative dentry: instantiate it unless the inode is a directory and
-	 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
-	 * in which case d_move() that in place of the found dentry.
+	 * already has a dentry.
 	 */
-	if (!S_ISDIR(inode->i_mode)) {
-		/* Not a directory; everything is easy. */
-		d_instantiate(found, inode);
-		return found;
-	}
 	spin_lock(&dcache_lock);
-	if (list_empty(&inode->i_dentry)) {
-		/*
-		 * Directory without a 'disconnected' dentry; we need to do
-		 * d_instantiate() by hand because it takes dcache_lock which
-		 * we already hold.
-		 */
+	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
 		__d_instantiate(found, inode);
 		spin_unlock(&dcache_lock);
 		security_d_instantiate(found, inode);
 		return found;
 	}
+
 	/*
-	 * Directory with a 'disconnected' dentry; get a reference to the
-	 * 'disconnected' dentry.
+	 * In case a directory already has a (disconnected) entry, grab a
+	 * reference to it, move it in place and use it.
 	 */
 	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
 	dget_locked(new);
 	spin_unlock(&dcache_lock);
-	/* Do security vodoo. */
 	security_d_instantiate(found, inode);
-	/* Move new in place of found. */
 	d_move(new, found);
-	/* Balance the iget() we did above. */
 	iput(inode);
-	/* Throw away found. */
 	dput(found);
-	/* Use new as the actual dentry. */
 	return new;
 
 err_out:
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bff4052..63a4a59 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -322,177 +322,81 @@
 }
 
 /*
- * Safely parse the mount options in @data and update @opts.
+ * devpts_get_sb()
  *
- * devpts ends up parsing options two times during mount, due to the
- * two modes of operation it supports. The first parse occurs in
- * devpts_get_sb() when determining the mode (single-instance or
- * multi-instance mode). The second parse happens in devpts_remount()
- * or new_pts_mount() depending on the mode.
+ *     If the '-o newinstance' mount option was specified, mount a new
+ *     (private) instance of devpts.  PTYs created in this instance are
+ *     independent of the PTYs in other devpts instances.
  *
- * Parsing of options modifies the @data making subsequent parsing
- * incorrect. So make a local copy of @data and parse it.
+ *     If the '-o newinstance' option was not specified, mount/remount the
+ *     initial kernel mount of devpts.  This type of mount gives the
+ *     legacy, single-instance semantics.
  *
- * Return: 0 On success, -errno on error
- */
-static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts)
-{
-	int rc;
-	void *datacp;
-
-	if (!data)
-		return 0;
-
-	/* Use kstrdup() ?  */
-	datacp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!datacp)
-		return -ENOMEM;
-
-	memcpy(datacp, data, PAGE_SIZE);
-	rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts);
-	kfree(datacp);
-
-	return rc;
-}
-
-/*
- * Mount a new (private) instance of devpts.  PTYs created in this
- * instance are independent of the PTYs in other devpts instances.
- */
-static int new_pts_mount(struct file_system_type *fs_type, int flags,
-		void *data, struct vfsmount *mnt)
-{
-	int err;
-	struct pts_fs_info *fsi;
-	struct pts_mount_opts *opts;
-
-	err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
-	if (err)
-		return err;
-
-	fsi = DEVPTS_SB(mnt->mnt_sb);
-	opts = &fsi->mount_opts;
-
-	err = parse_mount_options(data, PARSE_MOUNT, opts);
-	if (err)
-		goto fail;
-
-	err = mknod_ptmx(mnt->mnt_sb);
-	if (err)
-		goto fail;
-
-	return 0;
-
-fail:
-	dput(mnt->mnt_sb->s_root);
-	deactivate_super(mnt->mnt_sb);
-	return err;
-}
-
-/*
- * Check if 'newinstance' mount option was specified in @data.
+ *     The 'newinstance' option is needed to support multiple namespace
+ *     semantics in devpts while preserving backward compatibility of the
+ *     current 'single-namespace' semantics, i.e. all mounts of devpts
+ *     without the 'newinstance' mount option should bind to the initial
+ *     kernel mount, like get_sb_single().
  *
- * Return: -errno  	on error (eg: invalid mount options specified)
- * 	 : 1 		if 'newinstance' mount option was specified
- * 	 : 0 		if 'newinstance' mount option was NOT specified
- */
-static int is_new_instance_mount(void *data)
-{
-	int rc;
-	struct pts_mount_opts opts;
-
-	if (!data)
-		return 0;
-
-	rc = safe_parse_mount_options(data, &opts);
-	if (!rc)
-		rc = opts.newinstance;
-
-	return rc;
-}
-
-/*
- * get_init_pts_sb()
+ *     Mounts with the 'newinstance' option create a new, private namespace.
  *
- *     This interface is needed to support multiple namespace semantics in
- *     devpts while preserving backward compatibility of the current 'single-
- *     namespace' semantics. i.e all mounts of devpts without the 'newinstance'
- *     mount option should bind to the initial kernel mount, like
- *     get_sb_single().
+ *     NOTE:
  *
- *     Mounts with 'newinstance' option create a new private namespace.
- *
- *     But for single-mount semantics, devpts cannot use get_sb_single(),
+ *     For single-mount semantics, devpts cannot use get_sb_single(),
  *     because get_sb_single()/sget() find and use the super-block from
  *     the most recent mount of devpts. But that recent mount may be a
  *     'newinstance' mount and get_sb_single() would pick the newinstance
  *     super-block instead of the initial super-block.
- *
- *     This interface is identical to get_sb_single() except that it
- *     consistently selects the 'single-namespace' superblock even in the
- *     presence of the private namespace (i.e 'newinstance') super-blocks.
  */
-static int get_init_pts_sb(struct file_system_type *fs_type, int flags,
-		void *data, struct vfsmount *mnt)
+static int devpts_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
 {
-	struct super_block *s;
 	int error;
+	struct pts_mount_opts opts;
+	struct super_block *s;
 
-	s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
+	memset(&opts, 0, sizeof(opts));
+	if (data) {
+		error = parse_mount_options(data, PARSE_MOUNT, &opts);
+		if (error)
+			return error;
+	}
+
+	if (opts.newinstance)
+		s = sget(fs_type, NULL, set_anon_super, NULL);
+	else
+		s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
+
 	if (IS_ERR(s))
 		return PTR_ERR(s);
 
 	if (!s->s_root) {
 		s->s_flags = flags;
 		error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
-		if (error) {
-			up_write(&s->s_umount);
-			deactivate_super(s);
-			return error;
-		}
+		if (error)
+			goto out_undo_sget;
 		s->s_flags |= MS_ACTIVE;
 	}
-	do_remount_sb(s, flags, data, 0);
-	return simple_set_mnt(mnt, s);
+
+	simple_set_mnt(mnt, s);
+
+	memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
+
+	error = mknod_ptmx(s);
+	if (error)
+		goto out_dput;
+
+	return 0;
+
+out_dput:
+	dput(s->s_root);
+
+out_undo_sget:
+	up_write(&s->s_umount);
+	deactivate_super(s);
+	return error;
 }
 
-/*
- * Mount or remount the initial kernel mount of devpts. This type of
- * mount maintains the legacy, single-instance semantics, while the
- * kernel still allows multiple-instances.
- */
-static int init_pts_mount(struct file_system_type *fs_type, int flags,
-		void *data, struct vfsmount *mnt)
-{
-	int err;
-
-	err = get_init_pts_sb(fs_type, flags, data, mnt);
-	if (err)
-		return err;
-
-	err = mknod_ptmx(mnt->mnt_sb);
-	if (err) {
-		dput(mnt->mnt_sb->s_root);
-		deactivate_super(mnt->mnt_sb);
-	}
-
-	return err;
-}
-
-static int devpts_get_sb(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
-{
-	int new;
-
-	new = is_new_instance_mount(data);
-	if (new < 0)
-		return new;
-
-	if (new)
-		return new_pts_mount(fs_type, flags, data, mnt);
-
-	return init_pts_mount(fs_type, flags, data, mnt);
-}
 #else
 /*
  * This supports only the legacy single-instance semantics (no
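
The consolidated devpts_get_sb() parses the mount options exactly once,
chooses the superblock comparator based on `newinstance`, and unwinds both
modes through shared error labels. From userspace the two modes look like
this (sketch; the target paths are examples):

	#include <sys/mount.h>

	/* legacy semantics: binds to the initial kernel mount of devpts */
	mount("devpts", "/dev/pts", "devpts", 0, NULL);

	/* private instance: its PTYs are independent of other instances */
	mount("devpts", "/mnt/ns1/dev/pts", "devpts", 0, "newinstance");
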
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f8..858fba1 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@
 
 	bucket = dir_hash(ls, name, namelen);
 
-	write_lock(&ls->ls_dirtbl[bucket].lock);
+	spin_lock(&ls->ls_dirtbl[bucket].lock);
 
 	de = search_bucket(ls, name, namelen, bucket);
 
@@ -173,7 +173,7 @@
 	list_del(&de->list);
 	kfree(de);
  out:
-	write_unlock(&ls->ls_dirtbl[bucket].lock);
+	spin_unlock(&ls->ls_dirtbl[bucket].lock);
 }
 
 void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@
 	DLM_ASSERT(list_empty(&ls->ls_recover_list), );
 
 	for (i = 0; i < ls->ls_dirtbl_size; i++) {
-		write_lock(&ls->ls_dirtbl[i].lock);
+		spin_lock(&ls->ls_dirtbl[i].lock);
 		head = &ls->ls_dirtbl[i].list;
 		while (!list_empty(head)) {
 			de = list_entry(head->next, struct dlm_direntry, list);
 			list_del(&de->list);
 			put_free_de(ls, de);
 		}
-		write_unlock(&ls->ls_dirtbl[i].lock);
+		spin_unlock(&ls->ls_dirtbl[i].lock);
 	}
 }
 
@@ -307,17 +307,17 @@
 
 	bucket = dir_hash(ls, name, namelen);
 
-	write_lock(&ls->ls_dirtbl[bucket].lock);
+	spin_lock(&ls->ls_dirtbl[bucket].lock);
 	de = search_bucket(ls, name, namelen, bucket);
 	if (de) {
 		*r_nodeid = de->master_nodeid;
-		write_unlock(&ls->ls_dirtbl[bucket].lock);
+		spin_unlock(&ls->ls_dirtbl[bucket].lock);
 		if (*r_nodeid == nodeid)
 			return -EEXIST;
 		return 0;
 	}
 
-	write_unlock(&ls->ls_dirtbl[bucket].lock);
+	spin_unlock(&ls->ls_dirtbl[bucket].lock);
 
 	if (namelen > DLM_RESNAME_MAXLEN)
 		return -EINVAL;
@@ -330,7 +330,7 @@
 	de->length = namelen;
 	memcpy(de->name, name, namelen);
 
-	write_lock(&ls->ls_dirtbl[bucket].lock);
+	spin_lock(&ls->ls_dirtbl[bucket].lock);
 	tmp = search_bucket(ls, name, namelen, bucket);
 	if (tmp) {
 		kfree(de);
@@ -339,7 +339,7 @@
 		list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
 	}
 	*r_nodeid = de->master_nodeid;
-	write_unlock(&ls->ls_dirtbl[bucket].lock);
+	spin_unlock(&ls->ls_dirtbl[bucket].lock);
 	return 0;
 }
 
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f..d01ca0a 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@
 
 struct dlm_dirtable {
 	struct list_head	list;
-	rwlock_t		lock;
+	spinlock_t		lock;
 };
 
 struct dlm_rsbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 01e7d39..205ec95 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -835,7 +835,7 @@
 		lkb->lkb_wait_count++;
 		hold_lkb(lkb);
 
-		log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
+		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
 			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
 			  lkb->lkb_wait_count, lkb->lkb_flags);
 		goto out;
@@ -851,7 +851,7 @@
 	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
  out:
 	if (error)
-		log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
+		log_error(ls, "addwait error %x %d flags %x %d %d %s",
 			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
 			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
 	mutex_unlock(&ls->ls_waiters_mutex);
@@ -863,23 +863,55 @@
    request reply on the requestqueue) between dlm_recover_waiters_pre() which
    set RESEND and dlm_recover_waiters_post() */
 
-static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
+static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
+				struct dlm_message *ms)
 {
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 	int overlap_done = 0;
 
 	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
+		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
 		overlap_done = 1;
 		goto out_del;
 	}
 
 	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
+		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
 		overlap_done = 1;
 		goto out_del;
 	}
 
+	/* Cancel state was preemptively cleared by a successful convert,
+	   see next comment, nothing to do. */
+
+	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
+	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
+		log_debug(ls, "remwait %x cancel_reply wait_type %d",
+			  lkb->lkb_id, lkb->lkb_wait_type);
+		return -1;
+	}
+
+	/* Remove for the convert reply, and preemptively remove for the
+	   cancel reply.  A convert has been granted while there's still
+	   an outstanding cancel on it (the cancel is moot and the result
+	   in the cancel reply should be 0).  We preempt the cancel reply
+	   because the app gets the convert result and then can follow up
+	   with another op, like convert.  This subsequent op would see the
+	   lingering state of the cancel and fail with -EBUSY. */
+
+	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
+	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
+	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
+		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
+			  lkb->lkb_id);
+		lkb->lkb_wait_type = 0;
+		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
+		lkb->lkb_wait_count--;
+		goto out_del;
+	}
+
 	/* N.B. type of reply may not always correspond to type of original
 	   msg due to lookup->request optimization, verify others? */
 
@@ -888,8 +920,8 @@
 		goto out_del;
 	}
 
-	log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
-		  lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
+	log_error(ls, "remwait error %x reply %d flags %x no wait_type",
+		  lkb->lkb_id, mstype, lkb->lkb_flags);
 	return -1;
 
  out_del:
@@ -899,7 +931,7 @@
 	   this would happen */
 
 	if (overlap_done && lkb->lkb_wait_type) {
-		log_error(ls, "remove_from_waiters %x reply %d give up on %d",
+		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
 			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
 		lkb->lkb_wait_count--;
 		lkb->lkb_wait_type = 0;
@@ -921,7 +953,7 @@
 	int error;
 
 	mutex_lock(&ls->ls_waiters_mutex);
-	error = _remove_from_waiters(lkb, mstype);
+	error = _remove_from_waiters(lkb, mstype, NULL);
 	mutex_unlock(&ls->ls_waiters_mutex);
 	return error;
 }
@@ -936,7 +968,7 @@
 
 	if (ms != &ls->ls_stub_ms)
 		mutex_lock(&ls->ls_waiters_mutex);
-	error = _remove_from_waiters(lkb, ms->m_type);
+	error = _remove_from_waiters(lkb, ms->m_type, ms);
 	if (ms != &ls->ls_stub_ms)
 		mutex_unlock(&ls->ls_waiters_mutex);
 	return error;
@@ -2083,6 +2115,11 @@
 	lkb->lkb_timeout_cs = args->timeout;
 	rv = 0;
  out:
+	if (rv)
+		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
+			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
+			  lkb->lkb_status, lkb->lkb_wait_type,
+			  lkb->lkb_resource->res_name);
 	return rv;
 }
 
@@ -2149,6 +2186,13 @@
 			goto out;
 		}
 
+		/* there's nothing to cancel */
+		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
+		    !lkb->lkb_wait_type) {
+			rv = -EBUSY;
+			goto out;
+		}
+
 		switch (lkb->lkb_wait_type) {
 		case DLM_MSG_LOOKUP:
 		case DLM_MSG_REQUEST:
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f..cd8e2df 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@
 		goto out_lkbfree;
 	for (i = 0; i < size; i++) {
 		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
-		rwlock_init(&ls->ls_dirtbl[i].lock);
+		spin_lock_init(&ls->ls_dirtbl[i].lock);
 	}
 
 	INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5eb..609108a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
  *
  * Cluster nodes are referred to by their nodeids. nodeids are
  * simply 32 bit numbers to the locking module - if they need to
- * be expanded for the cluster infrastructure then that is it's
+ * be expanded for the cluster infrastructure then that is its
  * responsibility. It is this layer's
  * responsibility to resolve these into IP address or
  * whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
  * of high load. Also, this way, the sending thread can collect together
  * messages bound for one node and send them in one block.
  *
- * lowcomms will choose to use wither TCP or SCTP as its transport layer
+ * lowcomms will choose to use either TCP or SCTP as its transport layer
  * depending on the configuration variable 'protocol'. This should be set
- * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a
+ * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
  * cluster-wide mechanism as it must be the same on all nodes of the cluster
  * for the DLM to function.
  *
@@ -48,11 +48,11 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <linux/pagemap.h>
-#include <linux/idr.h>
 #include <linux/file.h>
 #include <linux/mutex.h>
 #include <linux/sctp.h>
 #include <net/sctp/user.h>
+#include <net/ipv6.h>
 
 #include "dlm_internal.h"
 #include "lowcomms.h"
@@ -60,6 +60,7 @@
 #include "config.h"
 
 #define NEEDED_RMEM (4*1024*1024)
+#define CONN_HASH_SIZE 32
 
 struct cbuf {
 	unsigned int base;
@@ -114,6 +115,7 @@
 	int retries;
 #define MAX_CONNECT_RETRIES 3
 	int sctp_assoc;
+	struct hlist_node list;
 	struct connection *othercon;
 	struct work_struct rwork; /* Receive workqueue */
 	struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@
 static struct workqueue_struct *recv_workqueue;
 static struct workqueue_struct *send_workqueue;
 
-static DEFINE_IDR(connections_idr);
+static struct hlist_head connection_hash[CONN_HASH_SIZE];
 static DEFINE_MUTEX(connections_lock);
-static int max_nodeid;
 static struct kmem_cache *con_cache;
 
 static void process_recv_sockets(struct work_struct *work);
 static void process_send_sockets(struct work_struct *work);
 
+
+/* This is deliberately very simple because most clusters have simple
+   sequential nodeids, so we should be able to go straight to a connection
+   struct in the hash table */
+static inline int nodeid_hash(int nodeid)
+{
+	return nodeid & (CONN_HASH_SIZE-1);
+}
+
+static struct connection *__find_con(int nodeid)
+{
+	int r;
+	struct hlist_node *h;
+	struct connection *con;
+
+	r = nodeid_hash(nodeid);
+
+	hlist_for_each_entry(con, h, &connection_hash[r], list) {
+		if (con->nodeid == nodeid)
+			return con;
+	}
+	return NULL;
+}
+
 /*
  * If 'allocation' is zero then we don't attempt to create a new
  * connection structure for this node.
@@ -154,31 +179,17 @@
 {
 	struct connection *con = NULL;
 	int r;
-	int n;
 
-	con = idr_find(&connections_idr, nodeid);
+	con = __find_con(nodeid);
 	if (con || !alloc)
 		return con;
 
-	r = idr_pre_get(&connections_idr, alloc);
-	if (!r)
-		return NULL;
-
 	con = kmem_cache_zalloc(con_cache, alloc);
 	if (!con)
 		return NULL;
 
-	r = idr_get_new_above(&connections_idr, con, nodeid, &n);
-	if (r) {
-		kmem_cache_free(con_cache, con);
-		return NULL;
-	}
-
-	if (n != nodeid) {
-		idr_remove(&connections_idr, n);
-		kmem_cache_free(con_cache, con);
-		return NULL;
-	}
+	r = nodeid_hash(nodeid);
+	hlist_add_head(&con->list, &connection_hash[r]);
 
 	con->nodeid = nodeid;
 	mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@
 
 	/* Setup action pointers for child sockets */
 	if (con->nodeid) {
-		struct connection *zerocon = idr_find(&connections_idr, 0);
+		struct connection *zerocon = __find_con(0);
 
 		con->connect_action = zerocon->connect_action;
 		if (!con->rx_action)
 			con->rx_action = zerocon->rx_action;
 	}
 
-	if (nodeid > max_nodeid)
-		max_nodeid = nodeid;
-
 	return con;
 }
 
+/* Loop round all connections */
+static void foreach_conn(void (*conn_func)(struct connection *c))
+{
+	int i;
+	struct hlist_node *h, *n;
+	struct connection *con;
+
+	for (i = 0; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+			conn_func(con);
+		}
+	}
+}
+
 static struct connection *nodeid2con(int nodeid, gfp_t allocation)
 {
 	struct connection *con;
@@ -217,14 +239,17 @@
 static struct connection *assoc2con(int assoc_id)
 {
 	int i;
+	struct hlist_node *h;
 	struct connection *con;
 
 	mutex_lock(&connections_lock);
-	for (i=0; i<=max_nodeid; i++) {
-		con = __nodeid2con(i, 0);
-		if (con && con->sctp_assoc == assoc_id) {
-			mutex_unlock(&connections_lock);
-			return con;
+
+	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry(con, h, &connection_hash[i], list) {
+			if (con && con->sctp_assoc == assoc_id) {
+				mutex_unlock(&connections_lock);
+				return con;
+			}
 		}
 	}
 	mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@
 	} else {
 		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
-		memcpy(&ret6->sin6_addr, &in6->sin6_addr,
-		       sizeof(in6->sin6_addr));
+		ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
 	}
 
 	return 0;
@@ -376,25 +400,23 @@
 		log_print("send EOF to node failed: %d", ret);
 }
 
+static void sctp_init_failed_foreach(struct connection *con)
+{
+	con->sctp_assoc = 0;
+	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+			queue_work(send_workqueue, &con->swork);
+	}
+}
+
 /* INIT failed but we don't know which node...
    restart INIT on all pending nodes */
 static void sctp_init_failed(void)
 {
-	int i;
-	struct connection *con;
-
 	mutex_lock(&connections_lock);
-	for (i=1; i<=max_nodeid; i++) {
-		con = __nodeid2con(i, 0);
-		if (!con)
-			continue;
-		con->sctp_assoc = 0;
-		if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
-			if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
-				queue_work(send_workqueue, &con->swork);
-			}
-		}
-	}
+
+	foreach_conn(sctp_init_failed_foreach);
+
 	mutex_unlock(&connections_lock);
 }
 
@@ -1313,13 +1335,10 @@
 
 static void clean_one_writequeue(struct connection *con)
 {
-	struct list_head *list;
-	struct list_head *temp;
+	struct writequeue_entry *e, *safe;
 
 	spin_lock(&con->writequeue_lock);
-	list_for_each_safe(list, temp, &con->writequeue) {
-		struct writequeue_entry *e =
-			list_entry(list, struct writequeue_entry, list);
+	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
 		list_del(&e->list);
 		free_entry(e);
 	}
@@ -1369,14 +1388,7 @@
 /* Discard all entries on the write queues */
 static void clean_writequeues(void)
 {
-	int nodeid;
-
-	for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
-		struct connection *con = __nodeid2con(nodeid, 0);
-
-		if (con)
-			clean_one_writequeue(con);
-	}
+	foreach_conn(clean_one_writequeue);
 }
 
 static void work_stop(void)
@@ -1406,23 +1418,29 @@
 	return 0;
 }
 
+static void stop_conn(struct connection *con)
+{
+	con->flags |= 0x0F;
+	if (con->sock)
+		con->sock->sk->sk_user_data = NULL;
+}
+
+static void free_conn(struct connection *con)
+{
+	close_connection(con, true);
+	if (con->othercon)
+		kmem_cache_free(con_cache, con->othercon);
+	hlist_del(&con->list);
+	kmem_cache_free(con_cache, con);
+}
+
 void dlm_lowcomms_stop(void)
 {
-	int i;
-	struct connection *con;
-
 	/* Set all the flags to prevent any
 	   socket activity.
 	*/
 	mutex_lock(&connections_lock);
-	for (i = 0; i <= max_nodeid; i++) {
-		con = __nodeid2con(i, 0);
-		if (con) {
-			con->flags |= 0x0F;
-			if (con->sock)
-				con->sock->sk->sk_user_data = NULL;
-		}
-	}
+	foreach_conn(stop_conn);
 	mutex_unlock(&connections_lock);
 
 	work_stop();
@@ -1430,25 +1448,20 @@
 	mutex_lock(&connections_lock);
 	clean_writequeues();
 
-	for (i = 0; i <= max_nodeid; i++) {
-		con = __nodeid2con(i, 0);
-		if (con) {
-			close_connection(con, true);
-			if (con->othercon)
-				kmem_cache_free(con_cache, con->othercon);
-			kmem_cache_free(con_cache, con);
-		}
-	}
-	max_nodeid = 0;
+	foreach_conn(free_conn);
+
 	mutex_unlock(&connections_lock);
 	kmem_cache_destroy(con_cache);
-	idr_init(&connections_idr);
 }
 
 int dlm_lowcomms_start(void)
 {
 	int error = -EINVAL;
 	struct connection *con;
+	int i;
+
+	for (i = 0; i < CONN_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&connection_hash[i]);
 
 	init_local();
 	if (!dlm_local_count) {
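
Replacing the IDR (keyed 0..max_nodeid) with a 32-bucket hlist hash drops
the max_nodeid bookkeeping and the sparse full-range scans; whole-table
walks now go through foreach_conn() with a callback, as sctp_init_failed(),
clean_writequeues() and dlm_lowcomms_stop() show above. Note that
nodeid_hash()'s mask is a modulo only because CONN_HASH_SIZE is a power of
two. A new walker would follow the same idiom (dump_conn is hypothetical):

	static void dump_conn(struct connection *con)
	{
		log_print("nodeid %d assoc %d", con->nodeid, con->sctp_assoc);
	}

	mutex_lock(&connections_lock);
	foreach_conn(dump_conn);
	mutex_unlock(&connections_lock);
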
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 065149e..ebce994 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -84,7 +84,7 @@
 
 static void compat_input(struct dlm_write_request *kb,
 			 struct dlm_write_request32 *kb32,
-			 size_t count)
+			 int namelen)
 {
 	kb->version[0] = kb32->version[0];
 	kb->version[1] = kb32->version[1];
@@ -96,8 +96,7 @@
 	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
 		kb->i.lspace.flags = kb32->i.lspace.flags;
 		kb->i.lspace.minor = kb32->i.lspace.minor;
-		memcpy(kb->i.lspace.name, kb32->i.lspace.name, count -
-			offsetof(struct dlm_write_request32, i.lspace.name));
+		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
 	} else if (kb->cmd == DLM_USER_PURGE) {
 		kb->i.purge.nodeid = kb32->i.purge.nodeid;
 		kb->i.purge.pid = kb32->i.purge.pid;
@@ -115,8 +114,7 @@
 		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
 		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
 		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
-		memcpy(kb->i.lock.name, kb32->i.lock.name, count -
-			offsetof(struct dlm_write_request32, i.lock.name));
+		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
 	}
 }
 
@@ -539,9 +537,16 @@
 #ifdef CONFIG_COMPAT
 	if (!kbuf->is64bit) {
 		struct dlm_write_request32 *k32buf;
+		int namelen = 0;
+
+		if (count > sizeof(struct dlm_write_request32))
+			namelen = count - sizeof(struct dlm_write_request32);
+
 		k32buf = (struct dlm_write_request32 *)kbuf;
-		kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) -
-			       sizeof(struct dlm_write_request32)), GFP_KERNEL);
+
+		/* add 1 after namelen so that the name string is terminated */
+		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
+			       GFP_KERNEL);
 		if (!kbuf) {
 			kfree(k32buf);
 			return -ENOMEM;
@@ -549,7 +554,8 @@
 
 		if (proc)
 			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
-		compat_input(kbuf, k32buf, count + 1);
+
+		compat_input(kbuf, k32buf, namelen);
 		kfree(k32buf);
 	}
 #endif
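
The compat write path previously sized its buffer from `count + 1` plus the
struct-size delta and recomputed the name length twice with offsetof(); the
fix derives namelen once and allocates exactly what the 64-bit copy needs.
The sizing invariant, spelled out:

	/* userspace writes:  count = sizeof(struct dlm_write_request32)
	 *                            + namelen
	 * kernel allocates:  sizeof(struct dlm_write_request) + namelen + 1
	 * where the +1 leaves room for a NUL after the name and kzalloc()
	 * guarantees that byte is actually zero. */
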
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 3e5637f..44d725f 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@
 
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-		if (inode->i_state & (I_FREEING|I_WILL_FREE))
+		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
 			continue;
 		if (inode->i_mapping->nrpages == 0)
 			continue;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 5e59658..2dda5ad 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -89,7 +89,7 @@
 	return;
 }
 
-struct dentry_operations ecryptfs_dops = {
+const struct dentry_operations ecryptfs_dops = {
 	.d_revalidate = ecryptfs_d_revalidate,
 	.d_release = ecryptfs_d_release,
 };
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ac749d4..064c582 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -580,7 +580,7 @@
 extern const struct inode_operations ecryptfs_dir_iops;
 extern const struct inode_operations ecryptfs_symlink_iops;
 extern const struct super_operations ecryptfs_sops;
-extern struct dentry_operations ecryptfs_dops;
+extern const struct dentry_operations ecryptfs_dops;
 extern struct address_space_operations ecryptfs_aops;
 extern int ecryptfs_verbosity;
 extern unsigned int ecryptfs_message_buf_len;
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 4a29d63..7f8d2e5 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@
 error_return:
 	brelse(bitmap_bh);
 	release_blocks(sb, freed);
-	DQUOT_FREE_BLOCK(inode, freed);
+	vfs_dq_free_block(inode, freed);
 }
 
 /**
@@ -1247,7 +1247,7 @@
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, num)) {
+	if (vfs_dq_alloc_block(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
@@ -1409,7 +1409,7 @@
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	DQUOT_FREE_BLOCK(inode, *count-num);
+	vfs_dq_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
@@ -1420,7 +1420,7 @@
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		DQUOT_FREE_BLOCK(inode, *count);
+		vfs_dq_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 66321a8..15387c9 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@
 	if (!is_bad_inode(inode)) {
 		/* Quota is already initialized in iput() */
 		ext2_xattr_delete_inode(inode);
-	    	DQUOT_FREE_INODE(inode);
-		DQUOT_DROP(inode);
+		vfs_dq_free_inode(inode);
+		vfs_dq_drop(inode);
 	}
 
 	es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@
 		goto fail_drop;
 	}
 
-	if (DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		err = -EDQUOT;
 		goto fail_drop;
 	}
@@ -605,10 +605,10 @@
 	return inode;
 
 fail_free_drop:
-	DQUOT_FREE_INODE(inode);
+	vfs_dq_free_inode(inode);
 
 fail_drop:
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f..b43b9556 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1444,7 +1444,7 @@
 		return error;
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
+		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
 		if (error)
 			return error;
 	}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7c6e360..f983225 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1331,6 +1331,7 @@
 				sb->s_blocksize - offset : toread;
 
 		tmp_bh.b_state = 0;
+		tmp_bh.b_size = sb->s_blocksize;
 		err = ext2_get_block(inode, blk, &tmp_bh, 0);
 		if (err < 0)
 			return err;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 987a526..7913531 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -642,7 +642,7 @@
 				ea_bdebug(new_bh, "reusing block");
 
 				error = -EDQUOT;
-				if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+				if (vfs_dq_alloc_block(inode, 1)) {
 					unlock_buffer(new_bh);
 					goto cleanup;
 				}
@@ -699,7 +699,7 @@
 		 * as if nothing happened and clean up the unused block */
 		if (error && error != -ENOSPC) {
 			if (new_bh && new_bh != old_bh)
-				DQUOT_FREE_BLOCK(inode, 1);
+				vfs_dq_free_block(inode, 1);
 			goto cleanup;
 		}
 	} else
@@ -731,7 +731,7 @@
 			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
 			if (ce)
 				mb_cache_entry_release(ce);
-			DQUOT_FREE_BLOCK(inode, 1);
+			vfs_dq_free_block(inode, 1);
 			mark_buffer_dirty(old_bh);
 			ea_bdebug(old_bh, "refcount now=%d",
 				le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@
 		mark_buffer_dirty(bh);
 		if (IS_SYNC(inode))
 			sync_dirty_buffer(bh);
-		DQUOT_FREE_BLOCK(inode, 1);
+		vfs_dq_free_block(inode, 1);
 	}
 	EXT2_I(inode)->i_file_acl = 0;
 
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0dbf1c0..225202d 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@
 	}
 	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
 	if (dquot_freed_blocks)
-		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+		vfs_dq_free_block(inode, dquot_freed_blocks);
 	return;
 }
 
@@ -1502,7 +1502,7 @@
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, num)) {
+	if (vfs_dq_alloc_block(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
@@ -1714,7 +1714,7 @@
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	DQUOT_FREE_BLOCK(inode, *count-num);
+	vfs_dq_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
@@ -1729,7 +1729,7 @@
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		DQUOT_FREE_BLOCK(inode, *count);
+		vfs_dq_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 8de6c72..dd13d60 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
 	 */
-	DQUOT_INIT(inode);
+	vfs_dq_init(inode);
 	ext3_xattr_delete_inode(handle, inode);
-	DQUOT_FREE_INODE(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_free_inode(inode);
+	vfs_dq_drop(inode);
 
 	is_directory = S_ISDIR(inode->i_mode);
 
@@ -589,7 +589,7 @@
 		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
 
 	ret = inode;
-	if(DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		err = -EDQUOT;
 		goto fail_drop;
 	}
@@ -620,10 +620,10 @@
 	return ret;
 
 fail_free_drop:
-	DQUOT_FREE_INODE(inode);
+	vfs_dq_free_inode(inode);
 
 fail_drop:
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05e5c2e..4a09ff1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3063,7 +3063,7 @@
 			error = PTR_ERR(handle);
 			goto err_out;
 		}
-		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
 		if (error) {
 			ext3_journal_stop(handle);
 			return error;
@@ -3154,7 +3154,7 @@
 		ret = 2 * (bpp + indirects) + 2;
 
 #ifdef CONFIG_QUOTA
-	/* We know that structure was already allocated during DQUOT_INIT so
+	/* We know that structure was already allocated during vfs_dq_init so
 	 * we will be updating only the data blocks + inodes */
 	ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
 #endif
@@ -3245,7 +3245,7 @@
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4db4ffa..e2fc63c 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2049,7 +2049,7 @@
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	DQUOT_INIT(dentry->d_inode);
+	vfs_dq_init(dentry->d_inode);
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	DQUOT_INIT(dentry->d_inode);
+	vfs_dq_init(dentry->d_inode);
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
 	if (new_dentry->d_inode)
-		DQUOT_INIT(new_dentry->d_inode);
+		vfs_dq_init(new_dentry->d_inode);
 	handle = ext3_journal_start(old_dir, 2 *
 					EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4a97041..9e5b8e3 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@
 #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
 #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
 
-static int ext3_dquot_initialize(struct inode *inode, int type);
-static int ext3_dquot_drop(struct inode *inode);
 static int ext3_write_dquot(struct dquot *dquot);
 static int ext3_acquire_dquot(struct dquot *dquot);
 static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@
 				const char *data, size_t len, loff_t off);
 
 static struct dquot_operations ext3_quota_operations = {
-	.initialize	= ext3_dquot_initialize,
-	.drop		= ext3_dquot_drop,
+	.initialize	= dquot_initialize,
+	.drop		= dquot_drop,
 	.alloc_space	= dquot_alloc_space,
 	.alloc_inode	= dquot_alloc_inode,
 	.free_space	= dquot_free_space,
@@ -1438,7 +1436,7 @@
 		}
 
 		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 		if (inode->i_nlink) {
 			printk(KERN_DEBUG
 				"%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@
  * Process 1                         Process 2
  * ext3_create()                     quota_sync()
  *   journal_start()                   write_dquot()
- *   DQUOT_INIT()                        down(dqio_mutex)
+ *   vfs_dq_init()                       down(dqio_mutex)
  *     down(dqio_mutex)                    journal_start()
  *
  */
@@ -2714,44 +2712,6 @@
 	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
 }
 
-static int ext3_dquot_initialize(struct inode *inode, int type)
-{
-	handle_t *handle;
-	int ret, err;
-
-	/* We may create quota structure so we need to reserve enough blocks */
-	handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_initialize(inode, type);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_dquot_drop(struct inode *inode)
-{
-	handle_t *handle;
-	int ret, err;
-
-	/* We may delete quota structure so we need to reserve enough blocks */
-	handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle)) {
-		/*
-		 * We call dquot_drop() anyway to at least release references
-		 * to quota structures so that umount does not hang.
-		 */
-		dquot_drop(inode);
-		return PTR_ERR(handle);
-	}
-	ret = dquot_drop(inode);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
 static int ext3_write_dquot(struct dquot *dquot)
 {
 	int ret, err;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 175414a..83b7be8 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -498,7 +498,7 @@
 		error = ext3_journal_dirty_metadata(handle, bh);
 		if (IS_SYNC(inode))
 			handle->h_sync = 1;
-		DQUOT_FREE_BLOCK(inode, 1);
+		vfs_dq_free_block(inode, 1);
 		ea_bdebug(bh, "refcount now=%d; releasing",
 			  le32_to_cpu(BHDR(bh)->h_refcount));
 		if (ce)
@@ -774,7 +774,7 @@
 				/* The old block is released after updating
 				   the inode. */
 				error = -EDQUOT;
-				if (DQUOT_ALLOC_BLOCK(inode, 1))
+				if (vfs_dq_alloc_block(inode, 1))
 					goto cleanup;
 				error = ext3_journal_get_write_access(handle,
 								      new_bh);
@@ -848,7 +848,7 @@
 	return error;
 
 cleanup_dquot:
-	DQUOT_FREE_BLOCK(inode, 1);
+	vfs_dq_free_block(inode, 1);
 	goto cleanup;
 
 bad_block:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index de9459b..38f40d5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -536,7 +536,7 @@
 	ext4_mb_free_blocks(handle, inode, block, count,
 			    metadata, &dquot_freed_blocks);
 	if (dquot_freed_blocks)
-		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+		vfs_dq_free_block(inode, dquot_freed_blocks);
 	return;
 }
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c87dc..6083bb38 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -20,6 +20,7 @@
 #include <linux/blkdev.h>
 #include <linux/magic.h>
 #include <linux/jbd2.h>
+#include <linux/quota.h>
 #include "ext4_i.h"
 
 /*
@@ -1098,6 +1099,7 @@
 extern int ext4_block_truncate_page(handle_t *handle,
 		struct address_space *mapping, loff_t from);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern qsize_t ext4_get_reserved_space(struct inode *inode);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d2b358..fb51b40 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -220,10 +220,10 @@
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
 	 */
-	DQUOT_INIT(inode);
+	vfs_dq_init(inode);
 	ext4_xattr_delete_inode(handle, inode);
-	DQUOT_FREE_INODE(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_free_inode(inode);
+	vfs_dq_drop(inode);
 
 	is_directory = S_ISDIR(inode->i_mode);
 
@@ -915,7 +915,7 @@
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
 	ret = inode;
-	if (DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		err = -EDQUOT;
 		goto fail_drop;
 	}
@@ -956,10 +956,10 @@
 	return ret;
 
 fail_free_drop:
-	DQUOT_FREE_INODE(inode);
+	vfs_dq_free_inode(inode);
 
 fail_drop:
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7fed5b..71d3ecd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -975,6 +975,17 @@
 	return err;
 }
 
+qsize_t ext4_get_reserved_space(struct inode *inode)
+{
+	unsigned long long total;
+
+	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+	total = EXT4_I(inode)->i_reserved_data_blocks +
+		EXT4_I(inode)->i_reserved_meta_blocks;
+	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+	return total;
+}
 /*
  * Calculate the number of metadata blocks we need to reserve
  * to allocate @blocks for a non-extent-based file
@@ -1036,8 +1047,14 @@
 	/* update per-inode reservations */
 	BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
 	EXT4_I(inode)->i_reserved_data_blocks -= used;
-
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+	/*
+	 * Free the over-booked quota reserved for metadata blocks.
+	 */
+
+	if (mdb_free)
+		vfs_dq_release_reservation_block(inode, mdb_free);
 }
 
 /*
@@ -1553,8 +1570,8 @@
 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 {
 	int retries = 0;
-       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       unsigned long md_needed, mdblocks, total = 0;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	unsigned long md_needed, mdblocks, total = 0;
 
 	/*
 	 * recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@
 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
 	total = md_needed + nrblocks;
 
+	/*
+	 * Make quota reservation here to prevent quota overflow
+	 * later. Real quota accounting is done at page writeout
+	 * time.
+	 */
+	if (vfs_dq_reserve_block(inode, total)) {
+		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+		return -EDQUOT;
+	}
+
 	if (ext4_claim_free_blocks(sbi, total)) {
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
 			goto repeat;
 		}
+		vfs_dq_release_reservation_block(inode, total);
 		return -ENOSPC;
 	}
 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@
 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+	vfs_dq_release_reservation_block(inode, release);
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
@@ -4612,7 +4642,7 @@
 			error = PTR_ERR(handle);
 			goto err_out;
 		}
-		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
 		if (error) {
 			ext4_journal_stop(handle);
 			return error;
@@ -4991,7 +5021,7 @@
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
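
Taken together, the hunks above give delayed allocation a three-step quota
lifecycle: reserve when data enters the page cache, release whatever metadata
was over-booked, and (in mballoc.c below) claim the reservation once real
blocks exist. A hedged user-space model, with an invented struct quota and
illustrative numbers in place of the kernel's dquot machinery:

	#include <stdio.h>

	struct quota { long allocated; long reserved; long limit; };

	static int reserve_block(struct quota *q, long n)
	{
		if (q->allocated + q->reserved + n > q->limit)
			return -1;		/* would exceed quota: -EDQUOT */
		q->reserved += n;
		return 0;
	}

	static void claim_block(struct quota *q, long n)
	{
		q->reserved -= n;		/* reservation becomes real usage */
		q->allocated += n;
	}

	static void release_reservation(struct quota *q, long n)
	{
		q->reserved -= n;		/* e.g. over-booked metadata blocks */
	}

	int main(void)
	{
		struct quota q = { 0, 0, 100 };

		if (reserve_block(&q, 12))	/* 8 data + 4 estimated metadata */
			return 1;
		claim_block(&q, 10);		/* only 10 blocks were really used */
		release_reservation(&q, 2);	/* free the over-estimate */
		printf("allocated=%ld reserved=%ld\n", q.allocated, q.reserved);
		return 0;
	}

Failing the reservation up front is what turns a would-be quota overflow at
writeout time into an early -EDQUOT at write time.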
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f61e62..b038188 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3086,9 +3086,12 @@
 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
 		/* release all the reserved blocks if non delalloc */
 		percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
-	else
+	else {
 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
 						ac->ac_b_ex.fe_len);
+		/* convert reserved quota blocks to real quota blocks */
+		vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+	}
 
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@
 	struct ext4_sb_info *sbi;
 	struct super_block *sb;
 	ext4_fsblk_t block = 0;
-	unsigned int inquota;
+	unsigned int inquota = 0;
 	unsigned int reserv_blks = 0;
 
 	sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@
 		   (unsigned long long) ar->pleft,
 		   (unsigned long long) ar->pright);
 
-	if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
-		/*
-		 * With delalloc we already reserved the blocks
+	/*
+	 * For delayed allocation, we could skip the ENOSPC and
+	 * EDQUOT checks, as blocks and quota have already been
+	 * reserved when the data was copied into the pagecache.
+	 */
+	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+	else {
+		/* Without delayed allocation we need to verify
+		 * there are enough free blocks to do the block allocation
+		 * and that the allocation doesn't exceed the quota limits.
 		 */
 		while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
 			/* let others to free the space */
@@ -4576,19 +4587,16 @@
 			return 0;
 		}
 		reserv_blks = ar->len;
+		while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+			ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+			ar->len--;
+		}
+		inquota = ar->len;
+		if (ar->len == 0) {
+			*errp = -EDQUOT;
+			goto out3;
+		}
 	}
-	while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
-		ar->flags |= EXT4_MB_HINT_NOPREALLOC;
-		ar->len--;
-	}
-	if (ar->len == 0) {
-		*errp = -EDQUOT;
-		goto out3;
-	}
-	inquota = ar->len;
-
-	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
-		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 	if (!ac) {
@@ -4654,8 +4662,8 @@
 out2:
 	kmem_cache_free(ext4_ac_cachep, ac);
 out1:
-	if (ar->len < inquota)
-		DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+	if (inquota && ar->len < inquota)
+		vfs_dq_free_block(ar->inode, inquota - ar->len);
 out3:
 	if (!ar->len) {
 		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
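
The non-delalloc fallback above retries with ever smaller requests before
giving up. A small sketch of that loop, with a made-up quota_left counter
standing in for vfs_dq_alloc_block()'s real accounting:

	#include <stdio.h>

	static long quota_left = 3;		/* illustrative remaining quota */

	static int dq_alloc_block(long n)
	{
		if (n > quota_left)
			return -1;		/* charge refused */
		quota_left -= n;
		return 0;
	}

	int main(void)
	{
		long len = 8;			/* blocks originally requested */

		while (len && dq_alloc_block(len))
			len--;			/* retry with a smaller request */
		if (!len) {
			puts("EDQUOT");		/* not even one block fits */
			return 1;
		}
		printf("allocated %ld of 8 blocks\n", len);
		return 0;
	}

Remembering the charged amount in inquota (now initialised to 0) is what lets
the error path free only what was actually charged.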
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ba702bd..8341024 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2092,7 +2092,7 @@
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	DQUOT_INIT(dentry->d_inode);
+	vfs_dq_init(dentry->d_inode);
 	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	DQUOT_INIT(dentry->d_inode);
+	vfs_dq_init(dentry->d_inode);
 	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
 	if (new_dentry->d_inode)
-		DQUOT_INIT(new_dentry->d_inode);
+		vfs_dq_init(new_dentry->d_inode);
 	handle = ext4_journal_start(old_dir, 2 *
 					EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
 					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39d1993..f7371a6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@
 #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
 #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
 
-static int ext4_dquot_initialize(struct inode *inode, int type);
-static int ext4_dquot_drop(struct inode *inode);
 static int ext4_write_dquot(struct dquot *dquot);
 static int ext4_acquire_dquot(struct dquot *dquot);
 static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@
 				const char *data, size_t len, loff_t off);
 
 static struct dquot_operations ext4_quota_operations = {
-	.initialize	= ext4_dquot_initialize,
-	.drop		= ext4_dquot_drop,
+	.initialize	= dquot_initialize,
+	.drop		= dquot_drop,
 	.alloc_space	= dquot_alloc_space,
+	.reserve_space	= dquot_reserve_space,
+	.claim_space	= dquot_claim_space,
+	.release_rsv	= dquot_release_reserved_space,
+	.get_reserved_space = ext4_get_reserved_space,
 	.alloc_inode	= dquot_alloc_inode,
 	.free_space	= dquot_free_space,
 	.free_inode	= dquot_free_inode,
@@ -1802,7 +1804,7 @@
 		}
 
 		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 		if (inode->i_nlink) {
 			printk(KERN_DEBUG
 				"%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@
  * is locked for write. Otherwise there are possible deadlocks:
  * Process 1                         Process 2
  * ext4_create()                     quota_sync()
- *   jbd2_journal_start()                   write_dquot()
- *   DQUOT_INIT()                        down(dqio_mutex)
+ *   jbd2_journal_start()                  write_dquot()
+ *   vfs_dq_init()                         down(dqio_mutex)
  *     down(dqio_mutex)                    jbd2_journal_start()
  *
  */
@@ -3380,44 +3382,6 @@
 	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
 }
 
-static int ext4_dquot_initialize(struct inode *inode, int type)
-{
-	handle_t *handle;
-	int ret, err;
-
-	/* We may create quota structure so we need to reserve enough blocks */
-	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_initialize(inode, type);
-	err = ext4_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext4_dquot_drop(struct inode *inode)
-{
-	handle_t *handle;
-	int ret, err;
-
-	/* We may delete quota structure so we need to reserve enough blocks */
-	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle)) {
-		/*
-		 * We call dquot_drop() anyway to at least release references
-		 * to quota structures so that umount does not hang.
-		 */
-		dquot_drop(inode);
-		return PTR_ERR(handle);
-	}
-	ret = dquot_drop(inode);
-	err = ext4_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
 static int ext4_write_dquot(struct dquot *dquot)
 {
 	int ret, err;
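
The comment above pins down the required ordering: take the journal handle
before dqio_mutex, never the reverse. A hedged pthread sketch of that
consistent ordering, with plain mutexes standing in for the journal handle
and dqio_mutex (the thread names and output are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t journal = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t dqio = PTHREAD_MUTEX_INITIALIZER;

	static void *writer(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&journal);	/* always journal first... */
		pthread_mutex_lock(&dqio);	/* ...then dqio: no AB-BA cycle */
		puts("writer: journal -> dqio");
		pthread_mutex_unlock(&dqio);
		pthread_mutex_unlock(&journal);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, writer, NULL);
		pthread_mutex_lock(&journal);	/* same order as the writer */
		pthread_mutex_lock(&dqio);
		puts("main: journal -> dqio");
		pthread_mutex_unlock(&dqio);
		pthread_mutex_unlock(&journal);
		pthread_join(t, NULL);
		return 0;
	}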
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 157ce65..62b31c2 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -490,7 +490,7 @@
 		error = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (IS_SYNC(inode))
 			ext4_handle_sync(handle);
-		DQUOT_FREE_BLOCK(inode, 1);
+		vfs_dq_free_block(inode, 1);
 		ea_bdebug(bh, "refcount now=%d; releasing",
 			  le32_to_cpu(BHDR(bh)->h_refcount));
 		if (ce)
@@ -784,7 +784,7 @@
 				/* The old block is released after updating
 				   the inode. */
 				error = -EDQUOT;
-				if (DQUOT_ALLOC_BLOCK(inode, 1))
+				if (vfs_dq_alloc_block(inode, 1))
 					goto cleanup;
 				error = ext4_journal_get_write_access(handle,
 								      new_bh);
@@ -860,7 +860,7 @@
 	return error;
 
 cleanup_dquot:
-	DQUOT_FREE_BLOCK(inode, 1);
+	vfs_dq_free_block(inode, 1);
 	goto cleanup;
 
 bad_block:
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7ba03a4..da3f361 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -188,7 +188,7 @@
 	goto out;
 }
 
-static struct dentry_operations msdos_dentry_operations = {
+static const struct dentry_operations msdos_dentry_operations = {
 	.d_hash		= msdos_hash,
 	.d_compare	= msdos_cmp,
 };
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 8ae32e3..a0e00e3 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -166,13 +166,13 @@
 	return 1;
 }
 
-static struct dentry_operations vfat_ci_dentry_ops = {
+static const struct dentry_operations vfat_ci_dentry_ops = {
 	.d_revalidate	= vfat_revalidate_ci,
 	.d_hash		= vfat_hashi,
 	.d_compare	= vfat_cmpi,
 };
 
-static struct dentry_operations vfat_dentry_ops = {
+static const struct dentry_operations vfat_dentry_ops = {
 	.d_revalidate	= vfat_revalidate,
 	.d_hash		= vfat_hash,
 	.d_compare	= vfat_cmp,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fdff346..06da052 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -224,7 +224,7 @@
 	return !nodeid || nodeid == FUSE_ROOT_ID;
 }
 
-struct dentry_operations fuse_dentry_operations = {
+const struct dentry_operations fuse_dentry_operations = {
 	.d_revalidate	= fuse_dentry_revalidate,
 };
 
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5e64b81..6fc5aed 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -493,7 +493,7 @@
 /** Device operations */
 extern const struct file_operations fuse_dev_operations;
 
-extern struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_dentry_operations;
 
 /**
  * Get a filled in inode
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 5eb57b0..022c66c 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -107,7 +107,7 @@
 	return 0;
 }
 
-struct dentry_operations gfs2_dops = {
+const struct dentry_operations gfs2_dops = {
 	.d_revalidate = gfs2_drevalidate,
 	.d_hash = gfs2_dhash,
 };
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 91abdbe..b56413e 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -49,7 +49,7 @@
 extern struct file_system_type gfs2meta_fs_type;
 extern const struct export_operations gfs2_export_ops;
 extern const struct super_operations gfs2_super_ops;
-extern struct dentry_operations gfs2_dops;
+extern const struct dentry_operations gfs2_dops;
 
 #endif /* __SUPER_DOT_H__ */
 
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 9955232..052387e 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,7 +213,7 @@
 extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
 
 /* string.c */
-extern struct dentry_operations hfs_dentry_operations;
+extern const struct dentry_operations hfs_dentry_operations;
 
 extern int hfs_hash_dentry(struct dentry *, struct qstr *);
 extern int hfs_strcmp(const unsigned char *, unsigned int,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 5bf89ec..7478f5c 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -31,7 +31,7 @@
 	return 1;
 }
 
-struct dentry_operations hfs_dentry_operations =
+const struct dentry_operations hfs_dentry_operations =
 {
 	.d_revalidate	= hfs_revalidate_dentry,
 	.d_hash		= hfs_hash_dentry,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f027a90..5c10d80 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -327,7 +327,7 @@
 /* inode.c */
 extern const struct address_space_operations hfsplus_aops;
 extern const struct address_space_operations hfsplus_btree_aops;
-extern struct dentry_operations hfsplus_dentry_operations;
+extern const struct dentry_operations hfsplus_dentry_operations;
 
 void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
 void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f105ee9..1bcf597 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -137,7 +137,7 @@
 	.writepages	= hfsplus_writepages,
 };
 
-struct dentry_operations hfsplus_dentry_operations = {
+const struct dentry_operations hfsplus_dentry_operations = {
 	.d_hash       = hfsplus_hash_dentry,
 	.d_compare    = hfsplus_compare_dentry,
 };
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c538e0..fe02ad4 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -31,12 +31,12 @@
 
 #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
 
-int hostfs_d_delete(struct dentry *dentry)
+static int hostfs_d_delete(struct dentry *dentry)
 {
 	return 1;
 }
 
-struct dentry_operations hostfs_dentry_ops = {
+static const struct dentry_operations hostfs_dentry_ops = {
 	.d_delete		= hostfs_d_delete,
 };
 
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 0831912..940d6d1 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -49,7 +49,7 @@
 	return 0;
 }
 
-static struct dentry_operations hpfs_dentry_operations = {
+static const struct dentry_operations hpfs_dentry_operations = {
 	.d_hash		= hpfs_hash_dentry,
 	.d_compare	= hpfs_compare_dentry,
 };
diff --git a/fs/inode.c b/fs/inode.c
index 643ac43..d06d6d2 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -294,7 +294,7 @@
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
 	inode_sync_wait(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	if (inode->i_sb->s_op->clear_inode)
 		inode->i_sb->s_op->clear_inode(inode);
 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -366,6 +366,8 @@
 		if (tmp == head)
 			break;
 		inode = list_entry(tmp, struct inode, i_sb_list);
+		if (inode->i_state & I_NEW)
+			continue;
 		invalidate_inode_buffers(inode);
 		if (!atomic_read(&inode->i_count)) {
 			list_move(&inode->i_list, dispose);
@@ -1168,7 +1170,7 @@
 	if (op->delete_inode) {
 		void (*delete)(struct inode *) = op->delete_inode;
 		if (!is_bad_inode(inode))
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 		/* Filesystems implementing their own
 		 * s_op->delete_inode are required to call
 		 * truncate_inode_pages and clear_inode()
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6147ec3..13d2edd 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@
 };
 
 
-static struct dentry_operations isofs_dentry_ops[] = {
+static const struct dentry_operations isofs_dentry_ops[] = {
 	{
 		.d_hash		= isofs_hash,
 		.d_compare	= isofs_dentry_cmp,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index d3e5c33..a166c16 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -233,7 +233,7 @@
 
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		if (DQUOT_TRANSFER(inode, iattr))
+		if (vfs_dq_transfer(inode, iattr))
 			return -EDQUOT;
 	}
 
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b00ee9f..b2ae190 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -158,9 +158,9 @@
 		/*
 		 * Free the inode from the quota allocation.
 		 */
-		DQUOT_INIT(inode);
-		DQUOT_FREE_INODE(inode);
-		DQUOT_DROP(inode);
+		vfs_dq_init(inode);
+		vfs_dq_free_inode(inode);
+		vfs_dq_drop(inode);
 	}
 
 	clear_inode(inode);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 4dcc058..925871e 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,10 +381,10 @@
 		 * It's time to move the inline table to an external
 		 * page and begin to build the xtree
 		 */
-		if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+		if (vfs_dq_alloc_block(ip, sbi->nbperpage))
 			goto clean_up;
 		if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
-			DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+			vfs_dq_free_block(ip, sbi->nbperpage);
 			goto clean_up;
 		}
 
@@ -408,7 +408,7 @@
 			memcpy(&jfs_ip->i_dirtable, temp_table,
 			       sizeof (temp_table));
 			dbFree(ip, xaddr, sbi->nbperpage);
-			DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+			vfs_dq_free_block(ip, sbi->nbperpage);
 			goto clean_up;
 		}
 		ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@
 			n = xlen;
 
 		/* Allocate blocks to quota. */
-		if (DQUOT_ALLOC_BLOCK(ip, n)) {
+		if (vfs_dq_alloc_block(ip, n)) {
 			rc = -EDQUOT;
 			goto extendOut;
 		}
@@ -1308,7 +1308,7 @@
 
 	/* Rollback quota allocation */
 	if (rc && quota_allocation)
-		DQUOT_FREE_BLOCK(ip, quota_allocation);
+		vfs_dq_free_block(ip, quota_allocation);
 
       dtSplitUp_Exit:
 
@@ -1369,7 +1369,7 @@
 		return -EIO;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -1916,7 +1916,7 @@
 	rp = rmp->data;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -2287,7 +2287,7 @@
 	xlen = lengthPXD(&fp->header.self);
 
 	/* Free quota allocation. */
-	DQUOT_FREE_BLOCK(ip, xlen);
+	vfs_dq_free_block(ip, xlen);
 
 	/* free/invalidate its buffer page */
 	discard_metapage(fmp);
@@ -2363,7 +2363,7 @@
 				xlen = lengthPXD(&p->header.self);
 
 				/* Free quota allocation */
-				DQUOT_FREE_BLOCK(ip, xlen);
+				vfs_dq_free_block(ip, xlen);
 
 				/* free/invalidate its buffer page */
 				discard_metapage(mp);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e32..169802e 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -141,7 +141,7 @@
 	}
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+	if (vfs_dq_alloc_block(ip, nxlen)) {
 		dbFree(ip, nxaddr, (s64) nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return -EDQUOT;
@@ -164,7 +164,7 @@
 	 */
 	if (rc) {
 		dbFree(ip, nxaddr, nxlen);
-		DQUOT_FREE_BLOCK(ip, nxlen);
+		vfs_dq_free_block(ip, nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return (rc);
 	}
@@ -256,7 +256,7 @@
 		goto exit;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+	if (vfs_dq_alloc_block(ip, nxlen)) {
 		dbFree(ip, nxaddr, (s64) nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return -EDQUOT;
@@ -297,7 +297,7 @@
 		/* extend the extent */
 		if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
 			dbFree(ip, xaddr + xlen, delta);
-			DQUOT_FREE_BLOCK(ip, nxlen);
+			vfs_dq_free_block(ip, nxlen);
 			goto exit;
 		}
 	} else {
@@ -308,7 +308,7 @@
 		 */
 		if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
 			dbFree(ip, nxaddr, nxlen);
-			DQUOT_FREE_BLOCK(ip, nxlen);
+			vfs_dq_free_block(ip, nxlen);
 			goto exit;
 		}
 	}
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index d4d142c..dc0e021 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -116,7 +116,7 @@
 	/*
 	 * Allocate inode to quota.
 	 */
-	if (DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		rc = -EDQUOT;
 		goto fail_drop;
 	}
@@ -162,7 +162,7 @@
 	return inode;
 
 fail_drop:
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 fail_unlock:
 	inode->i_nlink = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index adb2faf..1eff7db 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -47,5 +47,5 @@
 extern const struct inode_operations jfs_file_inode_operations;
 extern const struct file_operations jfs_file_operations;
 extern const struct inode_operations jfs_symlink_inode_operations;
-extern struct dentry_operations jfs_ci_dentry_operations;
+extern const struct dentry_operations jfs_ci_dentry_operations;
 #endif				/* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acaf..a27e26c 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -846,10 +846,10 @@
 			hint = addressXAD(xad) + lengthXAD(xad) - 1;
 		} else
 			hint = 0;
-		if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+		if ((rc = vfs_dq_alloc_block(ip, xlen)))
 			goto out;
 		if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
-			DQUOT_FREE_BLOCK(ip, xlen);
+			vfs_dq_free_block(ip, xlen);
 			goto out;
 		}
 	}
@@ -878,7 +878,7 @@
 			/* undo data extent allocation */
 			if (*xaddrp == 0) {
 				dbFree(ip, xaddr, (s64) xlen);
-				DQUOT_FREE_BLOCK(ip, xlen);
+				vfs_dq_free_block(ip, xlen);
 			}
 			return rc;
 		}
@@ -1246,7 +1246,7 @@
 	rbn = addressPXD(pxd);
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		rc = -EDQUOT;
 		goto clean_up;
 	}
@@ -1456,7 +1456,7 @@
 
 	/* Rollback quota allocation. */
 	if (quota_allocation)
-		DQUOT_FREE_BLOCK(ip, quota_allocation);
+		vfs_dq_free_block(ip, quota_allocation);
 
 	return (rc);
 }
@@ -1513,7 +1513,7 @@
 		return -EIO;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -3941,7 +3941,7 @@
 		ip->i_size = newsize;
 
 	/* update quota allocation to reflect freed blocks */
-	DQUOT_FREE_BLOCK(ip, nfreed);
+	vfs_dq_free_block(ip, nfreed);
 
 	/*
 	 * free tlock of invalidated pages
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b4de56b..514ee2e 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -35,7 +35,7 @@
 /*
  * forward references
  */
-struct dentry_operations jfs_ci_dentry_operations;
+const struct dentry_operations jfs_ci_dentry_operations;
 
 static s64 commitZeroLink(tid_t, struct inode *);
 
@@ -356,7 +356,7 @@
 	jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
 
 	/* Init inode for quota operations. */
-	DQUOT_INIT(ip);
+	vfs_dq_init(ip);
 
 	/* directory must be empty to be removed */
 	if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@
 	jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
 
 	/* Init inode for quota operations. */
-	DQUOT_INIT(ip);
+	vfs_dq_init(ip);
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
@@ -1136,7 +1136,7 @@
 	} else if (new_ip) {
 		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
-		DQUOT_INIT(new_ip);
+		vfs_dq_init(new_ip);
 	}
 
 	/*
@@ -1595,7 +1595,7 @@
 	return result;
 }
 
-struct dentry_operations jfs_ci_dentry_operations =
+const struct dentry_operations jfs_ci_dentry_operations =
 {
 	.d_hash = jfs_ci_hash,
 	.d_compare = jfs_ci_compare,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9b7f2cd..61dfa81 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -260,14 +260,14 @@
 	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
 
 	/* Allocate new blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+	if (vfs_dq_alloc_block(ip, nblocks)) {
 		return -EDQUOT;
 	}
 
 	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
 	if (rc) {
 		/* Rollback quota allocation. */
-		DQUOT_FREE_BLOCK(ip, nblocks);
+		vfs_dq_free_block(ip, nblocks);
 		return rc;
 	}
 
@@ -332,7 +332,7 @@
 
       failed:
 	/* Rollback quota allocation. */
-	DQUOT_FREE_BLOCK(ip, nblocks);
+	vfs_dq_free_block(ip, nblocks);
 
 	dbFree(ip, blkno, nblocks);
 	return rc;
@@ -538,7 +538,7 @@
 
 	if (blocks_needed > current_blocks) {
 		/* Allocate new blocks to quota. */
-		if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+		if (vfs_dq_alloc_block(inode, blocks_needed))
 			return -EDQUOT;
 
 		quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@
       clean_up:
 	/* Rollback quota allocation */
 	if (quota_allocation)
-		DQUOT_FREE_BLOCK(inode, quota_allocation);
+		vfs_dq_free_block(inode, quota_allocation);
 
 	return (rc);
 }
@@ -677,7 +677,7 @@
 
 	/* If old blocks exist, they must be removed from quota allocation. */
 	if (old_blocks)
-		DQUOT_FREE_BLOCK(inode, old_blocks);
+		vfs_dq_free_block(inode, old_blocks);
 
 	inode->i_ctime = CURRENT_TIME;
 
diff --git a/fs/libfs.c b/fs/libfs.c
index 49b4409..4910a36 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -44,7 +44,7 @@
  */
 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
-	static struct dentry_operations simple_dentry_operations = {
+	static const struct dentry_operations simple_dentry_operations = {
 		.d_delete = simple_delete_dentry,
 	};
 
@@ -242,7 +242,8 @@
 	d_instantiate(dentry, root);
 	s->s_root = dentry;
 	s->s_flags |= MS_ACTIVE;
-	return simple_set_mnt(mnt, s);
+	simple_set_mnt(mnt, s);
+	return 0;
 
 Enomem:
 	up_write(&s->s_umount);
diff --git a/fs/namei.c b/fs/namei.c
index 1993176..d040ce1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1473,7 +1473,7 @@
 	error = security_inode_create(dir, dentry, mode);
 	if (error)
 		return error;
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->create(dir, dentry, mode, nd);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -1489,24 +1489,22 @@
 	if (!inode)
 		return -ENOENT;
 
-	if (S_ISLNK(inode->i_mode))
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFLNK:
 		return -ELOOP;
-	
-	if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
-		return -EISDIR;
-
-	/*
-	 * FIFO's, sockets and device files are special: they don't
-	 * actually live on the filesystem itself, and as such you
-	 * can write to them even if the filesystem is read-only.
-	 */
-	if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
-	    	flag &= ~O_TRUNC;
-	} else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+	case S_IFDIR:
+		if (acc_mode & MAY_WRITE)
+			return -EISDIR;
+		break;
+	case S_IFBLK:
+	case S_IFCHR:
 		if (path->mnt->mnt_flags & MNT_NODEV)
 			return -EACCES;
-
+		/*FALLTHRU*/
+	case S_IFIFO:
+	case S_IFSOCK:
 		flag &= ~O_TRUNC;
+		break;
 	}
 
 	error = inode_permission(inode, acc_mode);
@@ -1552,7 +1550,7 @@
 			error = security_path_truncate(path, 0,
 					       ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
 		if (!error) {
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 
 			error = do_truncate(dentry, 0,
 					    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1561,7 @@
 			return error;
 	} else
 		if (flag & FMODE_WRITE)
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 
 	return 0;
 }
@@ -1946,7 +1944,7 @@
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -2045,7 +2043,7 @@
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->mkdir(dir, dentry, mode);
 	if (!error)
 		fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2129,7 @@
 	if (!dir->i_op->rmdir)
 		return -EPERM;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 
 	mutex_lock(&dentry->d_inode->i_mutex);
 	dentry_unhash(dentry);
@@ -2218,7 +2216,7 @@
 	if (!dir->i_op->unlink)
 		return -EPERM;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 
 	mutex_lock(&dentry->d_inode->i_mutex);
 	if (d_mountpoint(dentry))
@@ -2329,7 +2327,7 @@
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->symlink(dir, dentry, oldname);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -2413,7 +2411,7 @@
 		return error;
 
 	mutex_lock(&inode->i_mutex);
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->link(old_dentry, dir, new_dentry);
 	mutex_unlock(&inode->i_mutex);
 	if (!error)
@@ -2612,8 +2610,8 @@
 	if (!old_dir->i_op->rename)
 		return -EPERM;
 
-	DQUOT_INIT(old_dir);
-	DQUOT_INIT(new_dir);
+	vfs_dq_init(old_dir);
+	vfs_dq_init(new_dir);
 
 	old_name = fsnotify_oldname_init(old_dentry->d_name.name);
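
A user-space analogue of the rewritten may_open() classification above, using
the standard S_IFMT/S_IS* macros; O_TRUNC_FLAG, the negative-errno convention
and the mode values are illustrative stand-ins, and the MNT_NODEV check is
only noted in a comment:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/stat.h>

	#define O_TRUNC_FLAG 01000	/* stand-in for O_TRUNC */

	static int check_open(mode_t mode, int want_write, int *flag)
	{
		switch (mode & S_IFMT) {
		case S_IFLNK:
			return -ELOOP;		/* never open a bare symlink */
		case S_IFDIR:
			if (want_write)
				return -EISDIR;	/* no writable directory opens */
			break;
		case S_IFBLK:
		case S_IFCHR:
			/* (the kernel also checks MNT_NODEV here) */
			/* fall through */
		case S_IFIFO:
		case S_IFSOCK:
			*flag &= ~O_TRUNC_FLAG;	/* truncate is meaningless */
			break;
		}
		return 0;
	}

	int main(void)
	{
		int flag = O_TRUNC_FLAG;

		printf("dir+write -> %d\n", check_open(S_IFDIR, 1, &flag));
		printf("fifo -> %d, flag=%o\n",
		       check_open(S_IFIFO, 0, &flag), flag);
		return 0;
	}

The switch makes the special cases exhaustive and lets block and character
devices share the O_TRUNC stripping with FIFOs and sockets via fallthrough.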
 
diff --git a/fs/namespace.c b/fs/namespace.c
index f0e7530..0a42e0e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,11 +397,10 @@
 	spin_unlock(&vfsmount_lock);
 }
 
-int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
 {
 	mnt->mnt_sb = sb;
 	mnt->mnt_root = dget(sb->s_root);
-	return 0;
 }
 
 EXPORT_SYMBOL(simple_set_mnt);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 07e9715..9c59072 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -79,7 +79,7 @@
 static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
 static int ncp_delete_dentry(struct dentry *);
 
-static struct dentry_operations ncp_dentry_operations =
+static const struct dentry_operations ncp_dentry_operations =
 {
 	.d_revalidate	= ncp_lookup_validate,
 	.d_hash		= ncp_hash_dentry,
@@ -87,7 +87,7 @@
 	.d_delete	= ncp_delete_dentry,
 };
 
-struct dentry_operations ncp_root_dentry_operations =
+const struct dentry_operations ncp_root_dentry_operations =
 {
 	.d_hash		= ncp_hash_dentry,
 	.d_compare	= ncp_compare_dentry,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 672368f..78bf72f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -899,7 +899,7 @@
 	iput(inode);
 }
 
-struct dentry_operations nfs_dentry_operations = {
+const struct dentry_operations nfs_dentry_operations = {
 	.d_revalidate	= nfs_lookup_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
@@ -967,7 +967,7 @@
 #ifdef CONFIG_NFS_V4
 static int nfs_open_revalidate(struct dentry *, struct nameidata *);
 
-struct dentry_operations nfs4_dentry_operations = {
+const struct dentry_operations nfs4_dentry_operations = {
 	.d_revalidate	= nfs_open_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4e4d332..84345de 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -179,7 +179,7 @@
 	int (*recover_lock)(struct nfs4_state *, struct file_lock *);
 };
 
-extern struct dentry_operations nfs4_dentry_operations;
+extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
 /* inode.c */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c165a64..78376b6 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -356,7 +356,7 @@
 			put_write_access(inode);
 			goto out_nfserr;
 		}
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 	}
 
 	/* sanitize the mode change */
@@ -723,7 +723,7 @@
 		else
 			flags = O_WRONLY|O_LARGEFILE;
 
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 	}
 	*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
 			    flags, cred);
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 331f2e8..220c13f 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -380,6 +380,14 @@
 		struct list_head *watches;
 
 		/*
+		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+		 * I_WILL_FREE, or I_NEW, which is fine because by that point
+		 * the inode cannot have any associated watches.
+		 */
+		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+			continue;
+
+		/*
 		 * If i_count is zero, the inode cannot have any watches and
 		 * doing an __iget/iput with MS_ACTIVE clear would actually
 		 * evict all inodes with zero i_count from icache which is
@@ -388,14 +396,6 @@
 		if (!atomic_read(&inode->i_count))
 			continue;
 
-		/*
-		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
-		 * I_WILL_FREE which is fine because by that point the inode
-		 * cannot have any associated watches.
-		 */
-		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
-			continue;
-
 		need_iput_tmp = need_iput;
 		need_iput = NULL;
 		/* In case inotify_remove_watch_locked() drops a reference. */
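
A toy model of the reordered checks above; the flag values and fake_inode
struct are invented for illustration. The point of the reorder is that inodes
in a transitional state (now including I_NEW) are filtered out before the
reference count is consulted at all:

	#include <stdio.h>

	#define F_NEW		0x1	/* illustrative stand-ins for I_NEW etc. */
	#define F_FREEING	0x2

	struct fake_inode { int state; int count; };

	int main(void)
	{
		struct fake_inode inodes[] = {
			{ F_NEW, 1 },	/* skipped: still being set up */
			{ 0, 0 },	/* skipped: no references, no watches */
			{ 0, 2 },	/* the only one safe to visit */
		};
		unsigned int i;

		for (i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++) {
			if (inodes[i].state & (F_NEW | F_FREEING))
				continue;
			if (!inodes[i].count)
				continue;
			printf("visiting inode %u\n", i);
		}
		return 0;
	}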
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e9d7c20..7d60448 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -455,7 +455,7 @@
 	d_move(dentry, target);
 }
 
-struct dentry_operations ocfs2_dentry_ops = {
+const struct dentry_operations ocfs2_dentry_ops = {
 	.d_revalidate		= ocfs2_dentry_revalidate,
 	.d_iput			= ocfs2_dentry_iput,
 };
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index d06e16c..faa12e7 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -26,7 +26,7 @@
 #ifndef OCFS2_DCACHE_H
 #define OCFS2_DCACHE_H
 
-extern struct dentry_operations ocfs2_dentry_ops;
+extern const struct dentry_operations ocfs2_dentry_ops;
 
 struct ocfs2_dentry_lock {
 	/* Use count of dentry lock */
diff --git a/fs/open.c b/fs/open.c
index a3a78ce..75b6167 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@
 	if (!error)
 		error = security_path_truncate(&path, length, 0);
 	if (!error) {
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 		error = do_truncate(path.dentry, length, 0, NULL);
 	}
 
diff --git a/fs/pipe.c b/fs/pipe.c
index 94ad159..4af7aa5 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -860,7 +860,7 @@
 				dentry->d_inode->i_ino);
 }
 
-static struct dentry_operations pipefs_dentry_operations = {
+static const struct dentry_operations pipefs_dentry_operations = {
 	.d_delete	= pipefs_delete_dentry,
 	.d_dname	= pipefs_dname,
 };
@@ -1024,11 +1024,6 @@
 	return error;
 }
 
-int do_pipe(int *fd)
-{
-	return do_pipe_flags(fd, 0);
-}
-
 /*
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way Unix traditionally does this, though.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index beaa0ce..aef6d55 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1545,7 +1545,7 @@
 	return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
 }
 
-static struct dentry_operations pid_dentry_operations =
+static const struct dentry_operations pid_dentry_operations =
 {
 	.d_revalidate	= pid_revalidate,
 	.d_delete	= pid_delete_dentry,
@@ -1717,7 +1717,7 @@
 	return 0;
 }
 
-static struct dentry_operations tid_fd_dentry_operations =
+static const struct dentry_operations tid_fd_dentry_operations =
 {
 	.d_revalidate	= tid_fd_revalidate,
 	.d_delete	= pid_delete_dentry,
@@ -2339,7 +2339,7 @@
 	return 0;
 }
 
-static struct dentry_operations proc_base_dentry_operations =
+static const struct dentry_operations proc_base_dentry_operations =
 {
 	.d_revalidate	= proc_base_revalidate,
 	.d_delete	= pid_delete_dentry,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5c..5d2989e 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -363,7 +363,7 @@
 	return 1;
 }
 
-static struct dentry_operations proc_dentry_operations =
+static const struct dentry_operations proc_dentry_operations =
 {
 	.d_delete	= proc_delete_dentry,
 };
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 94fcfff..9b1e4e9 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -7,7 +7,7 @@
 #include <linux/security.h>
 #include "internal.h"
 
-static struct dentry_operations proc_sys_dentry_operations;
+static const struct dentry_operations proc_sys_dentry_operations;
 static const struct file_operations proc_sys_file_operations;
 static const struct inode_operations proc_sys_inode_operations;
 static const struct file_operations proc_sys_dir_file_operations;
@@ -396,7 +396,7 @@
 	return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
 }
 
-static struct dentry_operations proc_sys_dentry_operations = {
+static const struct dentry_operations proc_sys_dentry_operations = {
 	.d_revalidate	= proc_sys_revalidate,
 	.d_delete	= proc_sys_delete,
 	.d_compare	= proc_sys_compare,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f6299a2..1e15a2b 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -83,7 +83,8 @@
 		ns->proc_mnt = mnt;
 	}
 
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 }
 
 static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 0000000..8047e01
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
+#
+#  Quota configuration
+#
+
+config QUOTA
+	bool "Quota support"
+	help
+	  If you say Y here, you will be able to set per-user limits for disk
+	  usage (also called disk quotas). Currently, it works for the
+	  ext2, ext3, and reiserfs file systems. ext3 also supports journalled
+	  quotas, for which you don't need to run quotacheck(8) after an
+	  unclean shutdown.
+	  For further details, read the Quota mini-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, or the documentation provided
+	  with the quota tools. Quota support is probably only useful for
+	  multi-user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+	bool "Report quota messages through netlink interface"
+	depends on QUOTA && NET
+	help
+	  If you say Y here, quota warnings (about exceeding the softlimit,
+	  reaching the hardlimit, etc.) will be reported through the netlink
+	  interface. If unsure, say Y.
+
+config PRINT_QUOTA_WARNING
+	bool "Print quota warnings to console (OBSOLETE)"
+	depends on QUOTA
+	default y
+	help
+	  If you say Y here, quota warnings (about exceeding the softlimit,
+	  reaching the hardlimit, etc.) will be printed to the process's
+	  controlling terminal. Note that this behavior is deprecated and may
+	  go away in the future. Please use netlink notification instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+	 tristate
+
+config QFMT_V1
+	tristate "Old quota format support"
+	depends on QUOTA
+	help
+	  This quota format was (is) used by kernels earlier than 2.4.22. If
+	  you have quota working and you don't want to convert to the new
+	  quota format, say Y here.
+
+config QFMT_V2
+	tristate "Quota format v2 support"
+	depends on QUOTA
+	select QUOTA_TREE
+	help
+	  This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+	  need this functionality say Y here.
+
+config QUOTACTL
+	bool
+	depends on XFS_QUOTA || QUOTA
+	default y
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 0000000..385a083
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA)		+= dquot.o
+obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
+obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
+obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
+obj-$(CONFIG_QUOTACTL)		+= quota.o
diff --git a/fs/dquot.c b/fs/quota/dquot.c
similarity index 85%
rename from fs/dquot.c
rename to fs/quota/dquot.c
index d6add0b..2ca967a 100644
--- a/fs/dquot.c
+++ b/fs/quota/dquot.c
@@ -129,9 +129,10 @@
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
-static DEFINE_SPINLOCK(dq_list_lock);
-static DEFINE_SPINLOCK(dq_state_lock);
-DEFINE_SPINLOCK(dq_data_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
 
 static char *quotatypes[] = INITQFNAMES;
 static struct quota_format_type *quota_formats;	/* List of registered formats */
@@ -148,35 +149,46 @@
 	spin_unlock(&dq_list_lock);
 	return 0;
 }
+EXPORT_SYMBOL(register_quota_format);
 
 void unregister_quota_format(struct quota_format_type *fmt)
 {
 	struct quota_format_type **actqf;
 
 	spin_lock(&dq_list_lock);
-	for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
+	for (actqf = &quota_formats; *actqf && *actqf != fmt;
+	     actqf = &(*actqf)->qf_next)
+		;
 	if (*actqf)
 		*actqf = (*actqf)->qf_next;
 	spin_unlock(&dq_list_lock);
 }
+EXPORT_SYMBOL(unregister_quota_format);
 
 static struct quota_format_type *find_quota_format(int id)
 {
 	struct quota_format_type *actqf;
 
 	spin_lock(&dq_list_lock);
-	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+	     actqf = actqf->qf_next)
+		;
 	if (!actqf || !try_module_get(actqf->qf_owner)) {
 		int qm;
 
 		spin_unlock(&dq_list_lock);
 		
-		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
-		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+		for (qm = 0; module_names[qm].qm_fmt_id &&
+			     module_names[qm].qm_fmt_id != id; qm++)
+			;
+		if (!module_names[qm].qm_fmt_id ||
+		    request_module(module_names[qm].qm_mod_name))
 			return NULL;
 
 		spin_lock(&dq_list_lock);
-		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+		     actqf = actqf->qf_next)
+			;
 		if (actqf && !try_module_get(actqf->qf_owner))
 			actqf = NULL;
 	}
@@ -215,6 +227,7 @@
 static struct hlist_head *dquot_hash;
 
 struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -230,7 +243,8 @@
  */
 static inline void insert_dquot_hash(struct dquot *dquot)
 {
-	struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+	struct hlist_head *head;
+	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
 	hlist_add_head(&dquot->dq_hash, head);
 }
 
@@ -239,17 +253,19 @@
 	hlist_del_init(&dquot->dq_hash);
 }
 
-static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+				unsigned int id, int type)
 {
 	struct hlist_node *node;
 	struct dquot *dquot;
 
 	hlist_for_each (node, dquot_hash+hashent) {
 		dquot = hlist_entry(node, struct dquot, dq_hash);
-		if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
+		if (dquot->dq_sb == sb && dquot->dq_id == id &&
+		    dquot->dq_type == type)
 			return dquot;
 	}
-	return NODQUOT;
+	return NULL;
 }
 
 /* Add a dquot to the tail of the free list */
@@ -309,6 +325,7 @@
 	spin_unlock(&dq_list_lock);
 	return 0;
 }
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 
 /* This function needs dq_list_lock */
 static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -345,8 +362,10 @@
 	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
 		/* Write the info if needed */
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret < 0)
 			goto out_iolock;
 		if (ret2 < 0) {
@@ -360,6 +379,7 @@
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_acquire);
 
 /*
  *	Write dquot to disk
@@ -380,8 +400,10 @@
 	 * => we'd better not write it */
 	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret >= 0)
 			ret = ret2;
 	}
@@ -389,6 +411,7 @@
 	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_commit);
 
 /*
  *	Release dquot
@@ -406,8 +429,10 @@
 	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
 		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
 		/* Write the info */
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret >= 0)
 			ret = ret2;
 	}
@@ -417,6 +442,7 @@
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_release);
 
 void dquot_destroy(struct dquot *dquot)
 {
@@ -516,6 +542,7 @@
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_scan_active);
 
 int vfs_quota_sync(struct super_block *sb, int type)
 {
@@ -533,7 +560,8 @@
 		spin_lock(&dq_list_lock);
 		dirty = &dqopt->info[cnt].dqi_dirty_list;
 		while (!list_empty(dirty)) {
-			dquot = list_first_entry(dirty, struct dquot, dq_dirty);
+			dquot = list_first_entry(dirty, struct dquot,
+						 dq_dirty);
 			/* A dirty but inactive dquot can only be a bad one... */
 			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 				clear_dquot_dirty(dquot);
@@ -563,6 +591,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(vfs_quota_sync);
 
 /* Free unused dquots from cache */
 static void prune_dqcache(int count)
@@ -672,6 +701,7 @@
 	put_dquot_last(dquot);
 	spin_unlock(&dq_list_lock);
 }
+EXPORT_SYMBOL(dqput);
 
 struct dquot *dquot_alloc(struct super_block *sb, int type)
 {
@@ -685,7 +715,7 @@
 
 	dquot = sb->dq_op->alloc_dquot(sb, type);
 	if(!dquot)
-		return NODQUOT;
+		return NULL;
 
 	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
@@ -711,10 +741,10 @@
 struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 {
 	unsigned int hashent = hashfn(sb, id, type);
-	struct dquot *dquot = NODQUOT, *empty = NODQUOT;
+	struct dquot *dquot = NULL, *empty = NULL;
 
         if (!sb_has_quota_active(sb, type))
-		return NODQUOT;
+		return NULL;
 we_slept:
 	spin_lock(&dq_list_lock);
 	spin_lock(&dq_state_lock);
@@ -725,15 +755,17 @@
 	}
 	spin_unlock(&dq_state_lock);
 
-	if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
-		if (empty == NODQUOT) {
+	dquot = find_dquot(hashent, sb, id, type);
+	if (!dquot) {
+		if (!empty) {
 			spin_unlock(&dq_list_lock);
-			if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
+			empty = get_empty_dquot(sb, type);
+			if (!empty)
 				schedule();	/* Try to wait for a moment... */
 			goto we_slept;
 		}
 		dquot = empty;
-		empty = NODQUOT;
+		empty = NULL;
 		dquot->dq_id = id;
 		/* all dquots go on the inuse_list */
 		put_inuse(dquot);
@@ -749,13 +781,14 @@
 		dqstats.lookups++;
 		spin_unlock(&dq_list_lock);
 	}
-	/* Wait for dq_lock - after this we know that either dquot_release() is already
-	 * finished or it will be canceled due to dq_count > 1 test */
+	/* Wait for dq_lock - after this we know that either dquot_release() is
+	 * already finished or it will be canceled due to dq_count > 1 test */
 	wait_on_dquot(dquot);
-	/* Read the dquot and instantiate it (everything done only if needed) */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
+	/* Read the dquot / allocate space in quota file */
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
+	    sb->dq_op->acquire_dquot(dquot) < 0) {
 		dqput(dquot);
-		dquot = NODQUOT;
+		dquot = NULL;
 		goto out;
 	}
 #ifdef __DQUOT_PARANOIA
@@ -767,6 +800,7 @@
 
 	return dquot;
 }
+EXPORT_SYMBOL(dqget);
 
 static int dqinit_needed(struct inode *inode, int type)
 {
@@ -775,9 +809,9 @@
 	if (IS_NOQUOTA(inode))
 		return 0;
 	if (type != -1)
-		return inode->i_dquot[type] == NODQUOT;
+		return !inode->i_dquot[type];
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			return 1;
 	return 0;
 }
@@ -789,12 +823,12 @@
 
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
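+		/* Skip inodes that are being created or torn down */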
+		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
+			continue;
 		if (!atomic_read(&inode->i_writecount))
 			continue;
 		if (!dqinit_needed(inode, type))
 			continue;
-		if (inode->i_state & (I_FREEING|I_WILL_FREE))
-			continue;
 
 		__iget(inode);
 		spin_unlock(&inode_lock);
@@ -813,7 +847,10 @@
 	iput(old_inode);
 }
 
-/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
+/*
+ * Return 0 if dqput() won't block.
+ * (note that 1 doesn't necessarily mean blocking)
+ */
 static inline int dqput_blocks(struct dquot *dquot)
 {
 	if (atomic_read(&dquot->dq_count) <= 1)
@@ -821,22 +858,27 @@
 	return 0;
 }
 
-/* Remove references to dquots from inode - add dquot to list for freeing if needed */
-/* We can't race with anybody because we hold dqptr_sem for writing... */
+/*
+ * Remove references to dquots from inode and add dquot to list for freeing
+ * if we hold the last reference to the dquot.
+ * We can't race with anybody because we hold dqptr_sem for writing...
+ */
 static int remove_inode_dquot_ref(struct inode *inode, int type,
 				  struct list_head *tofree_head)
 {
 	struct dquot *dquot = inode->i_dquot[type];
 
-	inode->i_dquot[type] = NODQUOT;
-	if (dquot != NODQUOT) {
+	inode->i_dquot[type] = NULL;
+	if (dquot) {
 		if (dqput_blocks(dquot)) {
 #ifdef __DQUOT_PARANOIA
 			if (atomic_read(&dquot->dq_count) != 1)
 				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
 #endif
 			spin_lock(&dq_list_lock);
-			list_add(&dquot->dq_free, tofree_head);	/* As dquot must have currently users it can't be on the free list... */
+			/* As the dquot still has users, it can't be on
+			 * the free list... */
+			list_add(&dquot->dq_free, tofree_head);
 			spin_unlock(&dq_list_lock);
 			return 1;
 		}
@@ -846,19 +888,22 @@
 	return 0;
 }
 
-/* Free list of dquots - called from inode.c */
-/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
+/*
+ * Free list of dquots
+ * Dquots are removed from inodes and no new references can be taken, so we
+ * are the only ones holding a reference.
+ */
 static void put_dquot_list(struct list_head *tofree_head)
 {
 	struct list_head *act_head;
 	struct dquot *dquot;
 
 	act_head = tofree_head->next;
-	/* So now we have dquots on the list... Just free them */
 	while (act_head != tofree_head) {
 		dquot = list_entry(act_head, struct dquot, dq_free);
 		act_head = act_head->next;
-		list_del_init(&dquot->dq_free);	/* Remove dquot from the list so we won't have problems... */
+		/* Remove dquot from the list so we won't have problems... */
+		list_del_init(&dquot->dq_free);
 		dqput(dquot);
 	}
 }
@@ -870,6 +915,12 @@
 
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+		/*
+		 *  We also have to scan I_NEW inodes because they can already
+		 *  have the quota pointer initialized. Luckily, we need to
+		 *  touch only the quota pointers and these have separate
+		 *  locking (dqptr_sem).
+		 */
 		if (!IS_NOQUOTA(inode))
 			remove_inode_dquot_ref(inode, type, tofree_head);
 	}
@@ -899,7 +950,29 @@
 	dquot->dq_dqb.dqb_curspace += number;
 }
 
-static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
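+/* Account space as reserved against the dquot; callers hold dq_data_lock */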
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+	dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */
+static void dquot_claim_reserved_space(struct dquot *dquot,
+						qsize_t number)
+{
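+	/* Claiming more than was reserved indicates an accounting bug */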
+	WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
+	dquot->dq_dqb.dqb_curspace += number;
+	dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
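+/* Give back a reservation that will not be turned into an allocation */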
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+	dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
 {
 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
 	    dquot->dq_dqb.dqb_curinodes >= number)
@@ -911,7 +984,7 @@
 	clear_bit(DQ_INODES_B, &dquot->dq_flags);
 }
 
-static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
+static void dquot_decr_space(struct dquot *dquot, qsize_t number)
 {
 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
 	    dquot->dq_dqb.dqb_curspace >= number)
@@ -938,7 +1011,7 @@
 #ifdef CONFIG_PRINT_QUOTA_WARNING
 static int flag_print_warnings = 1;
 
-static inline int need_print_warning(struct dquot *dquot)
+static int need_print_warning(struct dquot *dquot)
 {
 	if (!flag_print_warnings)
 		return 0;
@@ -1065,13 +1138,17 @@
 	kfree_skb(skb);
 }
 #endif
-
-static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can sleep.
+ */
+static void flush_warnings(struct dquot *const *dquots, char *warntype)
 {
 	int i;
 
 	for (i = 0; i < MAXQUOTAS; i++)
-		if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
+		if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
 		    !warning_issued(dquots[i], warntype[i])) {
 #ifdef CONFIG_PRINT_QUOTA_WARNING
 			print_warning(dquots[i], warntype[i]);
@@ -1082,42 +1159,47 @@
 		}
 }
 
-static inline char ignore_hardlimit(struct dquot *dquot)
+static int ignore_hardlimit(struct dquot *dquot)
 {
 	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
 
 	return capable(CAP_SYS_RESOURCE) &&
-	    (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
+	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+		!(info->dqi_flags & V1_DQF_RSQUASH));
 }
 
 /* needs dq_data_lock */
 static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
 {
+	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
+
 	*warntype = QUOTA_NL_NOWARN;
 	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
 	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
 		return QUOTA_OK;
 
 	if (dquot->dq_dqb.dqb_ihardlimit &&
-	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
+	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
             !ignore_hardlimit(dquot)) {
 		*warntype = QUOTA_NL_IHARDWARN;
 		return NO_QUOTA;
 	}
 
 	if (dquot->dq_dqb.dqb_isoftlimit &&
-	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
-	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
+	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+	    dquot->dq_dqb.dqb_itime &&
+	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
             !ignore_hardlimit(dquot)) {
 		*warntype = QUOTA_NL_ISOFTLONGWARN;
 		return NO_QUOTA;
 	}
 
 	if (dquot->dq_dqb.dqb_isoftlimit &&
-	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
+	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
 	    dquot->dq_dqb.dqb_itime == 0) {
 		*warntype = QUOTA_NL_ISOFTWARN;
-		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+		dquot->dq_dqb.dqb_itime = get_seconds() +
+		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
 	}
 
 	return QUOTA_OK;
@@ -1126,13 +1208,19 @@
 /* needs dq_data_lock */
 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
 {
+	qsize_t tspace;
+	struct super_block *sb = dquot->dq_sb;
+
 	*warntype = QUOTA_NL_NOWARN;
-	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
 	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
 		return QUOTA_OK;
 
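+	/* Reserved space counts toward the limits as if already allocated */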
+	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+		+ space;
+
 	if (dquot->dq_dqb.dqb_bhardlimit &&
-	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
+	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
             !ignore_hardlimit(dquot)) {
 		if (!prealloc)
 			*warntype = QUOTA_NL_BHARDWARN;
@@ -1140,8 +1228,9 @@
 	}
 
 	if (dquot->dq_dqb.dqb_bsoftlimit &&
-	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
-	    dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
+	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+	    dquot->dq_dqb.dqb_btime &&
+	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
             !ignore_hardlimit(dquot)) {
 		if (!prealloc)
 			*warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1149,11 +1238,12 @@
 	}
 
 	if (dquot->dq_dqb.dqb_bsoftlimit &&
-	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
+	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
 	    dquot->dq_dqb.dqb_btime == 0) {
 		if (!prealloc) {
 			*warntype = QUOTA_NL_BSOFTWARN;
-			dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+			dquot->dq_dqb.dqb_btime = get_seconds() +
+			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
 		}
 		else
 			/*
@@ -1168,15 +1258,18 @@
 
 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
 {
+	qsize_t newinodes;
+
 	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
 	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
 	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
 		return QUOTA_NL_NOWARN;
 
-	if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
+	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
+	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
 		return QUOTA_NL_ISOFTBELOW;
 	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
-	    dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
+	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
 		return QUOTA_NL_IHARDBELOW;
 	return QUOTA_NL_NOWARN;
 }
@@ -1203,7 +1296,7 @@
 {
 	unsigned int id = 0;
 	int cnt, ret = 0;
-	struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+	struct dquot *got[MAXQUOTAS] = { NULL, NULL };
 	struct super_block *sb = inode->i_sb;
 
 	/* First test before acquiring mutex - solves deadlocks when we
@@ -1236,9 +1329,9 @@
 		/* Avoid races with quotaoff() */
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
-		if (inode->i_dquot[cnt] == NODQUOT) {
+		if (!inode->i_dquot[cnt]) {
 			inode->i_dquot[cnt] = got[cnt];
-			got[cnt] = NODQUOT;
+			got[cnt] = NULL;
 		}
 	}
 out_err:
@@ -1248,6 +1341,7 @@
 		dqput(got[cnt]);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_initialize);
 
 /*
  * 	Release all quotas referenced by inode
@@ -1260,7 +1354,7 @@
 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		put[cnt] = inode->i_dquot[cnt];
-		inode->i_dquot[cnt] = NODQUOT;
+		inode->i_dquot[cnt] = NULL;
 	}
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 
@@ -1268,6 +1362,7 @@
 		dqput(put[cnt]);
 	return 0;
 }
+EXPORT_SYMBOL(dquot_drop);
 
 /* Wrapper to remove references to quota structures from inode */
 void vfs_dq_drop(struct inode *inode)
@@ -1284,12 +1379,13 @@
 		 * must ensure that nobody can come after the DQUOT_DROP and
 		 * add quota pointers back anyway */
 		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-			if (inode->i_dquot[cnt] != NODQUOT)
+			if (inode->i_dquot[cnt])
 				break;
 		if (cnt < MAXQUOTAS)
 			inode->i_sb->dq_op->drop(inode);
 	}
 }
+EXPORT_SYMBOL(vfs_dq_drop);
 
 /*
  * Following four functions update i_blocks+i_bytes fields and
@@ -1303,52 +1399,94 @@
 /*
  * This operation can block, but only after everything is updated
  */
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
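+/*
+ * Check the block limits for every quota type and either reserve the space
+ * (reserve != 0) or account it as allocated. The caller holds dqptr_sem;
+ * returns QUOTA_OK or NO_QUOTA.
+ */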
+int __dquot_alloc_space(struct inode *inode, qsize_t number,
+			int warn, int reserve)
 {
-	int cnt, ret = NO_QUOTA;
+	int cnt, ret = QUOTA_OK;
 	char warntype[MAXQUOTAS];
 
-	/* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-	if (IS_NOQUOTA(inode)) {
-out_add:
-		inode_add_bytes(inode, number);
-		return QUOTA_OK;
-	}
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype[cnt] = QUOTA_NL_NOWARN;
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	if (IS_NOQUOTA(inode)) {	/* Now we can do reliable test... */
-		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-		goto out_add;
-	}
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
-		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
-			goto warn_put_all;
+		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+		    == NO_QUOTA) {
+			ret = NO_QUOTA;
+			goto out_unlock;
+		}
 	}
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
-		dquot_incr_space(inode->i_dquot[cnt], number);
+		if (reserve)
+			dquot_resv_space(inode->i_dquot[cnt], number);
+		else
+			dquot_incr_space(inode->i_dquot[cnt], number);
 	}
-	inode_add_bytes(inode, number);
-	ret = QUOTA_OK;
-warn_put_all:
+	if (!reserve)
+		inode_add_bytes(inode, number);
+out_unlock:
 	spin_unlock(&dq_data_lock);
-	if (ret == QUOTA_OK)
-		/* Dirtify all the dquots - this can block when journalling */
-		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-			if (inode->i_dquot[cnt])
-				mark_dquot_dirty(inode->i_dquot[cnt]);
 	flush_warnings(inode->i_dquot, warntype);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return ret;
 }
 
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+	int cnt, ret = QUOTA_OK;
+
+	/*
+	 * First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex
+	 */
+	if (IS_NOQUOTA(inode)) {
+		inode_add_bytes(inode, number);
+		goto out;
+	}
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode)) {
+		inode_add_bytes(inode, number);
+		goto out_unlock;
+	}
+
+	ret = __dquot_alloc_space(inode, number, warn, 0);
+	if (ret == NO_QUOTA)
+		goto out_unlock;
+
+	/* Dirtify all the dquots - this can block when journalling */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (inode->i_dquot[cnt])
+			mark_dquot_dirty(inode->i_dquot[cnt]);
+out_unlock:
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(dquot_alloc_space);
+
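+/*
+ * Reserve space in the quota accounting without allocating it yet; the
+ * reservation is later claimed via dquot_claim_space().
+ */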
+int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+{
+	int ret = QUOTA_OK;
+
+	if (IS_NOQUOTA(inode))
+		goto out;
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode))
+		goto out_unlock;
+
+	ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(dquot_reserve_space);
+
 /*
  * This operation can block, but only after everything is updated
  */
@@ -1370,14 +1508,15 @@
 	}
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
-		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
+		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
+		    == NO_QUOTA)
 			goto warn_put_all;
 	}
 
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
 		dquot_incr_inodes(inode->i_dquot[cnt], number);
 	}
@@ -1393,6 +1532,73 @@
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_alloc_inode);
+
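+/*
+ * Turn previously reserved space into a real allocation and update the
+ * inode's byte count.
+ */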
+int dquot_claim_space(struct inode *inode, qsize_t number)
+{
+	int cnt;
+	int ret = QUOTA_OK;
+
+	if (IS_NOQUOTA(inode)) {
+		inode_add_bytes(inode, number);
+		goto out;
+	}
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode))	{
+		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		inode_add_bytes(inode, number);
+		goto out;
+	}
+
+	spin_lock(&dq_data_lock);
+	/* Convert the reserved quota space into allocated space */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt])
+			dquot_claim_reserved_space(inode->i_dquot[cnt],
+							number);
+	}
+	/* Update inode bytes */
+	inode_add_bytes(inode, number);
+	spin_unlock(&dq_data_lock);
+	/* Dirtify all the dquots - this can block when journalling */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		if (inode->i_dquot[cnt])
+			mark_dquot_dirty(inode->i_dquot[cnt]);
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(dquot_claim_space);
+
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+	int cnt;
+
+	if (IS_NOQUOTA(inode))
+		goto out;
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode))
+		goto out_unlock;
+
+	spin_lock(&dq_data_lock);
+	/* Release reserved dquots */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (inode->i_dquot[cnt])
+			dquot_free_reserved_space(inode->i_dquot[cnt], number);
+	}
+	spin_unlock(&dq_data_lock);
+
+out_unlock:
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+	return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
 
 /*
  * This operation can block, but only after everything is updated
@@ -1418,7 +1624,7 @@
 	}
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
 		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
 		dquot_decr_space(inode->i_dquot[cnt], number);
@@ -1433,6 +1639,7 @@
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return QUOTA_OK;
 }
+EXPORT_SYMBOL(dquot_free_space);
 
 /*
  * This operation can block, but only after everything is updated
@@ -1455,7 +1662,7 @@
 	}
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt] == NODQUOT)
+		if (!inode->i_dquot[cnt])
 			continue;
 		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
 		dquot_decr_inodes(inode->i_dquot[cnt], number);
@@ -1469,6 +1676,20 @@
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return QUOTA_OK;
 }
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Callback to obtain the reserved quota space from the underlying filesystem
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+	qsize_t reserved_space = 0;
+
+	if (sb_any_quota_active(inode->i_sb) &&
+	    inode->i_sb->dq_op->get_reserved_space)
+		reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+	return reserved_space;
+}
 
 /*
  * Transfer the number of inode and blocks from one diskquota to an other.
@@ -1478,7 +1699,8 @@
  */
 int dquot_transfer(struct inode *inode, struct iattr *iattr)
 {
-	qsize_t space;
+	qsize_t space, cur_space;
+	qsize_t rsv_space = 0;
 	struct dquot *transfer_from[MAXQUOTAS];
 	struct dquot *transfer_to[MAXQUOTAS];
 	int cnt, ret = QUOTA_OK;
@@ -1493,22 +1715,16 @@
 		return QUOTA_OK;
 	/* Initialize the arrays */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		transfer_from[cnt] = NODQUOT;
-		transfer_to[cnt] = NODQUOT;
+		transfer_from[cnt] = NULL;
+		transfer_to[cnt] = NULL;
 		warntype_to[cnt] = QUOTA_NL_NOWARN;
-		switch (cnt) {
-			case USRQUOTA:
-				if (!chuid)
-					continue;
-				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
-				break;
-			case GRPQUOTA:
-				if (!chgid)
-					continue;
-				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
-				break;
-		}
 	}
+	if (chuid)
+		transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
+					      USRQUOTA);
+	if (chgid)
+		transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
+					      GRPQUOTA);
 
 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	/* Now recheck reliably when holding dqptr_sem */
@@ -1517,10 +1733,12 @@
 		goto put_all;
 	}
 	spin_lock(&dq_data_lock);
-	space = inode_get_bytes(inode);
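+	/* Both the used and the reserved space move between the dquots */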
+	cur_space = inode_get_bytes(inode);
+	rsv_space = dquot_get_reserved_space(inode);
+	space = cur_space + rsv_space;
 	/* Build the transfer_from list and check the limits */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (transfer_to[cnt] == NODQUOT)
+		if (!transfer_to[cnt])
 			continue;
 		transfer_from[cnt] = inode->i_dquot[cnt];
 		if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
@@ -1536,7 +1754,7 @@
 		/*
 		 * Skip changes for the same uid or gid, or for a turned-off quota type.
 		 */
-		if (transfer_to[cnt] == NODQUOT)
+		if (!transfer_to[cnt])
 			continue;
 
 		/* Due to IO error we might not have transfer_from[] structure */
@@ -1546,11 +1764,14 @@
 			warntype_from_space[cnt] =
 				info_bdq_free(transfer_from[cnt], space);
 			dquot_decr_inodes(transfer_from[cnt], 1);
-			dquot_decr_space(transfer_from[cnt], space);
+			dquot_decr_space(transfer_from[cnt], cur_space);
+			dquot_free_reserved_space(transfer_from[cnt],
+						  rsv_space);
 		}
 
 		dquot_incr_inodes(transfer_to[cnt], 1);
-		dquot_incr_space(transfer_to[cnt], space);
+		dquot_incr_space(transfer_to[cnt], cur_space);
+		dquot_resv_space(transfer_to[cnt], rsv_space);
 
 		inode->i_dquot[cnt] = transfer_to[cnt];
 	}
@@ -1564,7 +1785,7 @@
 		if (transfer_to[cnt]) {
 			mark_dquot_dirty(transfer_to[cnt]);
 			/* The reference we got is transferred to the inode */
-			transfer_to[cnt] = NODQUOT;
+			transfer_to[cnt] = NULL;
 		}
 	}
 warn_put_all:
@@ -1582,10 +1803,11 @@
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	/* Clear dquot pointers we don't want to dqput() */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		transfer_from[cnt] = NODQUOT;
+		transfer_from[cnt] = NULL;
 	ret = NO_QUOTA;
 	goto warn_put_all;
 }
+EXPORT_SYMBOL(dquot_transfer);
 
 /* Wrapper for transferring ownership of an inode */
 int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1597,7 +1819,7 @@
 	}
 	return 0;
 }
-
+EXPORT_SYMBOL(vfs_dq_transfer);
 
 /*
  * Write info of quota file to disk
@@ -1612,6 +1834,7 @@
 	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_commit_info);
 
 /*
  * Definitions of diskquota operations.
@@ -1697,8 +1920,8 @@
 		drop_dquot_ref(sb, cnt);
 		invalidate_dquots(sb, cnt);
 		/*
-		 * Now all dquots should be invalidated, all writes done so we should be only
-		 * users of the info. No locks needed.
+		 * Now all dquots should be invalidated, all writes done so we
+		 * should be only users of the info. No locks needed.
 		 */
 		if (info_dirty(&dqopt->info[cnt]))
 			sb->dq_op->write_info(sb, cnt);
@@ -1736,10 +1959,12 @@
 			/* If quota was reenabled in the meantime, we have
 			 * nothing to do */
 			if (!sb_has_quota_loaded(sb, cnt)) {
-				mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
+				mutex_lock_nested(&toputinode[cnt]->i_mutex,
+						  I_MUTEX_QUOTA);
 				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				  S_NOATIME | S_NOQUOTA);
-				truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+				truncate_inode_pages(&toputinode[cnt]->i_data,
+						     0);
 				mutex_unlock(&toputinode[cnt]->i_mutex);
 				mark_inode_dirty(toputinode[cnt]);
 			}
@@ -1764,13 +1989,14 @@
 		}
 	return ret;
 }
+EXPORT_SYMBOL(vfs_quota_disable);
 
 int vfs_quota_off(struct super_block *sb, int type, int remount)
 {
 	return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
 				 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
 }
-
+EXPORT_SYMBOL(vfs_quota_off);
 /*
  *	Turn on quotas on a device
  */
@@ -1828,7 +2054,8 @@
 		 * possible) Also nobody should write to the file - we use
 		 * special IO operations which ignore the immutable bit. */
 		down_write(&dqopt->dqptr_sem);
-		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
+		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
+					     S_NOQUOTA);
 		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
 		up_write(&dqopt->dqptr_sem);
 		sb->dq_op->drop(inode);
@@ -1847,7 +2074,8 @@
 	dqopt->info[type].dqi_fmt_id = format_id;
 	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
 	mutex_lock(&dqopt->dqio_mutex);
-	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
+	error = dqopt->ops[type]->read_file_info(sb, type);
+	if (error < 0) {
 		mutex_unlock(&dqopt->dqio_mutex);
 		goto out_file_init;
 	}
@@ -1927,6 +2155,7 @@
 					     DQUOT_LIMITS_ENABLED);
 	return error;
 }
+EXPORT_SYMBOL(vfs_quota_on_path);
 
 int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
 		 int remount)
@@ -1944,6 +2173,7 @@
 	}
 	return error;
 }
+EXPORT_SYMBOL(vfs_quota_on);
 
 /*
  * More powerful function for turning on quotas allowing setting
@@ -1990,6 +2220,7 @@
 load_quota:
 	return vfs_load_quota_inode(inode, type, format_id, flags);
 }
+EXPORT_SYMBOL(vfs_quota_enable);
 
 /*
  * This function is used when filesystem needs to initialize quotas
@@ -2019,6 +2250,7 @@
 	dput(dentry);
 	return error;
 }
+EXPORT_SYMBOL(vfs_quota_on_mount);
 
 /* Wrapper to turn on quotas when remounting rw */
 int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2035,6 +2267,7 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(vfs_dq_quota_on_remount);
 
 static inline qsize_t qbtos(qsize_t blocks)
 {
@@ -2054,7 +2287,7 @@
 	spin_lock(&dq_data_lock);
 	di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
 	di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
-	di->dqb_curspace = dm->dqb_curspace;
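+	/* Report reserved space as used so userspace sees a consistent total */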
+	di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
 	di->dqb_ihardlimit = dm->dqb_ihardlimit;
 	di->dqb_isoftlimit = dm->dqb_isoftlimit;
 	di->dqb_curinodes = dm->dqb_curinodes;
@@ -2064,18 +2297,20 @@
 	spin_unlock(&dq_data_lock);
 }
 
-int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
+		  struct if_dqblk *di)
 {
 	struct dquot *dquot;
 
 	dquot = dqget(sb, id, type);
-	if (dquot == NODQUOT)
+	if (!dquot)
 		return -ESRCH;
 	do_get_dqblk(dquot, di);
 	dqput(dquot);
 
 	return 0;
 }
+EXPORT_SYMBOL(vfs_get_dqblk);
 
 /* Generic routine for setting common part of quota structure */
 static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2094,7 +2329,7 @@
 
 	spin_lock(&dq_data_lock);
 	if (di->dqb_valid & QIF_SPACE) {
-		dm->dqb_curspace = di->dqb_curspace;
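+		/* Userspace passed used+reserved; subtract the reservation */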
+		dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
 		check_blim = 1;
 		__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
 	}
@@ -2127,22 +2362,25 @@
 	}
 
 	if (check_blim) {
-		if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
+		if (!dm->dqb_bsoftlimit ||
+		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
 			dm->dqb_btime = 0;
 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-		}
-		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
+		} else if (!(di->dqb_valid & QIF_BTIME))
+			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
 	}
 	if (check_ilim) {
-		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
+		if (!dm->dqb_isoftlimit ||
+		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
 			dm->dqb_itime = 0;
 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
-		}
-		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
+		} else if (!(di->dqb_valid & QIF_ITIME))
+			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
 	}
-	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
+	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
+	    dm->dqb_isoftlimit)
 		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
 	else
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2152,7 +2390,8 @@
 	return 0;
 }
 
-int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
+int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
+		  struct if_dqblk *di)
 {
 	struct dquot *dquot;
 	int rc;
@@ -2167,6 +2406,7 @@
 out:
 	return rc;
 }
+EXPORT_SYMBOL(vfs_set_dqblk);
 
 /* Generic routine for getting common part of quota file information */
 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2188,6 +2428,7 @@
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	return 0;
 }
+EXPORT_SYMBOL(vfs_get_dqinfo);
 
 /* Generic routine for setting common part of quota file information */
 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2207,7 +2448,8 @@
 	if (ii->dqi_valid & IIF_IGRACE)
 		mi->dqi_igrace = ii->dqi_igrace;
 	if (ii->dqi_valid & IIF_FLAGS)
-		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
+		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
+				(ii->dqi_flags & DQF_MASK);
 	spin_unlock(&dq_data_lock);
 	mark_info_dirty(sb, type);
 	/* Force write to disk */
@@ -2216,6 +2458,7 @@
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	return err;
 }
+EXPORT_SYMBOL(vfs_set_dqinfo);
 
 struct quotactl_ops vfs_quotactl_ops = {
 	.quota_on	= vfs_quota_on,
@@ -2365,43 +2608,10 @@
 
 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
 	if (genl_register_family(&quota_genl_family) != 0)
-		printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
+		printk(KERN_ERR
+		       "VFS: Failed to create quota netlink interface.\n");
 #endif
 
 	return 0;
 }
 module_init(dquot_init);
-
-EXPORT_SYMBOL(register_quota_format);
-EXPORT_SYMBOL(unregister_quota_format);
-EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_data_lock);
-EXPORT_SYMBOL(vfs_quota_enable);
-EXPORT_SYMBOL(vfs_quota_on);
-EXPORT_SYMBOL(vfs_quota_on_path);
-EXPORT_SYMBOL(vfs_quota_on_mount);
-EXPORT_SYMBOL(vfs_quota_disable);
-EXPORT_SYMBOL(vfs_quota_off);
-EXPORT_SYMBOL(dquot_scan_active);
-EXPORT_SYMBOL(vfs_quota_sync);
-EXPORT_SYMBOL(vfs_get_dqinfo);
-EXPORT_SYMBOL(vfs_set_dqinfo);
-EXPORT_SYMBOL(vfs_get_dqblk);
-EXPORT_SYMBOL(vfs_set_dqblk);
-EXPORT_SYMBOL(dquot_commit);
-EXPORT_SYMBOL(dquot_commit_info);
-EXPORT_SYMBOL(dquot_acquire);
-EXPORT_SYMBOL(dquot_release);
-EXPORT_SYMBOL(dquot_mark_dquot_dirty);
-EXPORT_SYMBOL(dquot_initialize);
-EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(vfs_dq_drop);
-EXPORT_SYMBOL(dqget);
-EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_alloc_space);
-EXPORT_SYMBOL(dquot_alloc_inode);
-EXPORT_SYMBOL(dquot_free_space);
-EXPORT_SYMBOL(dquot_free_inode);
-EXPORT_SYMBOL(dquot_transfer);
-EXPORT_SYMBOL(vfs_dq_transfer);
-EXPORT_SYMBOL(vfs_dq_quota_on_remount);
diff --git a/fs/quota.c b/fs/quota/quota.c
similarity index 93%
rename from fs/quota.c
rename to fs/quota/quota.c
index d76ada9..b7f5a46 100644
--- a/fs/quota.c
+++ b/fs/quota/quota.c
@@ -20,7 +20,8 @@
 #include <linux/types.h>
 
 /* Check validity of generic quotactl commands */
-static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
+				  qid_t id)
 {
 	if (type >= MAXQUOTAS)
 		return -EINVAL;
@@ -72,7 +73,8 @@
 		case Q_SETINFO:
 		case Q_SETQUOTA:
 		case Q_GETQUOTA:
-			/* This is just informative test so we are satisfied without a lock */
+			/* This is just an informative test so we are satisfied
+			 * without the lock */
 			if (!sb_has_quota_active(sb, type))
 				return -ESRCH;
 	}
@@ -92,7 +94,8 @@
 }
 
 /* Check validity of XFS Quota Manager commands */
-static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
+			      qid_t id)
 {
 	if (type >= XQM_MAXQUOTAS)
 		return -EINVAL;
@@ -142,7 +145,8 @@
 	return 0;
 }
 
-static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
+static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
+				qid_t id)
 {
 	int error;
 
@@ -180,7 +184,8 @@
 			continue;
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
-		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
+				  I_MUTEX_QUOTA);
 		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
 		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 	}
@@ -200,14 +205,15 @@
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
-		/* This test just improves performance so it needn't be reliable... */
+		/* This test just improves performance so it needn't be
+		 * reliable... */
 		for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 			if (type != -1 && type != cnt)
 				continue;
 			if (!sb_has_quota_active(sb, cnt))
 				continue;
 			if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
-			    list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
+			   list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
 				continue;
 			break;
 		}
@@ -227,7 +233,8 @@
 }
 
 /* Copy parameters and call proper function */
-static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+		       void __user *addr)
 {
 	int ret;
 
@@ -235,7 +242,8 @@
 		case Q_QUOTAON: {
 			char *pathname;
 
-			if (IS_ERR(pathname = getname(addr)))
+			pathname = getname(addr);
+			if (IS_ERR(pathname))
 				return PTR_ERR(pathname);
 			ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
 			putname(pathname);
@@ -261,7 +269,8 @@
 		case Q_GETINFO: {
 			struct if_dqinfo info;
 
-			if ((ret = sb->s_qcop->get_info(sb, type, &info)))
+			ret = sb->s_qcop->get_info(sb, type, &info);
+			if (ret)
 				return ret;
 			if (copy_to_user(addr, &info, sizeof(info)))
 				return -EFAULT;
@@ -277,7 +286,8 @@
 		case Q_GETQUOTA: {
 			struct if_dqblk idq;
 
-			if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
+			ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
+			if (ret)
 				return ret;
 			if (copy_to_user(addr, &idq, sizeof(idq)))
 				return -EFAULT;
@@ -322,7 +332,8 @@
 		case Q_XGETQUOTA: {
 			struct fs_disk_quota fdq;
 
-			if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
+			ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
+			if (ret)
 				return ret;
 			if (copy_to_user(addr, &fdq, sizeof(fdq)))
 				return -EFAULT;
@@ -341,7 +352,7 @@
  * look up a superblock on which quota ops will be performed
  * - use the name of a block device to find the superblock thereon
  */
-static inline struct super_block *quotactl_block(const char __user *special)
+static struct super_block *quotactl_block(const char __user *special)
 {
 #ifdef CONFIG_BLOCK
 	struct block_device *bdev;
diff --git a/fs/quota_tree.c b/fs/quota/quota_tree.c
similarity index 84%
rename from fs/quota_tree.c
rename to fs/quota/quota_tree.c
index 953404c..f81f4bc 100644
--- a/fs/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -22,8 +22,6 @@
 
 #define __QUOTA_QT_PARANOIA
 
-typedef char *dqbuf_t;
-
 static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
 {
 	unsigned int epb = info->dqi_usable_bs >> 2;
@@ -35,46 +33,42 @@
 }
 
 /* Number of entries in one blocks */
-static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
+static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
 {
 	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
 	       / info->dqi_entry_size;
 }
 
-static dqbuf_t getdqbuf(size_t size)
+static char *getdqbuf(size_t size)
 {
-	dqbuf_t buf = kmalloc(size, GFP_NOFS);
+	char *buf = kmalloc(size, GFP_NOFS);
 	if (!buf)
-		printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
+		printk(KERN_WARNING
+		       "VFS: Not enough memory for quota buffers.\n");
 	return buf;
 }
 
-static inline void freedqbuf(dqbuf_t buf)
-{
-	kfree(buf);
-}
-
-static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 {
 	struct super_block *sb = info->dqi_sb;
 
 	memset(buf, 0, info->dqi_usable_bs);
-	return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf,
+	return sb->s_op->quota_read(sb, info->dqi_type, buf,
 	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
 }
 
-static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
+static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 {
 	struct super_block *sb = info->dqi_sb;
 
-	return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf,
+	return sb->s_op->quota_write(sb, info->dqi_type, buf,
 	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
 }
 
 /* Remove empty block from list and return it */
 static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 {
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
 	int ret, blk;
 
@@ -98,12 +92,12 @@
 	mark_info_dirty(info->dqi_sb, info->dqi_type);
 	ret = blk;
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
 /* Insert empty block to the list */
-static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
 {
 	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
 	int err;
@@ -120,9 +114,10 @@
 }
 
 /* Remove given block from the list of blocks with free entries */
-static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+			       uint blk)
 {
-	dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
 	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
 	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
 	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@@ -153,21 +148,24 @@
 		info->dqi_free_entry = nextblk;
 		mark_info_dirty(info->dqi_sb, info->dqi_type);
 	}
-	freedqbuf(tmpbuf);
+	kfree(tmpbuf);
 	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
 	/* No matter whether the write succeeds, the block is out of the list */
 	if (write_blk(info, blk, buf) < 0)
-		printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
+		printk(KERN_ERR
+		       "VFS: Can't write block (%u) with free entries.\n",
+		       blk);
 	return 0;
 out_buf:
-	freedqbuf(tmpbuf);
+	kfree(tmpbuf);
 	return err;
 }
 
 /* Insert given block to the beginning of list with free entries */
-static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
+static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+			       uint blk)
 {
-	dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs);
+	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
 	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
 	int err;
 
@@ -188,12 +186,12 @@
 		if (err < 0)
 			goto out_buf;
 	}
-	freedqbuf(tmpbuf);
+	kfree(tmpbuf);
 	info->dqi_free_entry = blk;
 	mark_info_dirty(info->dqi_sb, info->dqi_type);
 	return 0;
 out_buf:
-	freedqbuf(tmpbuf);
+	kfree(tmpbuf);
 	return err;
 }
 
@@ -215,7 +213,7 @@
 {
 	uint blk, i;
 	struct qt_disk_dqdbheader *dh;
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	char *ddquot;
 
 	*err = 0;
@@ -233,11 +231,12 @@
 		blk = get_free_dqblk(info);
 		if ((int)blk < 0) {
 			*err = blk;
-			freedqbuf(buf);
+			kfree(buf);
 			return 0;
 		}
 		memset(buf, 0, info->dqi_usable_bs);
-		/* This is enough as block is already zeroed and entry list is empty... */
+		/* This is enough as the block is already zeroed and the entry
+		 * list is empty... */
 		info->dqi_free_entry = blk;
 		mark_info_dirty(dquot->dq_sb, dquot->dq_type);
 	}
@@ -253,9 +252,12 @@
 	}
 	le16_add_cpu(&dh->dqdh_entries, 1);
 	/* Find free structure in block */
-	for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
-	     i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
-	     i++, ddquot += info->dqi_entry_size);
+	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+		if (qtree_entry_unused(info, ddquot))
+			break;
+		ddquot += info->dqi_entry_size;
+	}
 #ifdef __QUOTA_QT_PARANOIA
 	if (i == qtree_dqstr_in_blk(info)) {
 		printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@@ -273,10 +275,10 @@
 	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
 			sizeof(struct qt_disk_dqdbheader) +
 			i * info->dqi_entry_size;
-	freedqbuf(buf);
+	kfree(buf);
 	return blk;
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return 0;
 }
 
@@ -284,7 +286,7 @@
 static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 			  uint *treeblk, int depth)
 {
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	int ret = 0, newson = 0, newact = 0;
 	__le32 *ref;
 	uint newblk;
@@ -333,7 +335,7 @@
 		put_free_dqblk(info, buf, *treeblk);
 	}
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
@@ -346,14 +348,15 @@
 }
 
 /*
- *	We don't have to be afraid of deadlocks as we never have quotas on quota files...
+ * We don't have to be afraid of deadlocks as we never have quotas on quota
+ * files...
  */
 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 {
 	int type = dquot->dq_type;
 	struct super_block *sb = dquot->dq_sb;
 	ssize_t ret;
-	dqbuf_t ddquot = getdqbuf(info->dqi_entry_size);
+	char *ddquot = getdqbuf(info->dqi_entry_size);
 
 	if (!ddquot)
 		return -ENOMEM;
@@ -364,15 +367,15 @@
 		if (ret < 0) {
 			printk(KERN_ERR "VFS: Error %zd occurred while "
 					"creating quota.\n", ret);
-			freedqbuf(ddquot);
+			kfree(ddquot);
 			return ret;
 		}
 	}
 	spin_lock(&dq_data_lock);
 	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
 	spin_unlock(&dq_data_lock);
-	ret = sb->s_op->quota_write(sb, type, (char *)ddquot,
-					info->dqi_entry_size, dquot->dq_off);
+	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
+				    dquot->dq_off);
 	if (ret != info->dqi_entry_size) {
 		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
 		       sb->s_id);
@@ -382,7 +385,7 @@
 		ret = 0;
 	}
 	dqstats.writes++;
-	freedqbuf(ddquot);
+	kfree(ddquot);
 
 	return ret;
 }
@@ -393,7 +396,7 @@
 			uint blk)
 {
 	struct qt_disk_dqdbheader *dh;
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	int ret = 0;
 
 	if (!buf)
@@ -444,7 +447,7 @@
 	}
 	dquot->dq_off = 0;	/* Quota is now unattached */
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
@@ -452,7 +455,7 @@
 static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		       uint *blk, int depth)
 {
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	int ret = 0;
 	uint newblk;
 	__le32 *ref = (__le32 *)buf;
@@ -475,9 +478,8 @@
 		int i;
 		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
 		/* Block got empty? */
-		for (i = 0;
-		     i < (info->dqi_usable_bs >> 2) && !ref[i];
-		     i++);
+		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+			;
 		/* Don't put the root block into the free block list */
 		if (i == (info->dqi_usable_bs >> 2)
 		    && *blk != QT_TREEOFF) {
@@ -491,7 +493,7 @@
 		}
 	}
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
@@ -510,7 +512,7 @@
 static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
 				 struct dquot *dquot, uint blk)
 {
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	loff_t ret = 0;
 	int i;
 	char *ddquot;
@@ -522,9 +524,12 @@
 		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
 		goto out_buf;
 	}
-	for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader);
-	     i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
-	     i++, ddquot += info->dqi_entry_size);
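+	/* Search the data block for the entry matching this id */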
+	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+		if (info->dqi_ops->is_id(ddquot, dquot))
+			break;
+		ddquot += info->dqi_entry_size;
+	}
 	if (i == qtree_dqstr_in_blk(info)) {
 		printk(KERN_ERR "VFS: Quota for id %u referenced "
 		  "but not present.\n", dquot->dq_id);
@@ -535,7 +540,7 @@
 		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
 	}
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
@@ -543,7 +548,7 @@
 static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
 				struct dquot *dquot, uint blk, int depth)
 {
-	dqbuf_t buf = getdqbuf(info->dqi_usable_bs);
+	char *buf = getdqbuf(info->dqi_usable_bs);
 	loff_t ret = 0;
 	__le32 *ref = (__le32 *)buf;
 
@@ -563,7 +568,7 @@
 	else
 		ret = find_block_dqentry(info, dquot, blk);
 out_buf:
-	freedqbuf(buf);
+	kfree(buf);
 	return ret;
 }
 
@@ -579,7 +584,7 @@
 	int type = dquot->dq_type;
 	struct super_block *sb = dquot->dq_sb;
 	loff_t offset;
-	dqbuf_t ddquot;
+	char *ddquot;
 	int ret = 0;
 
 #ifdef __QUOTA_QT_PARANOIA
@@ -607,8 +612,8 @@
 	ddquot = getdqbuf(info->dqi_entry_size);
 	if (!ddquot)
 		return -ENOMEM;
-	ret = sb->s_op->quota_read(sb, type, (char *)ddquot,
-				   info->dqi_entry_size, dquot->dq_off);
+	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
+				   dquot->dq_off);
 	if (ret != info->dqi_entry_size) {
 		if (ret >= 0)
 			ret = -EIO;
@@ -616,7 +621,7 @@
 				"structure for id %u.\n", dquot->dq_id);
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
-		freedqbuf(ddquot);
+		kfree(ddquot);
 		goto out;
 	}
 	spin_lock(&dq_data_lock);
@@ -627,7 +632,7 @@
 	    !dquot->dq_dqb.dqb_isoftlimit)
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 	spin_unlock(&dq_data_lock);
-	freedqbuf(ddquot);
+	kfree(ddquot);
 out:
 	dqstats.reads++;
 	return ret;
@@ -638,7 +643,8 @@
  * the only one operating on dquot (thanks to dq_lock) */
 int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 {
-	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
+	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
 		return qtree_delete_dquot(info, dquot);
 	return 0;
 }
diff --git a/fs/quota_tree.h b/fs/quota/quota_tree.h
similarity index 100%
rename from fs/quota_tree.h
rename to fs/quota/quota_tree.h
diff --git a/fs/quota_v1.c b/fs/quota/quota_v1.c
similarity index 79%
rename from fs/quota_v1.c
rename to fs/quota/quota_v1.c
index b4af1c6..0edcf42 100644
--- a/fs/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -62,11 +62,14 @@
 
 	/* Set structure to 0s in case read fails/is after end of file */
 	memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
-	dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+	dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
+			sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
 
 	v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
-	if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 &&
-	    dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0)
+	if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
+	    dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+	    dquot->dq_dqb.dqb_ihardlimit == 0 &&
+	    dquot->dq_dqb.dqb_isoftlimit == 0)
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 	dqstats.reads++;
 
@@ -81,13 +84,16 @@
 
 	v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
 	if (dquot->dq_id == 0) {
-		dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
-		dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+		dqblk.dqb_btime =
+			sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+		dqblk.dqb_itime =
+			sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
 	}
 	ret = 0;
 	if (sb_dqopt(dquot->dq_sb)->files[type])
-		ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk,
-					sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+		ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+			(char *)&dqblk, sizeof(struct v1_disk_dqblk),
+			v1_dqoff(dquot->dq_id));
 	if (ret != sizeof(struct v1_disk_dqblk)) {
 		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
 			dquot->dq_sb->s_id);
@@ -130,15 +136,20 @@
 		return 0;
 	blocks = isize >> BLOCK_SIZE_BITS;
 	off = isize & (BLOCK_SIZE - 1);
-	if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
+	if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
+	    sizeof(struct v1_disk_dqblk))
 		return 0;
-	/* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
-	size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+	/* Doublecheck whether we didn't get file with new format - with old
+	 * quotactl() this could happen */
+	size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+				    sizeof(struct v2_disk_dqheader), 0);
 	if (size != sizeof(struct v2_disk_dqheader))
 		return 1;	/* Probably not new format */
 	if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
 		return 1;	/* Definitely not new format */
-	printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id);
+	printk(KERN_INFO
+	       "VFS: %s: Refusing to turn on old quota format on given file."
+	       " It probably contains newer quota format.\n", sb->s_id);
         return 0;		/* Seems like a new format file -> refuse it */
 }
 
@@ -148,7 +159,9 @@
 	struct v1_disk_dqblk dqblk;
 	int ret;
 
-	if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+	ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+				sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+	if (ret != sizeof(struct v1_disk_dqblk)) {
 		if (ret >= 0)
 			ret = -EIO;
 		goto out;
@@ -157,8 +170,10 @@
 	/* limits are stored as unsigned 32-bit data */
 	dqopt->info[type].dqi_maxblimit = 0xffffffff;
 	dqopt->info[type].dqi_maxilimit = 0xffffffff;
-	dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
-	dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+	dqopt->info[type].dqi_igrace =
+			dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+	dqopt->info[type].dqi_bgrace =
+			dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
 out:
 	return ret;
 }
@@ -170,8 +185,9 @@
 	int ret;
 
 	dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
-	if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
-	    sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) {
+	ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+				sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+	if (ret != sizeof(struct v1_disk_dqblk)) {
 		if (ret >= 0)
 			ret = -EIO;
 		goto out;
diff --git a/fs/quota_v2.c b/fs/quota/quota_v2.c
similarity index 98%
rename from fs/quota_v2.c
rename to fs/quota/quota_v2.c
index b618b56..a5475fb 100644
--- a/fs/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -54,7 +54,8 @@
 	static const uint quota_magics[] = V2_INITQMAGICS;
 	static const uint quota_versions[] = V2_INITQVERSIONS;
  
-	size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
+	size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+				    sizeof(struct v2_disk_dqheader), 0);
 	if (size != sizeof(struct v2_disk_dqheader)) {
 		printk("quota_v2: failed read expected=%zd got=%zd\n",
 			sizeof(struct v2_disk_dqheader), size);
diff --git a/fs/quotaio_v1.h b/fs/quota/quotaio_v1.h
similarity index 100%
rename from fs/quotaio_v1.h
rename to fs/quota/quotaio_v1.h
diff --git a/fs/quotaio_v2.h b/fs/quota/quotaio_v2.h
similarity index 100%
rename from fs/quotaio_v2.h
rename to fs/quota/quotaio_v2.h
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5d7c7ec..995ef1d 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/ramfs.h>
-#include <linux/quotaops.h>
 #include <linux/pagevec.h>
 #include <linux/mman.h>
 
@@ -205,11 +204,6 @@
 	if (ret)
 		return ret;
 
-	/* by providing our own setattr() method, we skip this quotaism */
-	if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
-	    (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
-		ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
-
 	/* pick out size-changing events */
 	if (ia->ia_valid & ATTR_SIZE) {
 		loff_t size = i_size_read(inode);
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4646caa..f32d142 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@
 
 	journal_mark_dirty(th, s, sbh);
 	if (for_unformatted)
-		DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+		vfs_dq_free_block_nodirty(inode, 1);
 }
 
 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@
 			       amount_needed, hint->inode->i_uid);
 #endif
 		quota_ret =
-		    DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
+		    vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
 		if (quota_ret)	/* Quota exceeded? */
 			return QUOTA_EXCEEDED;
 		if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@
 				       "reiserquota: allocating (prealloc) %d blocks id=%u",
 				       hint->prealloc_size, hint->inode->i_uid);
 #endif
-			quota_ret =
-			    DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
+			quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
 							 hint->prealloc_size);
 			if (quota_ret)
 				hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@
 					       nr_allocated,
 					       hint->inode->i_uid);
 #endif
-				DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated);	/* Free not allocated blocks */
+				/* Free not allocated blocks */
+				vfs_dq_free_block_nodirty(hint->inode,
+					amount_needed + hint->prealloc_size -
+					nr_allocated);
 			}
 			while (nr_allocated--)
 				reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@
 			       REISERFS_I(hint->inode)->i_prealloc_count,
 			       hint->inode->i_uid);
 #endif
-		DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
+		vfs_dq_free_block_nodirty(hint->inode, amount_needed +
 					 hint->prealloc_size - nr_allocated -
 					 REISERFS_I(hint->inode)->
 					 i_prealloc_count);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 55fce92..823227a 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@
 		 * after delete_object so that quota updates go into the same transaction as
 		 * stat data deletion */
 		if (!err) 
-			DQUOT_FREE_INODE(inode);
+			vfs_dq_free_inode(inode);
 
 		if (journal_end(&th, inode->i_sb, jbegin_count))
 			goto out;
@@ -1763,7 +1763,7 @@
 
 	BUG_ON(!th->t_trans_id);
 
-	if (DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		err = -EDQUOT;
 		goto out_end_trans;
 	}
@@ -1947,12 +1947,12 @@
 	INODE_PKEY(inode)->k_objectid = 0;
 
 	/* Quota change must be inside a transaction for journaling */
-	DQUOT_FREE_INODE(inode);
+	vfs_dq_free_inode(inode);
 
       out_end_trans:
 	journal_end(th, th->t_super, th->t_blocks_allocated);
 	/* Drop can be outside the transaction and it needs more credits, so it's better to have it there */
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	make_bad_inode(inode);
 
@@ -3119,7 +3119,7 @@
 				if (error)
 					goto out;
 				error =
-				    DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+				    vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
 				if (error) {
 					journal_end(&th, inode->i_sb,
 						    jbegin_count);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 738967f..639d635 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@
 */
 static int drop_new_inode(struct inode *inode)
 {
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	make_bad_inode(inode);
 	inode->i_flags |= S_NOQUOTA;
 	iput(inode);
@@ -563,7 +563,7 @@
 }
 
 /* utility function that does setup for reiserfs_new_inode.  
-** DQUOT_INIT needs lots of credits so it's better to have it
+** vfs_dq_init needs lots of credits so it's better to have it
 ** outside of a transaction, so we had to pull some bits of
 ** reiserfs_new_inode out into this func.
 */
@@ -586,7 +586,7 @@
 	} else {
 		inode->i_gid = current_fsgid();
 	}
-	DQUOT_INIT(inode);
+	vfs_dq_init(inode);
 	return 0;
 }
 
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index abbc64d..73aaa33 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@
 		       "reiserquota delete_item(): freeing %u, id=%u type=%c",
 		       quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
 #endif
-	DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+	vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
 
 	/* Return deleted body length */
 	return n_ret_value;
@@ -1383,7 +1383,7 @@
 					       quota_cut_bytes, inode->i_uid,
 					       key2type(key));
 #endif
-				DQUOT_FREE_SPACE_NODIRTY(inode,
+				vfs_dq_free_space_nodirty(inode,
 							 quota_cut_bytes);
 			}
 			break;
@@ -1734,7 +1734,7 @@
 		       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
 		       quota_cut_bytes, p_s_inode->i_uid, '?');
 #endif
-	DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
+	vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
 	return n_ret_value;
 }
 
@@ -1971,7 +1971,7 @@
 		       key2type(&(p_s_key->on_disk_key)));
 #endif
 
-	if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+	if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
 		pathrelse(p_s_search_path);
 		return -EDQUOT;
 	}
@@ -2027,7 +2027,7 @@
 		       n_pasted_size, inode->i_uid,
 		       key2type(&(p_s_key->on_disk_key)));
 #endif
-	DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+	vfs_dq_free_space_nodirty(inode, n_pasted_size);
 	return retval;
 }
 
@@ -2060,7 +2060,7 @@
 #endif
 		/* We can't dirty inode here. It would be immediately written but
 		 * appropriate stat item isn't inserted yet... */
-		if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+		if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
 			pathrelse(p_s_path);
 			return -EDQUOT;
 		}
@@ -2112,6 +2112,6 @@
 		       quota_bytes, inode->i_uid, head2type(p_s_ih));
 #endif
 	if (inode)
-		DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+		vfs_dq_free_space_nodirty(inode, quota_bytes);
 	return retval;
 }
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b..5dbafb7 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@
 			retval = remove_save_link_only(s, &save_link_key, 0);
 			continue;
 		}
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 
 		if (truncate && S_ISDIR(inode->i_mode)) {
 			/* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@
 #ifdef CONFIG_QUOTA
 #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
 
-static int reiserfs_dquot_initialize(struct inode *, int);
-static int reiserfs_dquot_drop(struct inode *);
 static int reiserfs_write_dquot(struct dquot *);
 static int reiserfs_acquire_dquot(struct dquot *);
 static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@
 static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
 
 static struct dquot_operations reiserfs_quota_operations = {
-	.initialize = reiserfs_dquot_initialize,
-	.drop = reiserfs_dquot_drop,
+	.initialize = dquot_initialize,
+	.drop = dquot_drop,
 	.alloc_space = dquot_alloc_space,
 	.alloc_inode = dquot_alloc_inode,
 	.free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@
 }
 
 #ifdef CONFIG_QUOTA
-static int reiserfs_dquot_initialize(struct inode *inode, int type)
-{
-	struct reiserfs_transaction_handle th;
-	int ret, err;
-
-	/* We may create quota structure so we need to reserve enough blocks */
-	reiserfs_write_lock(inode->i_sb);
-	ret =
-	    journal_begin(&th, inode->i_sb,
-			  2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
-	if (ret)
-		goto out;
-	ret = dquot_initialize(inode, type);
-	err =
-	    journal_end(&th, inode->i_sb,
-			2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
-	if (!ret && err)
-		ret = err;
-      out:
-	reiserfs_write_unlock(inode->i_sb);
-	return ret;
-}
-
-static int reiserfs_dquot_drop(struct inode *inode)
-{
-	struct reiserfs_transaction_handle th;
-	int ret, err;
-
-	/* We may delete quota structure so we need to reserve enough blocks */
-	reiserfs_write_lock(inode->i_sb);
-	ret =
-	    journal_begin(&th, inode->i_sb,
-			  2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
-	if (ret) {
-		/*
-		 * We call dquot_drop() anyway to at least release references
-		 * to quota structures so that umount does not hang.
-		 */
-		dquot_drop(inode);
-		goto out;
-	}
-	ret = dquot_drop(inode);
-	err =
-	    journal_end(&th, inode->i_sb,
-			2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
-	if (!ret && err)
-		ret = err;
-      out:
-	reiserfs_write_unlock(inode->i_sb);
-	return ret;
-}
-
 static int reiserfs_write_dquot(struct dquot *dquot)
 {
 	struct reiserfs_transaction_handle th;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ad92461..ae881cc 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -1136,7 +1136,7 @@
 	return 1;
 }
 
-static struct dentry_operations xattr_lookup_poison_ops = {
+static const struct dentry_operations xattr_lookup_poison_ops = {
 	.d_compare = xattr_lookup_poison,
 };
 
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index e7ddd03..3e4803b 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -277,7 +277,7 @@
 static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
 static int smb_delete_dentry(struct dentry *);
 
-static struct dentry_operations smbfs_dentry_operations =
+static const struct dentry_operations smbfs_dentry_operations =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_hash		= smb_hash_dentry,
@@ -285,7 +285,7 @@
 	.d_delete	= smb_delete_dentry,
 };
 
-static struct dentry_operations smbfs_dentry_operations_case =
+static const struct dentry_operations smbfs_dentry_operations_case =
 {
 	.d_revalidate	= smb_lookup_validate,
 	.d_delete	= smb_delete_dentry,
diff --git a/fs/super.c b/fs/super.c
index dd4acb1..2ba4815 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@
 	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
 		s->s_count -= S_BIAS-1;
 		spin_unlock(&sb_lock);
-		DQUOT_OFF(s, 0);
+		vfs_dq_off(s, 0);
 		down_write(&s->s_umount);
 		fs->kill_sb(s);
 		put_filesystem(fs);
@@ -266,7 +266,7 @@
 void __fsync_super(struct super_block *sb)
 {
 	sync_inodes_sb(sb, 0);
-	DQUOT_SYNC(sb);
+	vfs_dq_sync(sb);
 	lock_super(sb);
 	if (sb->s_dirt && sb->s_op->write_super)
 		sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@
 			mark_files_ro(sb);
 		else if (!fs_may_remount_ro(sb))
 			return -EBUSY;
-		retval = DQUOT_OFF(sb, 1);
+		retval = vfs_dq_off(sb, 1);
 		if (retval < 0 && retval != -ENOSYS)
 			return -EBUSY;
 	}
@@ -670,7 +670,7 @@
 	}
 	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
 	if (remount_rw)
-		DQUOT_ON_REMOUNT(sb);
+		vfs_dq_quota_on_remount(sb);
 	return 0;
 }
 
@@ -838,7 +838,8 @@
 		bdev->bd_super = s;
 	}
 
-	return simple_set_mnt(mnt, s);
+	simple_set_mnt(mnt, s);
+	return 0;
 
 error_s:
 	error = PTR_ERR(s);
@@ -884,7 +885,8 @@
 		return error;
 	}
 	s->s_flags |= MS_ACTIVE;
-	return simple_set_mnt(mnt, s);
+	simple_set_mnt(mnt, s);
+	return 0;
 }
 
 EXPORT_SYMBOL(get_sb_nodev);
@@ -916,7 +918,8 @@
 		s->s_flags |= MS_ACTIVE;
 	}
 	do_remount_sb(s, flags, data, 0);
-	return simple_set_mnt(mnt, s);
+	simple_set_mnt(mnt, s);
+	return 0;
 }
 
 EXPORT_SYMBOL(get_sb_single);
diff --git a/fs/sync.c b/fs/sync.c
index ec95a69..7abc65f 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@
 {
 	wakeup_pdflush(0);
 	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
-	DQUOT_SYNC(NULL);
+	vfs_dq_sync(NULL);
 	sync_supers();		/* Write the superblocks */
 	sync_filesystems(0);	/* Start syncing the filesystems */
 	sync_filesystems(wait);	/* Waitingly sync the filesystems */
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 66aeb4f..d88d0fa 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -302,7 +302,7 @@
 	iput(inode);
 }
 
-static struct dentry_operations sysfs_dentry_ops = {
+static const struct dentry_operations sysfs_dentry_ops = {
 	.d_iput		= sysfs_d_iput,
 };
 
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a1f1ef3..33e047b 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -38,7 +38,7 @@
 	return 0;
 }
 
-struct dentry_operations sysv_dentry_operations = {
+const struct dentry_operations sysv_dentry_operations = {
 	.d_hash		= sysv_hash,
 };
 
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 38ebe3f..5784a31 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -170,7 +170,7 @@
 extern const struct file_operations sysv_dir_operations;
 extern const struct address_space_operations sysv_aops;
 extern const struct super_operations sysv_sops;
-extern struct dentry_operations sysv_dentry_operations;
+extern const struct dentry_operations sysv_dentry_operations;
 
 
 enum {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1182b66..c5c9835 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2034,7 +2034,8 @@
 	/* 'fill_super()' opens ubi again so we must close it here */
 	ubi_close_volume(ubi);
 
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 
 out_deact:
 	up_write(&sb->s_umount);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd4..2bb788a 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@
 					((char *)bh->b_data)[(bit + i) >> 3]);
 			} else {
 				if (inode)
-					DQUOT_FREE_BLOCK(inode, 1);
+					vfs_dq_free_block(inode, 1);
 				udf_add_free_space(sbi, sbi->s_partition, 1);
 			}
 		}
@@ -261,11 +261,11 @@
 		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
 			if (!udf_test_bit(bit, bh->b_data))
 				goto out;
-			else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+			else if (vfs_dq_prealloc_block(inode, 1))
 				goto out;
 			else if (!udf_clear_bit(bit, bh->b_data)) {
 				udf_debug("bit already cleared for block %d\n", bit);
-				DQUOT_FREE_BLOCK(inode, 1);
+				vfs_dq_free_block(inode, 1);
 				goto out;
 			}
 			block_count--;
@@ -393,7 +393,7 @@
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (inode && vfs_dq_alloc_block(inode, 1)) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
 		return 0;
@@ -452,7 +452,7 @@
 	/* We do this up front - there are some error conditions that
 	   could occur, but... oh well */
 	if (inode)
-		DQUOT_FREE_BLOCK(inode, count);
+		vfs_dq_free_block(inode, count);
 	if (udf_add_free_space(sbi, sbi->s_partition, count))
 		mark_buffer_dirty(sbi->s_lvid_bh);
 
@@ -700,7 +700,7 @@
 		epos.offset -= adsize;
 
 		alloc_count = (elen >> sb->s_blocksize_bits);
-		if (inode && DQUOT_PREALLOC_BLOCK(inode,
+		if (inode && vfs_dq_prealloc_block(inode,
 			alloc_count > block_count ? block_count : alloc_count))
 			alloc_count = 0;
 		else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@
 	goal_eloc.logicalBlockNum++;
 	goal_elen -= sb->s_blocksize;
 
-	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (inode && vfs_dq_alloc_block(inode, 1)) {
 		brelse(goal_epos.bh);
 		mutex_unlock(&sbi->s_alloc_mutex);
 		*err = -EDQUOT;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc842..47dbe56 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
 	 */
-	DQUOT_FREE_INODE(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_free_inode(inode);
+	vfs_dq_drop(inode);
 
 	clear_inode(inode);
 
@@ -154,8 +154,8 @@
 	insert_inode_hash(inode);
 	mark_inode_dirty(inode);
 
-	if (DQUOT_ALLOC_INODE(inode)) {
-		DQUOT_DROP(inode);
+	if (vfs_dq_alloc_inode(inode)) {
+		vfs_dq_drop(inode);
 		inode->i_flags |= S_NOQUOTA;
 		inode->i_nlink = 0;
 		iput(inode);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0d9ada1..54c16ec 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@
 				   "bit already cleared for fragment %u", i);
 	}
 	
-	DQUOT_FREE_BLOCK (inode, count);
+	vfs_dq_free_block(inode, count);
 
 	
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@
 		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 			ufs_clusteracct (sb, ucpi, blkno, 1);
-		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
+		vfs_dq_free_block(inode, uspi->s_fpb);
 
 		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
 		uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@
 		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
 	for (i = oldcount; i < newcount; i++)
 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
-	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+	if (vfs_dq_alloc_block(inode, count)) {
 		*err = -EDQUOT;
 		return 0;
 	}
@@ -664,7 +664,7 @@
 		for (i = count; i < uspi->s_fpb; i++)
 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 		i = uspi->s_fpb - count;
-		DQUOT_FREE_BLOCK(inode, i);
+		vfs_dq_free_block(inode, i);
 
 		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
 		uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@
 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
 	if (result == INVBLOCK)
 		return 0;
-	if(DQUOT_ALLOC_BLOCK(inode, count)) {
+	if (vfs_dq_alloc_block(inode, count)) {
 		*err = -EDQUOT;
 		return 0;
 	}
@@ -747,7 +747,7 @@
 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 		ufs_clusteracct (sb, ucpi, blkno, -1);
-	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
+	if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
 		*err = -EDQUOT;
 		return INVBLOCK;
 	}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 6f5dcf0..3527c00 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@
 
 	is_directory = S_ISDIR(inode->i_mode);
 
-	DQUOT_FREE_INODE(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_free_inode(inode);
+	vfs_dq_drop(inode);
 
 	clear_inode (inode);
 
@@ -355,8 +355,8 @@
 
 	unlock_super (sb);
 
-	if (DQUOT_ALLOC_INODE(inode)) {
-		DQUOT_DROP(inode);
+	if (vfs_dq_alloc_inode(inode)) {
+		vfs_dq_drop(inode);
 		err = -EDQUOT;
 		goto fail_without_unlock;
 	}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f8778..3d2512c 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	mode_t mode;
-	unsigned i;
 
 	/*
 	 * Copy data to the in-core inode.
@@ -655,11 +654,12 @@
 
 	
 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
-			ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
+		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
+		       sizeof(ufs_inode->ui_u2.ui_addr));
 	} else {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
-			ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
+		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
+		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
+		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
 	}
 	return 0;
 }
@@ -669,7 +669,6 @@
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	mode_t mode;
-	unsigned i;
 
 	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 	/*
@@ -704,12 +703,12 @@
 	*/
 
 	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
-			ufsi->i_u1.u2_i_data[i] =
-				ufs2_inode->ui_u2.ui_addr.ui_db[i];
+		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
+		       sizeof(ufs2_inode->ui_u2.ui_addr));
 	} else {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
-			ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
+		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
+		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
+		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
 	}
 	return 0;
 }
@@ -781,7 +780,6 @@
 {
 	struct super_block *sb = inode->i_sb;
  	struct ufs_inode_info *ufsi = UFS_I(inode);
- 	unsigned i;
 
 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
@@ -809,12 +807,12 @@
 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
 	} else if (inode->i_blocks) {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
-			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
+		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
+		       sizeof(ufs_inode->ui_u2.ui_addr));
 	}
 	else {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
-			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+		       sizeof(ufs_inode->ui_u2.ui_symlink));
 	}
 
 	if (!inode->i_nlink)
@@ -825,7 +823,6 @@
 {
 	struct super_block *sb = inode->i_sb;
  	struct ufs_inode_info *ufsi = UFS_I(inode);
- 	unsigned i;
 
 	UFSD("ENTER\n");
 	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
@@ -850,11 +847,11 @@
 		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
 	} else if (inode->i_blocks) {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
-			ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i];
+		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
+		       sizeof(ufs_inode->ui_u2.ui_addr));
 	} else {
-		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
-			ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
+		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
+		       sizeof(ufs_inode->ui_u2.ui_symlink));
  	}
 
 	if (!inode->i_nlink)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index e3a9b1f..23119fe 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -147,7 +147,7 @@
 	} else {
 		/* fast symlink */
 		inode->i_op = &ufs_fast_symlink_inode_operations;
-		memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l);
+		memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
 		inode->i_size = l-1;
 	}
 	mark_inode_dirty(inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2..e1c1fc5 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@
 	unsigned block_size, super_block_size;
 	unsigned flags;
 	unsigned super_block_offset;
+	unsigned maxsymlen;
 	int ret = -EINVAL;
 
 	uspi = NULL;
@@ -1069,6 +1070,16 @@
 		uspi->s_maxsymlinklen =
 		    fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
 
+	if (uspi->fs_magic == UFS2_MAGIC)
+		maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
+	else
+		maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
+	if (uspi->s_maxsymlinklen > maxsymlen) {
+		ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
+			    "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
+		uspi->s_maxsymlinklen = maxsymlen;
+	}
+
 	inode = ufs_iget(sb, UFS_ROOTINO);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 11c0351..69b3427 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -23,7 +23,7 @@
 struct ufs_inode_info {
 	union {
 		__fs32	i_data[15];
-		__u8	i_symlink[4*15];
+		__u8	i_symlink[2 * 4 * 15];
 		__fs64	u2_i_data[15];
 	} i_u1;
 	__u32	i_flags;
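
The ufs.h change above doubles i_symlink because UFS2 keeps fast-symlink
bytes in the same on-disk union as fifteen 64-bit block pointers (120
bytes), while UFS1 uses fifteen 32-bit pointers (60 bytes); the super.c
hunk clamps s_maxsymlinklen to the matching limit. A minimal standalone
sketch of the arithmetic, using plain stdint types in place of the
kernel's __fs32/__fs64:

#include <assert.h>
#include <stdint.h>

#define UFS_NDADDR 12	/* direct block pointers */
#define UFS_NINDIR 3	/* indirect block pointers */

union ufs_fast_symlink {
	uint32_t i_data[UFS_NDADDR + UFS_NINDIR];	/* UFS1: 4 * 15 = 60 */
	uint64_t u2_i_data[UFS_NDADDR + UFS_NINDIR];	/* UFS2: 8 * 15 = 120 */
	uint8_t  i_symlink[2 * 4 * (UFS_NDADDR + UFS_NINDIR)];
};

int main(void)
{
	union ufs_fast_symlink u;

	/* The symlink buffer must cover the larger (UFS2) member. */
	assert(sizeof(u.i_data) == 60);
	assert(sizeof(u.u2_i_data) == 120);
	assert(sizeof(u.i_symlink) == 120);
	return 0;
}
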
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e5f4ae9..c19a93c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -758,6 +758,8 @@
 
 	int (*proc_init)(struct drm_minor *minor);
 	void (*proc_cleanup)(struct drm_minor *minor);
+	int (*debugfs_init)(struct drm_minor *minor);
+	void (*debugfs_cleanup)(struct drm_minor *minor);
 
 	/**
 	 * Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +795,48 @@
 #define DRM_MINOR_CONTROL 2
 #define DRM_MINOR_RENDER 3
 
+
+/**
+ * debugfs node list. This structure represents a debugfs file to
+ * be created by the drm core
+ */
+struct drm_debugfs_list {
+	const char *name; /**< file name */
+	int (*show)(struct seq_file*, void*); /**< show callback */
+	u32 driver_features; /**< Required driver features for this entry */
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_debugfs_node {
+	struct list_head list;
+	struct drm_minor *minor;
+	struct drm_debugfs_list *debugfs_ent;
+	struct dentry *dent;
+};
+
+/**
+ * Info file list entry. This structure represents a debugfs or proc file to
+ * be created by the drm core
+ */
+struct drm_info_list {
+	const char *name; /**< file name */
+	int (*show)(struct seq_file*, void*); /**< show callback */
+	u32 driver_features; /**< Required driver features for this entry */
+	void *data;
+};
+
+/**
+ * Info file node structure. This structure represents a debugfs or proc file.
+ */
+struct drm_info_node {
+	struct list_head list;
+	struct drm_minor *minor;
+	struct drm_info_list *info_ent;
+	struct dentry *dent;
+};
+
 /**
  * DRM minor structure. This structure represents a drm minor number.
  */
@@ -802,7 +846,12 @@
 	dev_t device;			/**< Device number for mknod */
 	struct device kdev;		/**< Linux device */
 	struct drm_device *dev;
-	struct proc_dir_entry *dev_root;  /**< proc directory entry */
+
+	struct proc_dir_entry *proc_root;  /**< proc directory entry */
+	struct drm_info_node proc_nodes;
+	struct dentry *debugfs_root;
+	struct drm_info_node debugfs_nodes;
+
 	struct drm_master *master; /* currently active master for this node */
 	struct list_head master_list;
 	struct drm_mode_group mode_group;
@@ -1258,6 +1307,7 @@
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
+extern struct dentry *drm_debugfs_root;
 
 extern struct idr drm_minors_idr;
 
@@ -1268,6 +1318,31 @@
 			 struct proc_dir_entry *root);
 extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
 
+				/* Debugfs support */
+#if defined(CONFIG_DEBUG_FS)
+extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+			    struct dentry *root);
+extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
+				    struct dentry *root, struct drm_minor *minor);
+extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+                                    struct drm_minor *minor);
+extern int drm_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+				/* Info file support */
+extern int drm_name_info(struct seq_file *m, void *data);
+extern int drm_vm_info(struct seq_file *m, void *data);
+extern int drm_queues_info(struct seq_file *m, void *data);
+extern int drm_bufs_info(struct seq_file *m, void *data);
+extern int drm_vblank_info(struct seq_file *m, void *data);
+extern int drm_clients_info(struct seq_file *m, void* data);
+extern int drm_gem_name_info(struct seq_file *m, void *data);
+extern int drm_gem_object_info(struct seq_file *m, void* data);
+
+#if DRM_DEBUG_CODE
+extern int drm_vma_info(struct seq_file *m, void *data);
+#endif
+
 				/* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
 extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 5165f24..76c4c82 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -418,4 +418,6 @@
 	{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+	{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
 	{0, 0, 0}
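
The drmP.h additions above give drivers two new hooks and a pair of
helpers for populating debugfs. A hypothetical driver-side sketch of the
intended wiring; the foo_* names are illustrative, only the drm_* symbols
come from the header:

#include "drmP.h"

static int foo_info(struct seq_file *m, void *data)
{
	seq_puts(m, "hello from foo\n");
	return 0;
}

static struct drm_info_list foo_debugfs_list[] = {
	{ "foo_info", foo_info, 0, NULL },
};
#define FOO_DEBUGFS_ENTRIES ARRAY_SIZE(foo_debugfs_list)

static int foo_debugfs_init(struct drm_minor *minor)
{
	/* Creates one debugfs file per list entry under the minor's root. */
	return drm_debugfs_create_files(foo_debugfs_list, FOO_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

static void foo_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(foo_debugfs_list, FOO_DEBUGFS_ENTRIES, minor);
}

/* Hooked up in the driver's struct drm_driver:
 *	.debugfs_init    = foo_debugfs_init,
 *	.debugfs_cleanup = foo_debugfs_cleanup,
 */
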
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac79..f19fd90 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,15 +165,8 @@
 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
 
 void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *);
-int sync_blockdev(struct block_device *bdev);
 void __wait_on_buffer(struct buffer_head *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-int fsync_bdev(struct block_device *);
-struct super_block *freeze_bdev(struct block_device *);
-int thaw_bdev(struct block_device *, struct super_block *);
-int fsync_super(struct super_block *);
-int fsync_no_super(struct block_device *);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
 			unsigned size);
 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fd2194..b880864 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -125,6 +125,13 @@
 	char		d_name[256];
 };
 
+struct compat_ustat {
+	compat_daddr_t		f_tfree;
+	compat_ino_t		f_tinode;
+	char			f_fname[6];
+	char			f_fpack[6];
+};
+
 typedef union compat_sigval {
 	compat_int_t	sival_int;
 	compat_uptr_t	sival_ptr;
@@ -178,6 +185,7 @@
 		unsigned nsems, const struct compat_timespec __user *timeout);
 asmlinkage long compat_sys_keyctl(u32 option,
 			      u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
 
 asmlinkage ssize_t compat_sys_readv(unsigned long fd,
 		const struct compat_iovec __user *vec, unsigned long vlen);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c66d224..1515636 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -112,7 +112,7 @@
 	struct list_head d_subdirs;	/* our children */
 	struct list_head d_alias;	/* inode alias list */
 	unsigned long d_time;		/* used by d_revalidate */
-	struct dentry_operations *d_op;
+	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	void *d_fsdata;			/* fs-specific data */
 
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 4d078e9..c6b3ca3 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -25,10 +25,12 @@
 #include <linux/types.h>
 #include <linux/firewire-constants.h>
 
-#define FW_CDEV_EVENT_BUS_RESET		0x00
-#define FW_CDEV_EVENT_RESPONSE		0x01
-#define FW_CDEV_EVENT_REQUEST		0x02
-#define FW_CDEV_EVENT_ISO_INTERRUPT	0x03
+#define FW_CDEV_EVENT_BUS_RESET			0x00
+#define FW_CDEV_EVENT_RESPONSE			0x01
+#define FW_CDEV_EVENT_REQUEST			0x02
+#define FW_CDEV_EVENT_ISO_INTERRUPT		0x03
+#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED	0x04
+#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED	0x05
 
 /**
  * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -136,7 +138,24 @@
  * This event is sent when the controller has completed an &fw_cdev_iso_packet
  * with the %FW_CDEV_ISO_INTERRUPT bit set.  In the receive case, the headers
  * stripped of all packets up until and including the interrupt packet are
- * returned in the @header field.
+ * returned in the @header field.  The amount of header data per packet is as
+ * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
+ *
+ * In version 1 of this ABI, header data consisted of the 1394 isochronous
+ * packet header, followed by quadlets from the packet payload if
+ * &fw_cdev_create_iso_context.header_size > 4.
+ *
+ * In version 2 of this ABI, header data consist of the 1394 isochronous
+ * packet header, followed by a timestamp quadlet if
+ * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
+ * packet payload if &fw_cdev_create_iso_context.header_size > 8.
+ *
+ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
+ *
+ * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
+ * 4 bits tcode, 4 bits sy, in big endian byte order.  Format of timestamp:
+ * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
+ * order.
  */
 struct fw_cdev_event_iso_interrupt {
 	__u64 closure;
@@ -147,12 +166,44 @@
 };
 
 /**
+ * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
+ * @closure:	See &fw_cdev_event_common;
+ *		set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
+ * @type:	%FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ *		%FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
+ * @handle:	Reference by which an allocated resource can be deallocated
+ * @channel:	Isochronous channel which was (de)allocated, if any
+ * @bandwidth:	Bandwidth allocation units which were (de)allocated, if any
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
+ * resource was allocated at the IRM.  The client has to check @channel and
+ * @bandwidth for whether the allocation actually succeeded.
+ *
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
+ * resource was deallocated at the IRM.  It is also sent when automatic
+ * reallocation after a bus reset failed.
+ *
+ * @channel is <0 if no channel was (de)allocated or if reallocation failed.
+ * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
+ */
+struct fw_cdev_event_iso_resource {
+	__u64 closure;
+	__u32 type;
+	__u32 handle;
+	__s32 channel;
+	__s32 bandwidth;
+};
+
+/**
  * union fw_cdev_event - Convenience union of fw_cdev_event_ types
  * @common:        Valid for all types
  * @bus_reset:     Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
  * @response:      Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
  * @request:       Valid if @common.type == %FW_CDEV_EVENT_REQUEST
  * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
+ * @iso_resource:  Valid if @common.type ==
+ *				%FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
+ *				%FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
  *
  * Convenience union for userspace use.  Events could be read(2) into an
  * appropriately aligned char buffer and then cast to this union for further
@@ -163,33 +214,47 @@
  * not fit will be discarded so that the next read(2) will return a new event.
  */
 union fw_cdev_event {
-	struct fw_cdev_event_common common;
-	struct fw_cdev_event_bus_reset bus_reset;
-	struct fw_cdev_event_response response;
-	struct fw_cdev_event_request request;
-	struct fw_cdev_event_iso_interrupt iso_interrupt;
+	struct fw_cdev_event_common		common;
+	struct fw_cdev_event_bus_reset		bus_reset;
+	struct fw_cdev_event_response		response;
+	struct fw_cdev_event_request		request;
+	struct fw_cdev_event_iso_interrupt	iso_interrupt;
+	struct fw_cdev_event_iso_resource	iso_resource;
 };
 
-#define FW_CDEV_IOC_GET_INFO		_IOWR('#', 0x00, struct fw_cdev_get_info)
-#define FW_CDEV_IOC_SEND_REQUEST	_IOW('#', 0x01, struct fw_cdev_send_request)
-#define FW_CDEV_IOC_ALLOCATE		_IOWR('#', 0x02, struct fw_cdev_allocate)
-#define FW_CDEV_IOC_DEALLOCATE		_IOW('#', 0x03, struct fw_cdev_deallocate)
-#define FW_CDEV_IOC_SEND_RESPONSE	_IOW('#', 0x04, struct fw_cdev_send_response)
-#define FW_CDEV_IOC_INITIATE_BUS_RESET	_IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
-#define FW_CDEV_IOC_ADD_DESCRIPTOR	_IOWR('#', 0x06, struct fw_cdev_add_descriptor)
-#define FW_CDEV_IOC_REMOVE_DESCRIPTOR	_IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+/* available since kernel version 2.6.22 */
+#define FW_CDEV_IOC_GET_INFO           _IOWR('#', 0x00, struct fw_cdev_get_info)
+#define FW_CDEV_IOC_SEND_REQUEST        _IOW('#', 0x01, struct fw_cdev_send_request)
+#define FW_CDEV_IOC_ALLOCATE           _IOWR('#', 0x02, struct fw_cdev_allocate)
+#define FW_CDEV_IOC_DEALLOCATE          _IOW('#', 0x03, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_SEND_RESPONSE       _IOW('#', 0x04, struct fw_cdev_send_response)
+#define FW_CDEV_IOC_INITIATE_BUS_RESET  _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
+#define FW_CDEV_IOC_ADD_DESCRIPTOR     _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
+#define FW_CDEV_IOC_REMOVE_DESCRIPTOR   _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
+#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
+#define FW_CDEV_IOC_QUEUE_ISO          _IOWR('#', 0x09, struct fw_cdev_queue_iso)
+#define FW_CDEV_IOC_START_ISO           _IOW('#', 0x0a, struct fw_cdev_start_iso)
+#define FW_CDEV_IOC_STOP_ISO            _IOW('#', 0x0b, struct fw_cdev_stop_iso)
 
-#define FW_CDEV_IOC_CREATE_ISO_CONTEXT	_IOWR('#', 0x08, struct fw_cdev_create_iso_context)
-#define FW_CDEV_IOC_QUEUE_ISO		_IOWR('#', 0x09, struct fw_cdev_queue_iso)
-#define FW_CDEV_IOC_START_ISO		_IOW('#', 0x0a, struct fw_cdev_start_iso)
-#define FW_CDEV_IOC_STOP_ISO		_IOW('#', 0x0b, struct fw_cdev_stop_iso)
-#define FW_CDEV_IOC_GET_CYCLE_TIMER	_IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
+/* available since kernel version 2.6.24 */
+#define FW_CDEV_IOC_GET_CYCLE_TIMER     _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
 
-/* FW_CDEV_VERSION History
- *
- * 1	Feb 18, 2007:  Initial version.
+/* available since kernel version 2.6.30 */
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE       _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE      _IOW('#', 0x0e, struct fw_cdev_deallocate)
+#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE   _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
+#define FW_CDEV_IOC_GET_SPEED                     _IO('#', 0x11) /* returns speed code */
+#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST       _IOW('#', 0x12, struct fw_cdev_send_request)
+#define FW_CDEV_IOC_SEND_STREAM_PACKET           _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
+
+/*
+ * FW_CDEV_VERSION History
+ *  1  (2.6.22)  - initial version
+ *  2  (2.6.30)  - changed &fw_cdev_event_iso_interrupt.header if
+ *                 &fw_cdev_create_iso_context.header_size is 8 or more
  */
-#define FW_CDEV_VERSION		1
+#define FW_CDEV_VERSION 2
 
 /**
  * struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +266,7 @@
  *		case, @rom_length is updated with the actual length of the
  *		configuration ROM.
  * @rom:	If non-zero, address of a buffer to be filled by a copy of the
- *		local node's configuration ROM
+ *		device's configuration ROM
  * @bus_reset:	If non-zero, address of a buffer to be filled by a
  *		&struct fw_cdev_event_bus_reset with the current state
  *		of the bus.  This does not cause a bus reset to happen.
@@ -229,7 +294,7 @@
  * Send a request to the device.  This ioctl implements all outgoing requests.
  * Both quadlet and block request specify the payload as a pointer to the data
  * in the @data field.  Once the transaction completes, the kernel writes an
- * &fw_cdev_event_request event back.  The @closure field is passed back to
+ * &fw_cdev_event_response event back.  The @closure field is passed back to
  * user space in the response event.
  */
 struct fw_cdev_send_request {
@@ -284,9 +349,9 @@
 };
 
 /**
- * struct fw_cdev_deallocate - Free an address range allocation
- * @handle:	Handle to the address range, as returned by the kernel when the
- *		range was allocated
+ * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
+ * @handle:	Handle to the address range or iso resource, as returned by the
+ *		kernel when the range or resource was allocated
  */
 struct fw_cdev_deallocate {
 	__u32 handle;
@@ -329,6 +394,9 @@
  * If successful, the kernel adds the descriptor and writes back a handle to the
  * kernel-side object to be used for later removal of the descriptor block and
  * immediate key.
+ *
+ * This ioctl affects the configuration ROMs of all local nodes.
+ * The ioctl only succeeds on device files which represent a local node.
  */
 struct fw_cdev_add_descriptor {
 	__u32 immediate;
@@ -344,7 +412,7 @@
  *		descriptor was added
  *
  * Remove a descriptor block and accompanying immediate key from the local
- * node's configuration ROM.
+ * nodes' configuration ROMs.
  */
 struct fw_cdev_remove_descriptor {
 	__u32 handle;
@@ -370,6 +438,9 @@
  *
  * If a context was successfully created, the kernel writes back a handle to the
  * context, which must be passed in for subsequent operations on that context.
+ *
+ * Note that the effect of a @header_size > 4 depends on
+ * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
  */
 struct fw_cdev_create_iso_context {
 	__u32 type;
@@ -473,10 +544,91 @@
  * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
  * and also the system clock.  This allows expressing the receive time of an
  * isochronous packet as a system time with microsecond accuracy.
+ *
+ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
+ * 12 bits cycleOffset, in host byte order.
  */
 struct fw_cdev_get_cycle_timer {
 	__u64 local_time;
 	__u32 cycle_timer;
 };
 
+/**
+ * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
+ * @closure:	Passed back to userspace in corresponding iso resource events
+ * @channels:	Isochronous channels of which one is to be (de)allocated
+ * @bandwidth:	Isochronous bandwidth units to be (de)allocated
+ * @handle:	Handle to the allocation, written by the kernel (only valid in
+ *		case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
+ * isochronous channel and/or of isochronous bandwidth at the isochronous
+ * resource manager (IRM).  Only one of the channels specified in @channels is
+ * allocated.  An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
+ * communication with the IRM, indicating success or failure in the event data.
+ * The kernel will automatically reallocate the resources after bus resets.
+ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
+ * will be sent.  The kernel will also automatically deallocate the resources
+ * when the file descriptor is closed.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
+ * deallocation of resources which were allocated as described above.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
+ * without automatic re- or deallocation.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
+ * indicating success or failure in its data.
+ *
+ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
+ * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
+ * instead of allocated.
+ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
+ *
+ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or handle.
+ * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
+ * for the duration of a bus generation.
+ *
+ * @channels is a host-endian bitfield with the least significant bit
+ * representing channel 0 and the most significant bit representing channel 63:
+ * 1ULL << c for each channel c that is a candidate for (de)allocation.
+ *
+ * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
+ * one quadlet of data (payload or header data) at speed S1600.
+ */
+struct fw_cdev_allocate_iso_resource {
+	__u64 closure;
+	__u64 channels;
+	__u32 bandwidth;
+	__u32 handle;
+};
+
+/**
+ * struct fw_cdev_send_stream_packet - send an asynchronous stream packet
+ * @length:	Length of outgoing payload, in bytes
+ * @tag:	Data format tag
+ * @channel:	Isochronous channel to transmit to
+ * @sy:		Synchronization code
+ * @closure:	Passed back to userspace in the response event
+ * @data:	Userspace pointer to payload
+ * @generation:	The bus generation where packet is valid
+ * @speed:	Speed to transmit at
+ *
+ * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
+ * to every device which is listening to the specified channel.  The kernel
+ * writes an &fw_cdev_event_response event which indicates success or failure of
+ * the transmission.
+ */
+struct fw_cdev_send_stream_packet {
+	__u32 length;
+	__u32 tag;
+	__u32 channel;
+	__u32 sy;
+	__u64 closure;
+	__u64 data;
+	__u32 generation;
+	__u32 speed;
+};
+
 #endif /* _LINUX_FIREWIRE_CDEV_H */
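
A hypothetical userspace sketch of the new iso resource ABI: ask the IRM
for one channel out of 0..7 plus some bandwidth units, then read back the
allocation event. The /dev/fw0 path, the channel mask, and the bandwidth
value are illustrative; error handling is trimmed, and a real client would
first announce its ABI version via FW_CDEV_IOC_GET_INFO.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_allocate_iso_resource req;
	union fw_cdev_event event;
	int fd = open("/dev/fw0", O_RDWR);

	memset(&req, 0, sizeof(req));
	req.channels  = 0xffULL;	/* candidates: channels 0..7 */
	req.bandwidth = 2048;		/* allocation units, see above */
	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req);

	/* Success or failure arrives as an event: channel >= 0 and
	 * bandwidth > 0 mean the IRM granted the request. */
	if (read(fd, &event, sizeof(event)) > 0 &&
	    event.common.type == FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED)
		printf("channel %d, bandwidth %d, handle %u\n",
		       event.iso_resource.channel,
		       event.iso_resource.bandwidth,
		       event.iso_resource.handle);
	close(fd);
	return 0;
}
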
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cd44f7..42436ae 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1064,34 +1064,147 @@
 extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
 extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
 #else /* !CONFIG_FILE_LOCKING */
-#define fcntl_getlk(a, b) ({ -EINVAL; })
-#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk(struct file *file, struct flock __user *user)
+{
+	return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+			      unsigned int cmd, struct flock __user *user)
+{
+	return -EACCES;
+}
+
 #if BITS_PER_LONG == 32
-#define fcntl_getlk64(a, b) ({ -EINVAL; })
-#define fcntl_setlk64(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
+{
+	return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+				unsigned int cmd, struct flock64 __user *user)
+{
+	return -EACCES;
+}
 #endif
-#define fcntl_setlease(a, b, c) ({ 0; })
-#define fcntl_getlease(a) ({ 0; })
-#define locks_init_lock(a) ({ })
-#define __locks_copy_lock(a, b) ({ })
-#define locks_copy_lock(a, b) ({ })
-#define locks_remove_posix(a, b) ({ })
-#define locks_remove_flock(a) ({ })
-#define posix_test_lock(a, b) ({ 0; })
-#define posix_lock_file(a, b, c) ({ -ENOLCK; })
-#define posix_lock_file_wait(a, b) ({ -ENOLCK; })
-#define posix_unblock_lock(a, b) (-ENOENT)
-#define vfs_test_lock(a, b) ({ 0; })
-#define vfs_lock_file(a, b, c, d) (-ENOLCK)
-#define vfs_cancel_lock(a, b) ({ 0; })
-#define flock_lock_file_wait(a, b) ({ -ENOLCK; })
-#define __break_lease(a, b) ({ 0; })
-#define lease_get_mtime(a, b) ({ })
-#define generic_setlease(a, b, c) ({ -EINVAL; })
-#define vfs_setlease(a, b, c) ({ -EINVAL; })
-#define lease_modify(a, b) ({ -EINVAL; })
-#define lock_may_read(a, b, c) ({ 1; })
-#define lock_may_write(a, b, c) ({ 1; })
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+	return 0;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+	return 0;
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+	return;
+}
+
+static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+	return;
+}
+
+static inline void locks_remove_flock(struct file *filp)
+{
+	return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+	return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+				  struct file_lock *conflock)
+{
+	return -ENOLCK;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+	return -ENOLCK;
+}
+
+static inline int posix_unblock_lock(struct file *filp,
+				     struct file_lock *waiter)
+{
+	return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+	return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+				struct file_lock *fl, struct file_lock *conf)
+{
+	return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+	return 0;
+}
+
+static inline int flock_lock_file_wait(struct file *filp,
+				       struct file_lock *request)
+{
+	return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode)
+{
+	return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+{
+	return;
+}
+
+static inline int generic_setlease(struct file *filp, long arg,
+				    struct file_lock **flp)
+{
+	return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, long arg,
+			       struct file_lock **lease)
+{
+	return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lock **before, int arg)
+{
+	return -EINVAL;
+}
+
+static inline int lock_may_read(struct inode *inode, loff_t start,
+				unsigned long len)
+{
+	return 1;
+}
+
+static inline int lock_may_write(struct inode *inode, loff_t start,
+				 unsigned long len)
+{
+	return 1;
+}
+
 #endif /* !CONFIG_FILE_LOCKING */
 
 
@@ -1607,7 +1720,7 @@
 extern int get_sb_pseudo(struct file_system_type *, char *,
 	const struct super_operations *ops, unsigned long,
 	struct vfsmount *mnt);
-extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
 
 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1688,13 +1801,44 @@
 	return 0;
 }
 #else /* !CONFIG_FILE_LOCKING */
-#define locks_mandatory_locked(a) ({ 0; })
-#define locks_mandatory_area(a, b, c, d, e) ({ 0; })
-#define __mandatory_lock(a) ({ 0; })
-#define mandatory_lock(a) ({ 0; })
-#define locks_verify_locked(a) ({ 0; })
-#define locks_verify_truncate(a, b, c) ({ 0; })
-#define break_lease(a, b) ({ 0; })
+static inline int locks_mandatory_locked(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int locks_mandatory_area(int rw, struct inode *inode,
+				       struct file *filp, loff_t offset,
+				       size_t count)
+{
+	return 0;
+}
+
+static inline int __mandatory_lock(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int mandatory_lock(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int locks_verify_locked(struct inode *inode)
+{
+	return 0;
+}
+
+static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
+					size_t size)
+{
+	return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+	return 0;
+}
+
 #endif /* CONFIG_FILE_LOCKING */
 
 /* fs/open.c */
@@ -1731,6 +1875,13 @@
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+extern int fsync_super(struct super_block *);
+extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
 #endif
@@ -1882,7 +2033,6 @@
 	if (file)
 		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
 }
-extern int do_pipe(int *);
 extern int do_pipe_flags(int *, int);
 extern struct file *create_read_pipe(struct file *f, int flags);
 extern struct file *create_write_pipe(int flags);
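
The fs.h hunks above replace the !CONFIG_FILE_LOCKING statement-expression
macros with static inlines. The point of the conversion: a ({ ... }) stub
never evaluates or type-checks its arguments, so misuse only surfaced in
CONFIG_FILE_LOCKING builds. An illustrative standalone sketch (not kernel
code) of the difference:

#include <errno.h>

/* Old style: compiles with any argument types, arguments are discarded. */
#define old_posix_lock_file(a, b, c) ({ -ENOLCK; })

/* New style: the stub keeps the real prototype, so wrong calls are
 * rejected at compile time even when file locking is configured out. */
struct file;
struct file_lock;
static inline int posix_lock_file_stub(struct file *filp,
				       struct file_lock *fl,
				       struct file_lock *conflock)
{
	return -ENOLCK;
}

void demo(struct file *filp, struct file_lock *fl)
{
	(void)old_posix_lock_file("oops", 42, 3.0);	/* silently accepted */
	posix_lock_file_stub(filp, fl, NULL);		/* type-checked */
	/* posix_lock_file_stub("oops", 42, 3.0); would not compile */
}
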
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index f69e66d..30b06c8 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,7 +204,7 @@
 /* linux/fs/ncpfs/dir.c */
 extern const struct inode_operations ncp_dir_inode_operations;
 extern const struct file_operations ncp_dir_operations;
-extern struct dentry_operations ncp_root_dentry_operations;
+extern const struct dentry_operations ncp_root_dentry_operations;
 int ncp_conn_logged_in(struct super_block *);
 int ncp_date_dos2unix(__le16 time, __le16 date);
 void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index db867b0..8cc8807 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -415,7 +415,7 @@
 extern const struct inode_operations nfs3_dir_inode_operations;
 #endif /* CONFIG_NFS_V3 */
 extern const struct file_operations nfs_dir_operations;
-extern struct dentry_operations nfs_dentry_operations;
+extern const struct dentry_operations nfs_dentry_operations;
 
 extern void nfs_force_lookup_revalidate(struct inode *dir);
 extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2e5f000..43a713f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -785,7 +785,7 @@
  */
 struct nfs_rpc_ops {
 	u32	version;		/* Protocol version */
-	struct dentry_operations *dentry_ops;
+	const struct dentry_operations *dentry_ops;
 	const struct inode_operations *dir_inode_ops;
 	const struct inode_operations *file_inode_ops;
 
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d72d5d8..78c4889 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@
 	qsize_t dqb_bhardlimit;	/* absolute limit on disk blks alloc */
 	qsize_t dqb_bsoftlimit;	/* preferred limit on disk blks */
 	qsize_t dqb_curspace;	/* current used space */
+	qsize_t dqb_rsvspace;	/* current reserved space for delalloc */
 	qsize_t dqb_ihardlimit;	/* absolute limit on allocated inodes */
 	qsize_t dqb_isoftlimit;	/* preferred inode limit */
 	qsize_t dqb_curinodes;	/* current # allocated inodes */
@@ -276,8 +277,6 @@
 	struct mem_dqblk dq_dqb;	/* Diskquota usage */
 };
 
-#define NODQUOT (struct dquot *)NULL
-
 #define QUOTA_OK          0
 #define NO_QUOTA          1
 
@@ -308,6 +307,14 @@
 	int (*release_dquot) (struct dquot *);		/* Quota is going to be deleted from disk */
 	int (*mark_dirty) (struct dquot *);		/* Dquot is marked dirty */
 	int (*write_info) (struct super_block *, int);	/* Write of quota "superblock" */
+	/* reserve quota for delayed block allocation */
+	int (*reserve_space) (struct inode *, qsize_t, int);
+	/* claim reserved quota for delayed alloc */
+	int (*claim_space) (struct inode *, qsize_t);
+	/* release reserved quota for delayed alloc */
+	void (*release_rsv) (struct inode *, qsize_t);
+	/* get reserved quota for delayed alloc */
+	qsize_t (*get_reserved_space) (struct inode *);
 };
 
 /* Operations handling requests from userspace */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 0b35b3a..36353d9 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -35,6 +35,11 @@
 int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
 int dquot_alloc_inode(const struct inode *inode, qsize_t number);
 
+int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
+int dquot_claim_space(struct inode *inode, qsize_t number);
+void dquot_release_reserved_space(struct inode *inode, qsize_t number);
+qsize_t dquot_get_reserved_space(struct inode *inode);
+
 int dquot_free_space(struct inode *inode, qsize_t number);
 int dquot_free_inode(const struct inode *inode, qsize_t number);
 
@@ -183,6 +188,16 @@
 	return ret;
 }
 
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+	if (sb_any_quota_active(inode->i_sb)) {
+		/* Used space is updated in alloc_space() */
+		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+			return 1;
+	}
+	return 0;
+}
+
 static inline int vfs_dq_alloc_inode(struct inode *inode)
 {
 	if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +208,31 @@
 	return 0;
 }
 
+/*
+ * Convert in-memory reserved quotas to real consumed quotas
+ */
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+	if (sb_any_quota_active(inode->i_sb)) {
+		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+			return 1;
+	} else
+		inode_add_bytes(inode, nr);
+
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+/*
+ * Release reserved (in-memory) quotas
+ */
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+	if (sb_any_quota_active(inode->i_sb))
+		inode->i_sb->dq_op->release_rsv(inode, nr);
+}
+
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
 	if (sb_any_quota_active(inode->i_sb))
@@ -339,6 +379,22 @@
 	return 0;
 }
 
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+	return 0;
+}
+
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+	return vfs_dq_alloc_space(inode, nr);
+}
+
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+	return;
+}
+
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
 	inode_sub_bytes(inode, nr);
@@ -354,67 +410,48 @@
 
 static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
 {
-	return vfs_dq_prealloc_space_nodirty(inode,
-			nr << inode->i_sb->s_blocksize_bits);
+	return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
 {
-	return vfs_dq_prealloc_space(inode,
-			nr << inode->i_sb->s_blocksize_bits);
+	return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
 {
- 	return vfs_dq_alloc_space_nodirty(inode,
-			nr << inode->i_sb->s_blocksize_bits);
+	return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
 {
-	return vfs_dq_alloc_space(inode,
-			nr << inode->i_sb->s_blocksize_bits);
+	return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
+{
+	return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
+{
+	return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
+}
+
+static inline
+void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
+{
+	vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
 }
 
 static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
 {
-	vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
+	vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
 }
 
 static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
 {
-	vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
+	vfs_dq_free_space(inode, nr << inode->i_blkbits);
 }
 
-/*
- * Define uppercase equivalents for compatibility with old function names
- * Can go away when we think all users have been converted (15/04/2008)
- */
-#define DQUOT_INIT(inode) vfs_dq_init(inode)
-#define DQUOT_DROP(inode) vfs_dq_drop(inode)
-#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
-				vfs_dq_prealloc_space_nodirty(inode, nr)
-#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
-#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
-				vfs_dq_alloc_space_nodirty(inode, nr)
-#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
-				vfs_dq_prealloc_block_nodirty(inode, nr)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
-				vfs_dq_alloc_block_nodirty(inode, nr)
-#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
-#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
-#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
-				vfs_dq_free_space_nodirty(inode, nr)
-#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
-				vfs_dq_free_block_nodirty(inode, nr)
-#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
-#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
-#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
-#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
-#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
-#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
-
 #endif /* _LINUX_QUOTAOPS_ */
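Note: the quotaops.h changes above add a three-step reservation API for delayed allocation: reserve quota when data is buffered, claim it when blocks are finally mapped, and release it if the write is abandoned. The block-sized wrappers also switch to shifting by inode->i_blkbits, which caches inode->i_sb->s_blocksize_bits. A hedged sketch of how a delayed-allocation write path might drive these helpers; the foofs_* functions are hypothetical and only the vfs_dq_* calls come from this patch:

static int foofs_reserve_blocks(struct inode *inode, qsize_t nblocks)
{
	/* Reserve quota up front; nothing is charged as used yet. */
	if (vfs_dq_reserve_block(inode, nblocks))
		return -EDQUOT;
	return 0;
}

static int foofs_commit_blocks(struct inode *inode, qsize_t nblocks)
{
	/* Blocks were really allocated: convert reservation to usage. */
	if (vfs_dq_claim_block(inode, nblocks))
		return -EDQUOT;
	return 0;
}

static void foofs_abort_write(struct inode *inode, qsize_t nblocks)
{
	/* The buffered write was thrown away: hand the reservation back. */
	vfs_dq_release_reservation_block(inode, nblocks);
}

Reserving at write time rather than charging at writeback keeps a user from over-committing quota while delayed allocation postpones the actual block mapping.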
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4..c500ca7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1071,7 +1071,8 @@
 		mutex_unlock(&cgroup_mutex);
 	}
 
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 
  free_cg_links:
 	free_cg_links(&tmp_cg_links);
@@ -1627,7 +1628,7 @@
 static int cgroup_create_file(struct dentry *dentry, int mode,
 				struct super_block *sb)
 {
-	static struct dentry_operations cgroup_dops = {
+	static const struct dentry_operations cgroup_dops = {
 		.d_iput = cgroup_diput,
 	};
 
diff --git a/net/socket.c b/net/socket.c
index af0205f..0b14b79 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -328,7 +328,7 @@
 				dentry->d_inode->i_ino);
 }
 
-static struct dentry_operations sockfs_dentry_operations = {
+static const struct dentry_operations sockfs_dentry_operations = {
 	.d_delete = sockfs_delete_dentry,
 	.d_dname  = sockfs_dname,
 };
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 577385a..9ced062 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -480,7 +480,7 @@
 	return 1;
 }
 
-static struct dentry_operations rpc_dentry_operations = {
+static const struct dentry_operations rpc_dentry_operations = {
 	.d_delete = rpc_delete_dentry,
 };
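Note: the cgroup, socket and rpc_pipe hunks are all instances of the same tree-wide change: dentry_operations tables are never modified at run time, so they are now declared const and end up in read-only data. Declaring and wiring up such a table looks like this; the foofs_* names are illustrative:

/* Never keep unused dentries around in the dcache. */
static int foofs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations foofs_dentry_operations = {
	.d_delete = foofs_delete_dentry,
};

/* e.g. in ->lookup(): */
/*	dentry->d_op = &foofs_dentry_operations; */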