Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
  [PATCH] get rid of __exit_files(), __exit_fs() and __put_fs_struct()
  [PATCH] proc_readfd_common() race fix
  [PATCH] double-free of inode on alloc_file() failure exit in create_write_pipe()
  [PATCH] teach seq_file to discard entries
  [PATCH] umount_tree() will unhash everything itself
  [PATCH] get rid of more nameidata passing in namespace.c
  [PATCH] switch a bunch of LSM hooks from nameidata to path
  [PATCH] lock exclusively in collect_mounts() and drop_collected_mounts()
  [PATCH] move a bunch of declarations to fs/internal.h
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed21737..cd13e13 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -266,17 +266,6 @@
 	depends on !IA64_HP_SIM
 	default y
 
-config IA64_SGI_SN_XP
-	tristate "Support communication between SGI SSIs"
-	depends on IA64_GENERIC || IA64_SGI_SN2
-	select IA64_UNCACHED_ALLOCATOR
-	help
-	  An SGI machine can be divided into multiple Single System
-	  Images which act independently of each other and have
-	  hardware based memory protection from the others.  Enabling
-	  this feature will allow for direct communication between SSIs
-	  based on a network adapter and DMA messaging.
-
 config FORCE_MAX_ZONEORDER
 	int "MAX_ORDER (11 - 17)"  if !HUGETLB_PAGE
 	range 11 17  if !HUGETLB_PAGE
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 90ef338..f065093 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
-		/* die_register->signr indicate if MCA is recoverable */
-		if (kdump_on_fatal_mca && !args->signr) {
+		/* *(nd->data) indicates if MCA is recoverable */
+		if (kdump_on_fatal_mca && !(*(nd->data))) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
 			machine_kdump_on_init();
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b0be4a2..e49ad8c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -570,6 +570,7 @@
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+(pUStk)	rsm psr.i				// disable interrupts
 	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e51bced..705176b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
+			== NOTIFY_STOP) && ((spin) == 1))		\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)				\
+do {									\
+	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
+			== NOTIFY_STOP) && ((spin) == 1))		\
+		ia64_mca_spin(__func__);				\
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@
 
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
 	       cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1256,7 +1265,7 @@
 	int recover, cpu = smp_processor_id();
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
-		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
 	static atomic_t mca_count;
 	static cpumask_t mca_cpu;
 
@@ -1272,9 +1281,7 @@
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1288,13 +1295,12 @@
 		 * does not work.
 		 */
 		ia64_mca_wakeup_all();
-		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
-        }
+	}
+
+	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
 
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
 
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
@@ -1643,7 +1647,7 @@
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
 	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 		       cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
 		while (monarch_cpu != -1)
 		       cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-				== NOTIFY_STOP)
-			ia64_mca_spin(__func__);
+
+		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
 	/*
 	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__func__);
+	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
-			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d1d24f4..c8e4037 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5511,7 +5511,7 @@
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
 	struct task_struct *task;
 	pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@
 
 		start_cycles = ia64_get_itc();
 
-		ret = pfm_do_interrupt_handler(irq, arg, regs);
+		ret = pfm_do_interrupt_handler(arg, regs);
 
 		total_cycles = ia64_get_itc();
 
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 688a3c27..0591038 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,7 +4,7 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@
 				   sn2/
 obj-$(CONFIG_IA64_GENERIC)      += machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o
-xp-y				:= xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpc.o
-xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP)	+= xpnet.o
 obj-$(CONFIG_PCI_MSI)		+= msi_sn.o
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 0101c79..08b0d9b 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -187,8 +187,8 @@
 {
 
 	if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-			"SN_hub_error", (void *)hubdev_info)) {
-		printk("hub_error_init: Failed to request_irq for 0x%p\n",
+			"SN_hub_error", hubdev_info)) {
+		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
 		    hubdev_info);
 		return;
 	}
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 9b3c113..94e5845 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -655,7 +655,8 @@
  *
  * Simply call tioce_do_dma_map() to create a map with the barrier bit set
  * in the address.
- */ static u64
+ */
+static u64
 tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@
  *
  * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
  * defers processing to the SGI prom.
- */ static irqreturn_t
+ */
+static irqreturn_t
 tioce_error_intr_handler(int irq, void *arg)
 {
 	struct tioce_common *soft = arg;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index be1cc51..ef522ae 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -530,7 +530,8 @@
 		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
 				  dev->bus_id);
 		device_remove_attrs(dev->bus, dev);
-		klist_del(&dev->knode_bus);
+		if (klist_node_attached(&dev->knode_bus))
+			klist_del(&dev->knode_bus);
 
 		pr_debug("bus: '%s': remove device %s\n",
 			 dev->bus->name, dev->bus_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c4568b8..7b76fd3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -62,7 +62,7 @@
  */
 int device_pm_add(struct device *dev)
 {
-	int error = 0;
+	int error;
 
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus",
@@ -70,18 +70,15 @@
 	mutex_lock(&dpm_list_mtx);
 	if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
 		if (dev->parent->power.sleeping)
-			dev_warn(dev,
-				"parent %s is sleeping, will not add\n",
+			dev_warn(dev, "parent %s is sleeping\n",
 				dev->parent->bus_id);
 		else
-			dev_warn(dev, "devices are sleeping, will not add\n");
+			dev_warn(dev, "all devices are sleeping\n");
 		WARN_ON(true);
-		error = -EBUSY;
-	} else {
-		error = dpm_sysfs_add(dev);
-		if (!error)
-			list_add_tail(&dev->power.entry, &dpm_active);
 	}
+	error = dpm_sysfs_add(dev);
+	if (!error)
+		list_add_tail(&dev->power.entry, &dpm_active);
 	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 8536480..7bd7663 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -108,7 +108,7 @@
 #ifndef CONFIG_BLK_DEV_XIP
 	gfp_flags |= __GFP_HIGHMEM;
 #endif
-	page = alloc_page(GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return NULL;
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f0b00ec..e03c67d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -44,8 +44,8 @@
 
 #ifdef CONFIG_HID_DEBUG
 int hid_debug = 0;
-module_param_named(debug, hid_debug, bool, 0600);
-MODULE_PARM_DESC(debug, "Turn HID debugging mode on and off");
+module_param_named(debug, hid_debug, int, 0600);
+MODULE_PARM_DESC(debug, "HID debugging (0=off, 1=probing info, 2=continuous data dumping)");
 EXPORT_SYMBOL_GPL(hid_debug);
 #endif
 
@@ -97,7 +97,7 @@
 	field->index = report->maxfield++;
 	report->field[field->index] = field;
 	field->usage = (struct hid_usage *)(field + 1);
-	field->value = (unsigned *)(field->usage + usages);
+	field->value = (s32 *)(field->usage + usages);
 	field->report = report;
 
 	return field;
@@ -830,7 +830,8 @@
  * reporting to the layer).
  */
 
-void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt)
+static void hid_input_field(struct hid_device *hid, struct hid_field *field,
+			    __u8 *data, int interrupt)
 {
 	unsigned n;
 	unsigned count = field->report_count;
@@ -876,7 +877,6 @@
 exit:
 	kfree(value);
 }
-EXPORT_SYMBOL_GPL(hid_input_field);
 
 /*
  * Output the field into the report.
@@ -988,8 +988,13 @@
 
 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
 		hid->hiddev_report_event(hid, report);
-	if (hid->claimed & HID_CLAIMED_HIDRAW)
-		hidraw_report_event(hid, data, size);
+	if (hid->claimed & HID_CLAIMED_HIDRAW) {
+		/* numbered reports need to be passed with the report num */
+		if (report_enum->numbered)
+			hidraw_report_event(hid, data - 1, size + 1);
+		else
+			hidraw_report_event(hid, data, size);
+	}
 
 	for (n = 0; n < report->maxfield; n++)
 		hid_input_field(hid, report->field[n], data, interrupt);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5c24fe4..f88714b 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -498,7 +498,7 @@
 EXPORT_SYMBOL_GPL(hid_dump_device);
 
 void hid_dump_input(struct hid_usage *usage, __s32 value) {
-	if (!hid_debug)
+	if (hid_debug < 2)
 		return;
 
 	printk(KERN_DEBUG "hid-debug: input ");
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index dceadd0..4c2052c 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -276,6 +276,21 @@
 	return 1;
 }
 
+static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *input,
+			      unsigned long **bit, int *max)
+{
+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+		return 0;
+
+	switch (usage->hid & HID_USAGE) {
+		case 0x2003: map_key_clear(KEY_ZOOMIN);		break;
+		case 0x2103: map_key_clear(KEY_ZOOMOUT);	break;
+		default:
+			return 0;
+	}
+	return 1;
+}
+
 #define VENDOR_ID_BELKIN			0x1020
 #define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD	0x0006
 
@@ -306,6 +321,9 @@
 #define VENDOR_ID_PETALYNX			0x18b1
 #define DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
 
+#define VENDOR_ID_SUNPLUS			0x04fc
+#define DEVICE_ID_SUNPLUS_WDESKTOP		0x05d8
+
 static const struct hid_input_blacklist {
 	__u16 idVendor;
 	__u16 idProduct;
@@ -332,8 +350,10 @@
 	{ VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e },
 
 	{ VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote },
-	
-	{ 0, 0, 0 }
+
+	{ VENDOR_ID_SUNPLUS, DEVICE_ID_SUNPLUS_WDESKTOP, quirk_sunplus_wdesktop },
+
+	{ 0, 0, NULL }
 };
 
 int hidinput_mapping_quirks(struct hid_usage *usage, 
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7160fa6..18f0910 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -71,6 +71,14 @@
 	  Note: if you say N here, this device will still be supported, but without
 	  force feedback.
 
+config LOGIRUMBLEPAD2_FF
+	bool "Logitech Rumblepad 2 support"
+	depends on HID_FF
+	select INPUT_FF_MEMLESS if USB_HID
+	help
+	  Say Y here if you want to enable force feedback support for Logitech
+	  Rumblepad 2 devices.
+
 config PANTHERLORD_FF
 	bool "PantherLord/GreenAsia based device support"
 	depends on HID_FF
@@ -80,8 +88,8 @@
 	  or adapter and want to enable force feedback support for it.
 
 config THRUSTMASTER_FF
-	bool "ThrustMaster devices support (EXPERIMENTAL)"
-	depends on HID_FF && EXPERIMENTAL
+	bool "ThrustMaster devices support"
+	depends on HID_FF
 	select INPUT_FF_MEMLESS if USB_HID
 	help
 	  Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 8e6ab5b..00a7b70 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -16,6 +16,9 @@
 ifeq ($(CONFIG_LOGITECH_FF),y)
 	usbhid-objs	+= hid-lgff.o
 endif
+ifeq ($(CONFIG_LOGIRUMBLEPAD2_FF),y)
+	usbhid-objs	+= hid-lg2ff.o
+endif
 ifeq ($(CONFIG_PANTHERLORD_FF),y)
 	usbhid-objs	+= hid-plff.o
 endif
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index d95979f..e0d805f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -82,6 +82,7 @@
 
 	spin_lock_irqsave(&usbhid->inlock, flags);
 	if (hid->open > 0 && !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
+			!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
 			!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
 		rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
 		if (rc != 0)
@@ -155,7 +156,7 @@
 	spin_lock_irqsave(&usbhid->inlock, flags);
 
 	/* Stop when disconnected */
-	if (usb_get_intfdata(usbhid->intf) == NULL)
+	if (test_bit(HID_DISCONNECTED, &usbhid->iofl))
 		goto done;
 
 	/* If it has been a while since the last error, we'll assume
@@ -341,7 +342,7 @@
 	if (usbhid->outhead != usbhid->outtail) {
 		if (hid_submit_out(hid)) {
 			clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
-			wake_up(&hid->wait);
+			wake_up(&usbhid->wait);
 		}
 		spin_unlock_irqrestore(&usbhid->outlock, flags);
 		return;
@@ -349,7 +350,7 @@
 
 	clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
 	spin_unlock_irqrestore(&usbhid->outlock, flags);
-	wake_up(&hid->wait);
+	wake_up(&usbhid->wait);
 }
 
 /*
@@ -391,7 +392,7 @@
 	if (usbhid->ctrlhead != usbhid->ctrltail) {
 		if (hid_submit_ctrl(hid)) {
 			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
-			wake_up(&hid->wait);
+			wake_up(&usbhid->wait);
 		}
 		spin_unlock_irqrestore(&usbhid->ctrllock, flags);
 		return;
@@ -399,7 +400,7 @@
 
 	clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
 	spin_unlock_irqrestore(&usbhid->ctrllock, flags);
-	wake_up(&hid->wait);
+	wake_up(&usbhid->wait);
 }
 
 void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir)
@@ -478,8 +479,9 @@
 {
 	struct usbhid_device *usbhid = hid->driver_data;
 
-	if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
-					!test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
+	if (!wait_event_timeout(usbhid->wait,
+				(!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
+				!test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
 					10*HZ)) {
 		dbg_hid("timeout waiting for ctrl or out queue to clear\n");
 		return -1;
@@ -610,10 +612,11 @@
 /*
  * Traverse the supplied list of reports and find the longest
  */
-static void hid_find_max_report(struct hid_device *hid, unsigned int type, int *max)
+static void hid_find_max_report(struct hid_device *hid, unsigned int type,
+		unsigned int *max)
 {
 	struct hid_report *report;
-	int size;
+	unsigned int size;
 
 	list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
 		size = ((report->size - 1) >> 3) + 1;
@@ -705,9 +708,9 @@
 	struct hid_descriptor *hdesc;
 	struct hid_device *hid;
 	u32 quirks = 0;
-	unsigned rsize = 0;
+	unsigned int insize = 0, rsize = 0;
 	char *rdesc;
-	int n, len, insize = 0;
+	int n, len;
 	struct usbhid_device *usbhid;
 
 	quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
@@ -800,6 +803,22 @@
 		goto fail;
 	}
 
+	hid->name[0] = 0;
+
+	if (dev->manufacturer)
+		strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
+
+	if (dev->product) {
+		if (dev->manufacturer)
+			strlcat(hid->name, " ", sizeof(hid->name));
+		strlcat(hid->name, dev->product, sizeof(hid->name));
+	}
+
+	if (!strlen(hid->name))
+		snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
+			 le16_to_cpu(dev->descriptor.idVendor),
+			 le16_to_cpu(dev->descriptor.idProduct));
+
 	for (n = 0; n < interface->desc.bNumEndpoints; n++) {
 
 		struct usb_endpoint_descriptor *endpoint;
@@ -812,6 +831,14 @@
 
 		interval = endpoint->bInterval;
 
+		/* Some vendors give fullspeed interval on highspeed devices */
+		if (quirks & HID_QUIRK_FULLSPEED_INTERVAL  &&
+		    dev->speed == USB_SPEED_HIGH) {
+			interval = fls(endpoint->bInterval*8);
+			printk(KERN_INFO "%s: Fixing fullspeed to highspeed interval: %d -> %d\n",
+			       hid->name, endpoint->bInterval, interval);
+		}
+
 		/* Change the polling interval of mice. */
 		if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)
 			interval = hid_mousepoll_interval;
@@ -844,8 +871,7 @@
 		goto fail;
 	}
 
-	init_waitqueue_head(&hid->wait);
-
+	init_waitqueue_head(&usbhid->wait);
 	INIT_WORK(&usbhid->reset_work, hid_reset);
 	setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
 
@@ -859,22 +885,6 @@
 	usbhid->intf = intf;
 	usbhid->ifnum = interface->desc.bInterfaceNumber;
 
-	hid->name[0] = 0;
-
-	if (dev->manufacturer)
-		strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
-
-	if (dev->product) {
-		if (dev->manufacturer)
-			strlcat(hid->name, " ", sizeof(hid->name));
-		strlcat(hid->name, dev->product, sizeof(hid->name));
-	}
-
-	if (!strlen(hid->name))
-		snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
-			 le16_to_cpu(dev->descriptor.idVendor),
-			 le16_to_cpu(dev->descriptor.idProduct));
-
 	hid->bus = BUS_USB;
 	hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
 	hid->product = le16_to_cpu(dev->descriptor.idProduct);
@@ -932,6 +942,7 @@
 
 	spin_lock_irq(&usbhid->inlock);	/* Sync with error handler */
 	usb_set_intfdata(intf, NULL);
+	set_bit(HID_DISCONNECTED, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->inlock);
 	usb_kill_urb(usbhid->urbin);
 	usb_kill_urb(usbhid->urbout);
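
The new HID_QUIRK_FULLSPEED_INTERVAL handling above converts a bInterval that a buggy high-speed device reports in full-speed units (milliseconds) into the logarithmic microframe encoding that high-speed interrupt endpoints actually use (2^(interval-1) microframes). A small stand-alone illustration of the arithmetic, using a hypothetical descriptor value and a local stand-in for the kernel's fls():

#include <stdio.h>

/* Local stand-in for the kernel's fls(): position of the highest set bit,
 * counting from 1 (fls(0) == 0).
 */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int bInterval = 10;		/* hypothetical: 10 ms, full-speed units */
	int interval = fls_demo(bInterval * 8);	/* same math as the quirk above */

	printf("bInterval %u ms -> high-speed interval code %d (%d microframes, %d ms)\n",
	       bInterval, interval, 1 << (interval - 1), (1 << (interval - 1)) / 8);
	return 0;
}
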
diff --git a/drivers/hid/usbhid/hid-ff.c b/drivers/hid/usbhid/hid-ff.c
index 4c210e1..1d0dac5 100644
--- a/drivers/hid/usbhid/hid-ff.c
+++ b/drivers/hid/usbhid/hid-ff.c
@@ -59,6 +59,9 @@
 	{ 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
 	{ 0x46d, 0xca03, hid_lgff_init }, /* Logitech MOMO force wheel */
 #endif
+#ifdef CONFIG_LOGIRUMBLEPAD2_FF
+	{ 0x46d, 0xc218, hid_lg2ff_init }, /* Logitech Rumblepad 2 */
+#endif
 #ifdef CONFIG_PANTHERLORD_FF
 	{ 0x810, 0x0001, hid_plff_init }, /* "Twin USB Joystick" */
 	{ 0xe8f, 0x0003, hid_plff_init }, /* "GreenAsia Inc.    USB Joystick     " */
diff --git a/drivers/hid/usbhid/hid-lg2ff.c b/drivers/hid/usbhid/hid-lg2ff.c
new file mode 100644
index 0000000..d469bd0
--- /dev/null
+++ b/drivers/hid/usbhid/hid-lg2ff.c
@@ -0,0 +1,114 @@
+/*
+ *  Force feedback support for Logitech Rumblepad 2
+ *
+ *  Copyright (c) 2008 Anssi Hannula <anssi.hannula@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include "usbhid.h"
+
+struct lg2ff_device {
+	struct hid_report *report;
+};
+
+static int play_effect(struct input_dev *dev, void *data,
+			 struct ff_effect *effect)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct lg2ff_device *lg2ff = data;
+	int weak, strong;
+
+	strong = effect->u.rumble.strong_magnitude;
+	weak = effect->u.rumble.weak_magnitude;
+
+	if (weak || strong) {
+		weak = weak * 0xff / 0xffff;
+		strong = strong * 0xff / 0xffff;
+
+		lg2ff->report->field[0]->value[0] = 0x51;
+		lg2ff->report->field[0]->value[2] = weak;
+		lg2ff->report->field[0]->value[4] = strong;
+	} else {
+		lg2ff->report->field[0]->value[0] = 0xf3;
+		lg2ff->report->field[0]->value[2] = 0x00;
+		lg2ff->report->field[0]->value[4] = 0x00;
+	}
+
+	usbhid_submit_report(hid, lg2ff->report, USB_DIR_OUT);
+	return 0;
+}
+
+int hid_lg2ff_init(struct hid_device *hid)
+{
+	struct lg2ff_device *lg2ff;
+	struct hid_report *report;
+	struct hid_input *hidinput = list_entry(hid->inputs.next,
+						struct hid_input, list);
+	struct list_head *report_list =
+			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct input_dev *dev = hidinput->input;
+	int error;
+
+	if (list_empty(report_list)) {
+		printk(KERN_ERR "hid-lg2ff: no output report found\n");
+		return -ENODEV;
+	}
+
+	report = list_entry(report_list->next, struct hid_report, list);
+
+	if (report->maxfield < 1) {
+		printk(KERN_ERR "hid-lg2ff: output report is empty\n");
+		return -ENODEV;
+	}
+	if (report->field[0]->report_count < 7) {
+		printk(KERN_ERR "hid-lg2ff: not enough values in the field\n");
+		return -ENODEV;
+	}
+
+	lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
+	if (!lg2ff)
+		return -ENOMEM;
+
+	set_bit(FF_RUMBLE, dev->ffbit);
+
+	error = input_ff_create_memless(dev, lg2ff, play_effect);
+	if (error) {
+		kfree(lg2ff);
+		return error;
+	}
+
+	lg2ff->report = report;
+	report->field[0]->value[0] = 0xf3;
+	report->field[0]->value[1] = 0x00;
+	report->field[0]->value[2] = 0x00;
+	report->field[0]->value[3] = 0x00;
+	report->field[0]->value[4] = 0x00;
+	report->field[0]->value[5] = 0x00;
+	report->field[0]->value[6] = 0x00;
+
+	usbhid_submit_report(hid, report, USB_DIR_OUT);
+
+	printk(KERN_INFO "Force feedback for Logitech Rumblepad 2 by "
+	       "Anssi Hannula <anssi.hannula@gmail.com>\n");
+
+	return 0;
+}
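
A note on the scaling in play_effect() above: ff_effect rumble magnitudes are 16-bit values, while the Rumblepad 2 output report takes one byte per motor, hence the * 0xff / 0xffff conversion. A tiny worked example with made-up magnitudes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical effect: half-strength strong motor, light weak motor. */
	unsigned int strong = 0x8000, weak = 0x1000;

	/* Same scaling as play_effect(): map 0..0xffff onto 0..0xff. */
	printf("strong 0x%04x -> report byte 0x%02x\n", strong, strong * 0xff / 0xffff);
	printf("weak   0x%04x -> report byte 0x%02x\n", weak, weak * 0xff / 0xffff);
	return 0;
}
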
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e29a057..28ddc3f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -32,6 +32,9 @@
 #define USB_VENDOR_ID_ADS_TECH 		0x06e1
 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X	0xa155
 
+#define USB_VENDOR_ID_AFATECH		0x15a4
+#define USB_DEVICE_ID_AFATECH_AF9016	0x9016
+
 #define USB_VENDOR_ID_AIPTEK		0x08ca
 #define USB_DEVICE_ID_AIPTEK_01		0x0001
 #define USB_DEVICE_ID_AIPTEK_10		0x0010
@@ -124,6 +127,9 @@
 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20	0x0200
 
+#define USB_VENDOR_ID_DMI		0x0c0b
+#define USB_DEVICE_ID_DMI_ENC		0x5fab
+
 #define USB_VENDOR_ID_ELO		0x04E7
 #define USB_DEVICE_ID_ELO_TS2700	0x0020
 
@@ -199,17 +205,6 @@
 #define USB_DEVICE_ID_GTCO_502		0x0502
 #define USB_DEVICE_ID_GTCO_503		0x0503
 #define USB_DEVICE_ID_GTCO_504		0x0504
-#define USB_DEVICE_ID_GTCO_600		0x0600
-#define USB_DEVICE_ID_GTCO_601		0x0601
-#define USB_DEVICE_ID_GTCO_602		0x0602
-#define USB_DEVICE_ID_GTCO_603		0x0603
-#define USB_DEVICE_ID_GTCO_604		0x0604
-#define USB_DEVICE_ID_GTCO_605		0x0605
-#define USB_DEVICE_ID_GTCO_606		0x0606
-#define USB_DEVICE_ID_GTCO_607		0x0607
-#define USB_DEVICE_ID_GTCO_608		0x0608
-#define USB_DEVICE_ID_GTCO_609		0x0609
-#define USB_DEVICE_ID_GTCO_609		0x0609
 #define USB_DEVICE_ID_GTCO_1000		0x1000
 #define USB_DEVICE_ID_GTCO_1001		0x1001
 #define USB_DEVICE_ID_GTCO_1002		0x1002
@@ -320,6 +315,7 @@
 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500	0xc512
 #define USB_DEVICE_ID_MX3000_RECEIVER	0xc513
 #define USB_DEVICE_ID_DINOVO_EDGE	0xc714
+#define USB_DEVICE_ID_DINOVO_MINI	0xc71f
 
 #define USB_VENDOR_ID_MCC		0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS	0x0076
@@ -332,6 +328,7 @@
 #define USB_VENDOR_ID_MICROSOFT		0x045e
 #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
 #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_DESKTOP_RECV_1028 0x00f9
 #define USB_DEVICE_ID_MS_NE4K		0x00db
 #define USB_DEVICE_ID_MS_LK6K		0x00f9
 
@@ -377,6 +374,9 @@
 #define USB_VENDOR_ID_SUN		0x0430
 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE	0xcdab
 
+#define USB_VENDOR_ID_SUNPLUS		0x04fc
+#define USB_DEVICE_ID_SUNPLUS_WDESKTOP	0x05d8
+
 #define USB_VENDOR_ID_TOPMAX		0x0663
 #define USB_DEVICE_ID_TOPMAX_COBRAPAD	0x0103
 
@@ -435,9 +435,13 @@
 	{ USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
 	
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
+
+	{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
 	{ USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV },
 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
+	{ USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
 
 	{ USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT },
@@ -518,16 +522,6 @@
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_600, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_601, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_602, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_603, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_604, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_605, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_606, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_607, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_608, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_609, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE },
@@ -601,6 +595,7 @@
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
@@ -608,7 +603,7 @@
 	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
-	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
 	{ USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -719,6 +714,7 @@
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_DESKTOP_RECV_1028, HID_QUIRK_RDESC_MICROSOFT_RECV_1028 },
 
 	{ USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER },
 
@@ -728,6 +724,8 @@
 
 	{ USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE },
 
+	{ USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP, HID_QUIRK_RDESC_SUNPLUS_WDESKTOP },
+
 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
 
@@ -793,8 +791,8 @@
  *
  * Returns: 0 OK, -error on failure.
  */
-int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
-		const u32 quirks)
+static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
+				const u32 quirks)
 {
 	struct quirks_list_struct *q_new, *q;
 	int list_edited = 0;
@@ -1002,6 +1000,17 @@
 	}
 }
 
+static void usbhid_fixup_sunplus_wdesktop(unsigned char *rdesc, int rsize)
+{
+	if (rsize >= 107 && rdesc[104] == 0x26
+			 && rdesc[105] == 0x80
+			 && rdesc[106] == 0x03) {
+		printk(KERN_INFO "Fixing up Sunplus Wireless Desktop report descriptor\n");
+		rdesc[105] = rdesc[110] = 0x03;
+		rdesc[106] = rdesc[111] = 0x21;
+	}
+}
+
 /*
  * Samsung IrDA remote controller (reports as Cypress USB Mouse).
  *
@@ -1089,6 +1098,28 @@
 	}
 }
 
+/*
+ * Microsoft Wireless Desktop Receiver (Model 1028) has several
+ * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+ */
+static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
+{
+	if (rsize == 571 && rdesc[284] == 0x19
+	                 && rdesc[286] == 0x2a
+	                 && rdesc[304] == 0x19
+	                 && rdesc[306] == 0x29
+	                 && rdesc[352] == 0x1a
+	                 && rdesc[355] == 0x2a
+			 && rdesc[557] == 0x19
+			 && rdesc[559] == 0x29) {
+		printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
+		rdesc[284] = rdesc[304] = rdesc[558] = 0x35;
+		rdesc[352] = 0x36;
+		rdesc[286] = rdesc[355] = 0x46;
+		rdesc[306] = rdesc[559] = 0x45;
+	}
+}
+
 static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
 {
 	if ((quirks & HID_QUIRK_RDESC_CYMOTION))
@@ -1112,6 +1143,11 @@
 	if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE)
 		usbhid_fixup_samsung_irda_descriptor(rdesc, rsize);
 
+	if (quirks & HID_QUIRK_RDESC_MICROSOFT_RECV_1028)
+		usbhid_fixup_microsoft_descriptor(rdesc, rsize);
+
+	if (quirks & HID_QUIRK_RDESC_SUNPLUS_WDESKTOP)
+		usbhid_fixup_sunplus_wdesktop(rdesc, rsize);
 }
 
 /**
@@ -1150,5 +1186,4 @@
 		else if (paramVendor == idVendor && paramProduct == idProduct)
 			__usbhid_fixup_report_descriptor(quirks, rdesc, rsize);
 	}
-
 }
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 5fc4019..95cc192 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -393,6 +393,153 @@
 /*
  * "ioctl" file op
  */
+static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
+{
+	struct hid_device *hid = hiddev->hid;
+	struct hiddev_report_info rinfo;
+	struct hiddev_usage_ref_multi *uref_multi = NULL;
+	struct hiddev_usage_ref *uref;
+	struct hid_report *report;
+	struct hid_field *field;
+	int i;
+
+	uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
+	if (!uref_multi)
+		return -ENOMEM;
+	uref = &uref_multi->uref;
+	if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+		if (copy_from_user(uref_multi, user_arg,
+				   sizeof(*uref_multi)))
+			goto fault;
+	} else {
+		if (copy_from_user(uref, user_arg, sizeof(*uref)))
+			goto fault;
+	}
+
+	switch (cmd) {
+	case HIDIOCGUCODE:
+		rinfo.report_type = uref->report_type;
+		rinfo.report_id = uref->report_id;
+		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
+			goto inval;
+
+		if (uref->field_index >= report->maxfield)
+			goto inval;
+
+		field = report->field[uref->field_index];
+		if (uref->usage_index >= field->maxusage)
+			goto inval;
+
+		uref->usage_code = field->usage[uref->usage_index].hid;
+
+		if (copy_to_user(user_arg, uref, sizeof(*uref)))
+			goto fault;
+
+		kfree(uref_multi);
+		return 0;
+
+	default:
+		if (cmd != HIDIOCGUSAGE &&
+		    cmd != HIDIOCGUSAGES &&
+		    uref->report_type == HID_REPORT_TYPE_INPUT)
+			goto inval;
+
+		if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
+			field = hiddev_lookup_usage(hid, uref);
+			if (field == NULL)
+				goto inval;
+		} else {
+			rinfo.report_type = uref->report_type;
+			rinfo.report_id = uref->report_id;
+			if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
+				goto inval;
+
+			if (uref->field_index >= report->maxfield)
+				goto inval;
+
+			field = report->field[uref->field_index];
+
+			if (cmd == HIDIOCGCOLLECTIONINDEX) {
+				if (uref->usage_index >= field->maxusage)
+					goto inval;
+			} else if (uref->usage_index >= field->report_count)
+				goto inval;
+
+			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+				  uref->usage_index + uref_multi->num_values > field->report_count))
+				goto inval;
+			}
+
+		switch (cmd) {
+		case HIDIOCGUSAGE:
+			uref->value = field->value[uref->usage_index];
+			if (copy_to_user(user_arg, uref, sizeof(*uref)))
+				goto fault;
+			goto goodreturn;
+
+		case HIDIOCSUSAGE:
+			field->value[uref->usage_index] = uref->value;
+			goto goodreturn;
+
+		case HIDIOCGCOLLECTIONINDEX:
+			kfree(uref_multi);
+			return field->usage[uref->usage_index].collection_index;
+		case HIDIOCGUSAGES:
+			for (i = 0; i < uref_multi->num_values; i++)
+				uref_multi->values[i] =
+				    field->value[uref->usage_index + i];
+			if (copy_to_user(user_arg, uref_multi,
+					 sizeof(*uref_multi)))
+				goto fault;
+			goto goodreturn;
+		case HIDIOCSUSAGES:
+			for (i = 0; i < uref_multi->num_values; i++)
+				field->value[uref->usage_index + i] =
+				    uref_multi->values[i];
+			goto goodreturn;
+		}
+
+goodreturn:
+		kfree(uref_multi);
+		return 0;
+fault:
+		kfree(uref_multi);
+		return -EFAULT;
+inval:
+		kfree(uref_multi);
+		return -EINVAL;
+	}
+}
+
+static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
+{
+	struct hid_device *hid = hiddev->hid;
+	struct usb_device *dev = hid_to_usb_dev(hid);
+	int idx, len;
+	char *buf;
+
+	if (get_user(idx, (int __user *)user_arg))
+		return -EFAULT;
+
+	if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL)
+		return -ENOMEM;
+
+	if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) {
+		kfree(buf);
+		return -EINVAL;
+	}
+
+	if (copy_to_user(user_arg+sizeof(int), buf, len+1)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+
+	return len;
+}
+
 static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct hiddev_list *list = file->private_data;
@@ -402,8 +549,6 @@
 	struct hiddev_collection_info cinfo;
 	struct hiddev_report_info rinfo;
 	struct hiddev_field_info finfo;
-	struct hiddev_usage_ref_multi *uref_multi = NULL;
-	struct hiddev_usage_ref *uref;
 	struct hiddev_devinfo dinfo;
 	struct hid_report *report;
 	struct hid_field *field;
@@ -470,30 +615,7 @@
 		}
 
 	case HIDIOCGSTRING:
-		{
-			int idx, len;
-			char *buf;
-
-			if (get_user(idx, (int __user *)arg))
-				return -EFAULT;
-
-			if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL)
-				return -ENOMEM;
-
-			if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) {
-				kfree(buf);
-				return -EINVAL;
-			}
-
-			if (copy_to_user(user_arg+sizeof(int), buf, len+1)) {
-				kfree(buf);
-				return -EFAULT;
-			}
-
-			kfree(buf);
-
-			return len;
-		}
+		return hiddev_ioctl_string(hiddev, cmd, user_arg);
 
 	case HIDIOCINITREPORT:
 		usbhid_init_reports(hid);
@@ -578,121 +700,13 @@
 		return 0;
 
 	case HIDIOCGUCODE:
-		uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
-		if (!uref_multi)
-			return -ENOMEM;
-		uref = &uref_multi->uref;
-		if (copy_from_user(uref, user_arg, sizeof(*uref)))
-			goto fault;
-
-		rinfo.report_type = uref->report_type;
-		rinfo.report_id = uref->report_id;
-		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-			goto inval;
-
-		if (uref->field_index >= report->maxfield)
-			goto inval;
-
-		field = report->field[uref->field_index];
-		if (uref->usage_index >= field->maxusage)
-			goto inval;
-
-		uref->usage_code = field->usage[uref->usage_index].hid;
-
-		if (copy_to_user(user_arg, uref, sizeof(*uref)))
-			goto fault;
-
-		kfree(uref_multi);
-		return 0;
-
+		/* fall through */
 	case HIDIOCGUSAGE:
 	case HIDIOCSUSAGE:
 	case HIDIOCGUSAGES:
 	case HIDIOCSUSAGES:
 	case HIDIOCGCOLLECTIONINDEX:
-		uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
-		if (!uref_multi)
-			return -ENOMEM;
-		uref = &uref_multi->uref;
-		if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
-			if (copy_from_user(uref_multi, user_arg,
-					   sizeof(*uref_multi)))
-				goto fault;
-		} else {
-			if (copy_from_user(uref, user_arg, sizeof(*uref)))
-				goto fault;
-		}
-
-		if (cmd != HIDIOCGUSAGE &&
-		    cmd != HIDIOCGUSAGES &&
-		    uref->report_type == HID_REPORT_TYPE_INPUT)
-			goto inval;
-
-		if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
-			field = hiddev_lookup_usage(hid, uref);
-			if (field == NULL)
-				goto inval;
-		} else {
-			rinfo.report_type = uref->report_type;
-			rinfo.report_id = uref->report_id;
-			if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-				goto inval;
-
-			if (uref->field_index >= report->maxfield)
-				goto inval;
-
-			field = report->field[uref->field_index];
-
-			if (cmd == HIDIOCGCOLLECTIONINDEX) {
-				if (uref->usage_index >= field->maxusage)
-					goto inval;
-			} else if (uref->usage_index >= field->report_count)
-				goto inval;
-
-			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-				  uref->usage_index + uref_multi->num_values > field->report_count))
-				goto inval;
-			}
-
-		switch (cmd) {
-			case HIDIOCGUSAGE:
-				uref->value = field->value[uref->usage_index];
-				if (copy_to_user(user_arg, uref, sizeof(*uref)))
-					goto fault;
-				goto goodreturn;
-
-			case HIDIOCSUSAGE:
-				field->value[uref->usage_index] = uref->value;
-				goto goodreturn;
-
-			case HIDIOCGCOLLECTIONINDEX:
-				kfree(uref_multi);
-				return field->usage[uref->usage_index].collection_index;
-			case HIDIOCGUSAGES:
-				for (i = 0; i < uref_multi->num_values; i++)
-					uref_multi->values[i] =
-					    field->value[uref->usage_index + i];
-				if (copy_to_user(user_arg, uref_multi,
-						 sizeof(*uref_multi)))
-					goto fault;
-				goto goodreturn;
-			case HIDIOCSUSAGES:
-				for (i = 0; i < uref_multi->num_values; i++)
-					field->value[uref->usage_index + i] =
-					    uref_multi->values[i];
-				goto goodreturn;
-		}
-
-goodreturn:
-		kfree(uref_multi);
-		return 0;
-fault:
-		kfree(uref_multi);
-		return -EFAULT;
-inval:
-		kfree(uref_multi);
-		return -EINVAL;
+		return hiddev_ioctl_usage(hiddev, cmd, user_arg);
 
 	case HIDIOCGCOLLECTIONINFO:
 		if (copy_from_user(&cinfo, user_arg, sizeof(cinfo)))
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 0023f96..62d2d7c 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/timer.h>
+#include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/input.h>
 
@@ -77,7 +78,7 @@
 	unsigned long stop_retry;                                       /* Time to give up, in jiffies */
 	unsigned int retry_delay;                                       /* Delay length in ms */
 	struct work_struct reset_work;                                  /* Task context for resets */
-
+	wait_queue_head_t wait;						/* For sleeping */
 };
 
 #define	hid_to_usb_dev(hid_dev) \
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 014dfa5..7137a17 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -1,45 +1,16 @@
 #
-# Character device configuration
+# I2C algorithm drivers configuration
 #
 
-menu "I2C Algorithms"
-
 config I2C_ALGOBIT
-	tristate "I2C bit-banging interfaces"
-	help
-	  This allows you to use a range of I2C adapters called bit-banging
-	  adapters.  Say Y if you own an I2C adapter belonging to this class
-	  and then say Y to the specific driver for you adapter below.
-
-	  This support is also available as a module.  If so, the module 
-	  will be called i2c-algo-bit.
+	tristate
 
 config I2C_ALGOPCF
-	tristate "I2C PCF 8584 interfaces"
-	help
-	  This allows you to use a range of I2C adapters called PCF adapters.
-	  Say Y if you own an I2C adapter belonging to this class and then say
-	  Y to the specific driver for you adapter below.
-
-	  This support is also available as a module.  If so, the module 
-	  will be called i2c-algo-pcf.
+	tristate
 
 config I2C_ALGOPCA
-	tristate "I2C PCA 9564 interfaces"
-	help
-	  This allows you to use a range of I2C adapters called PCA adapters.
-	  Say Y if you own an I2C adapter belonging to this class and then say
-	  Y to the specific driver for you adapter below.
-
-	  This support is also available as a module.  If so, the module 
-	  will be called i2c-algo-pca.
+	tristate
 
 config I2C_ALGO_SGI
-	tristate "I2C SGI interfaces"
+	tristate
 	depends on SGI_IP22 || SGI_IP32 || X86_VISWS
-	help
-	  Supports the SGI interfaces like the ones found on SGI Indy VINO
-	  or SGI O2 MACE.
-
-endmenu
-
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 2a16211..e954a20b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -1,6 +1,7 @@
 /*
- *  i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters                
+ *  i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters
  *    Copyright (C) 2004 Arcom Control Systems
+ *    Copyright (C) 2008 Pengutronix
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -21,14 +22,10 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pca.h>
-#include "i2c-algo-pca.h"
-
-#define DRIVER "i2c-algo-pca"
 
 #define DEB1(fmt, args...) do { if (i2c_debug>=1) printk(fmt, ## args); } while(0)
 #define DEB2(fmt, args...) do { if (i2c_debug>=2) printk(fmt, ## args); } while(0)
@@ -36,15 +33,15 @@
 
 static int i2c_debug;
 
-#define pca_outw(adap, reg, val) adap->write_byte(adap, reg, val)
-#define pca_inw(adap, reg) adap->read_byte(adap, reg)
+#define pca_outw(adap, reg, val) adap->write_byte(adap->data, reg, val)
+#define pca_inw(adap, reg) adap->read_byte(adap->data, reg)
 
 #define pca_status(adap) pca_inw(adap, I2C_PCA_STA)
-#define pca_clock(adap) adap->get_clock(adap)
-#define pca_own(adap) adap->get_own(adap)
+#define pca_clock(adap) adap->i2c_clock
 #define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
 #define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
-#define pca_wait(adap) adap->wait_for_interrupt(adap)
+#define pca_wait(adap) adap->wait_for_completion(adap->data)
+#define pca_reset(adap) adap->reset_chip(adap->data)
 
 /*
  * Generate a start condition on the i2c bus.
@@ -99,7 +96,7 @@
  *
  * returns after the address has been sent
  */
-static void pca_address(struct i2c_algo_pca_data *adap, 
+static void pca_address(struct i2c_algo_pca_data *adap,
 			struct i2c_msg *msg)
 {
 	int sta = pca_get_con(adap);
@@ -108,9 +105,9 @@
 	addr = ( (0x7f & msg->addr) << 1 );
 	if (msg->flags & I2C_M_RD )
 		addr |= 1;
-	DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n", 
+	DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
 	     msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
-	
+
 	pca_outw(adap, I2C_PCA_DAT, addr);
 
 	sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI);
@@ -124,7 +121,7 @@
  *
  * Returns after the byte has been transmitted
  */
-static void pca_tx_byte(struct i2c_algo_pca_data *adap, 
+static void pca_tx_byte(struct i2c_algo_pca_data *adap,
 			__u8 b)
 {
 	int sta = pca_get_con(adap);
@@ -142,19 +139,19 @@
  *
  * returns immediately.
  */
-static void pca_rx_byte(struct i2c_algo_pca_data *adap, 
+static void pca_rx_byte(struct i2c_algo_pca_data *adap,
 			__u8 *b, int ack)
 {
 	*b = pca_inw(adap, I2C_PCA_DAT);
 	DEB2("=== READ %#04x %s\n", *b, ack ? "ACK" : "NACK");
 }
 
-/* 
+/*
  * Setup ACK or NACK for next received byte and wait for it to arrive.
  *
  * Returns after next byte has arrived.
  */
-static void pca_rx_ack(struct i2c_algo_pca_data *adap, 
+static void pca_rx_ack(struct i2c_algo_pca_data *adap,
 		       int ack)
 {
 	int sta = pca_get_con(adap);
@@ -168,15 +165,6 @@
 	pca_wait(adap);
 }
 
-/* 
- * Reset the i2c bus / SIO 
- */
-static void pca_reset(struct i2c_algo_pca_data *adap)
-{
-	/* apparently only an external reset will do it. not a lot can be done */
-	printk(KERN_ERR DRIVER ": Haven't figured out how to do a reset yet\n");
-}
-
 static int pca_xfer(struct i2c_adapter *i2c_adap,
                     struct i2c_msg *msgs,
                     int num)
@@ -187,7 +175,7 @@
 	int numbytes = 0;
 	int state;
 	int ret;
-	int timeout = 100;
+	int timeout = i2c_adap->timeout;
 
 	while ((state = pca_status(adap)) != 0xf8 && timeout--) {
 		msleep(10);
@@ -203,14 +191,14 @@
 		for (curmsg = 0; curmsg < num; curmsg++) {
 			int addr, i;
 			msg = &msgs[curmsg];
-			
+
 			addr = (0x7f & msg->addr) ;
-		
+
 			if (msg->flags & I2C_M_RD )
-				printk(KERN_INFO "    [%02d] RD %d bytes from %#02x [%#02x, ...]\n", 
+				printk(KERN_INFO "    [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
 				       curmsg, msg->len, addr, (addr<<1) | 1);
 			else {
-				printk(KERN_INFO "    [%02d] WR %d bytes to %#02x [%#02x%s", 
+				printk(KERN_INFO "    [%02d] WR %d bytes to %#02x [%#02x%s",
 				       curmsg, msg->len, addr, addr<<1,
 				       msg->len == 0 ? "" : ", ");
 				for(i=0; i < msg->len; i++)
@@ -237,7 +225,7 @@
 		case 0x10: /* A repeated start condition has been transmitted */
 			pca_address(adap, msg);
 			break;
-			
+
 		case 0x18: /* SLA+W has been transmitted; ACK has been received */
 		case 0x28: /* Data byte in I2CDAT has been transmitted; ACK has been received */
 			if (numbytes < msg->len) {
@@ -287,7 +275,7 @@
 		case 0x38: /* Arbitration lost during SLA+W, SLA+R or data bytes */
 			DEB2("Arbitration lost\n");
 			goto out;
-			
+
 		case 0x58: /* Data byte has been received; NOT ACK has been returned */
 			if ( numbytes == msg->len - 1 ) {
 				pca_rx_byte(adap, &msg->buf[numbytes], 0);
@@ -317,16 +305,16 @@
 			pca_reset(adap);
 			goto out;
 		default:
-			printk(KERN_ERR DRIVER ": unhandled SIO state 0x%02x\n", state);
+			dev_err(&i2c_adap->dev, "unhandled SIO state 0x%02x\n", state);
 			break;
 		}
-		
+
 	}
 
 	ret = curmsg;
  out:
 	DEB1(KERN_CRIT "}}} transfered %d/%d messages. "
-	     "status is %#04x. control is %#04x\n", 
+	     "status is %#04x. control is %#04x\n",
 	     curmsg, num, pca_status(adap),
 	     pca_get_con(adap));
 	return ret;
@@ -337,53 +325,65 @@
         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-static int pca_init(struct i2c_algo_pca_data *adap)
-{
-	static int freqs[] = {330,288,217,146,88,59,44,36};
-	int own, clock;
-
-	own = pca_own(adap);
-	clock = pca_clock(adap);
-	DEB1(KERN_INFO DRIVER ": own address is %#04x\n", own);
-	DEB1(KERN_INFO DRIVER ": clock freqeuncy is %dkHz\n", freqs[clock]);
-
-	pca_outw(adap, I2C_PCA_ADR, own << 1);
-
-	pca_set_con(adap, I2C_PCA_CON_ENSIO | clock);
-	udelay(500); /* 500 µs for oscilator to stabilise */
-
-	return 0;
-}
-
 static const struct i2c_algorithm pca_algo = {
 	.master_xfer	= pca_xfer,
 	.functionality	= pca_func,
 };
 
-/* 
- * registering functions to load algorithms at runtime 
+static int pca_init(struct i2c_adapter *adap)
+{
+	static int freqs[] = {330,288,217,146,88,59,44,36};
+	int clock;
+	struct i2c_algo_pca_data *pca_data = adap->algo_data;
+
+	if (pca_data->i2c_clock > 7) {
+		printk(KERN_WARNING "%s: Invalid I2C clock speed selected. Trying default.\n",
+			adap->name);
+		pca_data->i2c_clock = I2C_PCA_CON_59kHz;
+	}
+
+	adap->algo = &pca_algo;
+
+	pca_reset(pca_data);
+
+	clock = pca_clock(pca_data);
+	DEB1(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]);
+
+	pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
+	udelay(500); /* 500 us for oscillator to stabilise */
+
+	return 0;
+}
+
+/*
+ * registering functions to load algorithms at runtime
  */
 int i2c_pca_add_bus(struct i2c_adapter *adap)
 {
-	struct i2c_algo_pca_data *pca_adap = adap->algo_data;
 	int rval;
 
-	/* register new adapter to i2c module... */
-	adap->algo = &pca_algo;
-
-	adap->timeout = 100;		/* default values, should	*/
-	adap->retries = 3;		/* be replaced by defines	*/
-
-	if ((rval = pca_init(pca_adap)))
+	rval = pca_init(adap);
+	if (rval)
 		return rval;
 
-	rval = i2c_add_adapter(adap);
-
-	return rval;
+	return i2c_add_adapter(adap);
 }
 EXPORT_SYMBOL(i2c_pca_add_bus);
 
-MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>");
+int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
+{
+	int rval;
+
+	rval = pca_init(adap);
+	if (rval)
+		return rval;
+
+	return i2c_add_numbered_adapter(adap);
+}
+EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
+
+MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
+	"Wolfram Sang <w.sang@pengutronix.de>");
 MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/i2c/algos/i2c-algo-pca.h b/drivers/i2c/algos/i2c-algo-pca.h
deleted file mode 100644
index 2fee07e..0000000
--- a/drivers/i2c/algos/i2c-algo-pca.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef I2C_PCA9564_H
-#define I2C_PCA9564_H 1
-
-#define I2C_PCA_STA		0x00 /* STATUS  Read Only  */
-#define I2C_PCA_TO		0x00 /* TIMEOUT Write Only */
-#define I2C_PCA_DAT		0x01 /* DATA    Read/Write */
-#define I2C_PCA_ADR		0x02 /* OWN ADR Read/Write */
-#define I2C_PCA_CON		0x03 /* CONTROL Read/Write */
-
-#define I2C_PCA_CON_AA		0x80 /* Assert Acknowledge */
-#define I2C_PCA_CON_ENSIO	0x40 /* Enable */
-#define I2C_PCA_CON_STA		0x20 /* Start */
-#define I2C_PCA_CON_STO		0x10 /* Stop */
-#define I2C_PCA_CON_SI		0x08 /* Serial Interrupt */
-#define I2C_PCA_CON_CR		0x07 /* Clock Rate (MASK) */
-
-#define I2C_PCA_CON_330kHz	0x00
-#define I2C_PCA_CON_288kHz	0x01
-#define I2C_PCA_CON_217kHz	0x02
-#define I2C_PCA_CON_146kHz	0x03
-#define I2C_PCA_CON_88kHz	0x04
-#define I2C_PCA_CON_59kHz	0x05
-#define I2C_PCA_CON_44kHz	0x06
-#define I2C_PCA_CON_36kHz	0x07
-
-#endif /* I2C_PCA9564_H */
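
For reference, the eight clock codes that pca_init() now validates (anything above 7 is rejected and replaced with I2C_PCA_CON_59kHz, code 5) are the low three bits of the CON register; the constants listed in the private header deleted above are presumably now provided by <linux/i2c-algo-pca.h>. A minimal sketch of the code-to-frequency mapping, copied from the freqs[] table in the patch:

#include <stdio.h>

int main(void)
{
	/* Frequencies selected by I2C_PCA_CON codes 0..7, mirroring the
	 * freqs[] table used by pca_init() in i2c-algo-pca.c.
	 */
	static const int freqs[] = {330, 288, 217, 146, 88, 59, 44, 36};
	int code;

	for (code = 0; code < 8; code++)
		printf("clock code %d -> %d kHz\n", code, freqs[code]);
	return 0;
}
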
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index b04c995..48438cc 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -100,9 +100,12 @@
 
 config I2C_BLACKFIN_TWI
 	tristate "Blackfin TWI I2C support"
-	depends on BF534 || BF536 || BF537
+	depends on BLACKFIN
 	help
-	  This is the TWI I2C device driver for Blackfin 534/536/537/54x.
+	  This is the TWI I2C device driver for Blackfin BF522, BF525,
+	  BF527, BF534, BF536, BF537 and BF54x. For other Blackfin processors,
+	  please don't use this driver.
+
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-bfin-twi.
 
@@ -135,7 +138,7 @@
 	  This supports the PCF8584 ISA bus I2C adapter.  Say Y if you own
 	  such an adapter.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called i2c-elektor.
 
 config I2C_GPIO
@@ -190,7 +193,7 @@
 	select I2C_ALGOBIT
 	help
 	  If you say yes to this option, support will be included for the Intel
-	  810/815 family of mainboard I2C interfaces.  Specifically, the 
+	  810/815 family of mainboard I2C interfaces.  Specifically, the
 	  following versions of the chipset are supported:
 	    i810AA
 	    i810AB
@@ -246,10 +249,10 @@
 
 config I2C_IBM_IIC
 	tristate "IBM PPC 4xx on-chip I2C interface"
-	depends on IBM_OCP
+	depends on 4xx
 	help
-	  Say Y here if you want to use IIC peripheral found on 
-	  embedded IBM PPC 4xx based systems. 
+	  Say Y here if you want to use IIC peripheral found on
+	  embedded IBM PPC 4xx based systems.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-ibm_iic.
@@ -269,7 +272,7 @@
 	depends on ARCH_IXP2000
 	select I2C_ALGOBIT
 	help
-	  Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based 
+	  Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
 	  system and are using GPIO lines for an I2C bus.
 
 	  This support is also available as a module. If so, the module
@@ -354,7 +357,7 @@
 	  on the parport driver.  This is meant for embedded systems. Don't say
 	  Y here if you intend to say Y or M there.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called i2c-parport.
 
 config I2C_PARPORT_LIGHT
@@ -372,12 +375,12 @@
 	  the clean but heavy parport handling is not an option.  The
 	  drawback is a reduced portability and the impossibility to
 	  daisy-chain other parallel port devices.
-	  
+
 	  Don't say Y here if you said Y or M to i2c-parport.  Saying M to
 	  both is possible but both modules should not be loaded at the same
 	  time.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called i2c-parport-light.
 
 config I2C_PASEMI
@@ -401,7 +404,7 @@
 
 	  This driver is deprecated in favor of the savagefb driver.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called i2c-prosavage.
 
 config I2C_S3C2410
@@ -417,7 +420,7 @@
 	depends on PCI
 	select I2C_ALGOBIT
 	help
-	  If you say yes to this option, support will be included for the 
+	  If you say yes to this option, support will be included for the
 	  S3 Savage 4 I2C interface.
 
 	  This driver is deprecated in favor of the savagefb driver.
@@ -452,7 +455,7 @@
 
 	  If you don't know what to do here, say N.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called scx200_i2c.
 
 	  This driver is deprecated and will be dropped soon. Use i2c-gpio
@@ -483,14 +486,14 @@
 
 	  If you don't know what to do here, say N.
 
-	  This support is also available as a module.  If so, the module 
+	  This support is also available as a module.  If so, the module
 	  will be called scx200_acb.
 
 config I2C_SIS5595
 	tristate "SiS 5595"
 	depends on PCI
 	help
-	  If you say yes to this option, support will be included for the 
+	  If you say yes to this option, support will be included for the
 	  SiS5595 SMBus (a subset of I2C) interface.
 
 	  This driver can also be built as a module.  If so, the module
@@ -500,7 +503,7 @@
 	tristate "SiS 630/730"
 	depends on PCI
 	help
-	  If you say yes to this option, support will be included for the 
+	  If you say yes to this option, support will be included for the
 	  SiS630 and SiS730 SMBus (a subset of I2C) interface.
 
 	  This driver can also be built as a module.  If so, the module
@@ -632,9 +635,9 @@
 	select I2C_ALGOPCA
 	default n
 	help
-	  This driver supports ISA boards using the Philips PCA 9564
-	  Parallel bus to I2C bus controller
-	  
+	  This driver supports ISA boards using the Philips PCA9564
+	  parallel bus to I2C bus controller.
+
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-pca-isa.
 
@@ -643,6 +646,17 @@
 	  delays when I2C/SMBus chip drivers are loaded (e.g. at boot
 	  time).  If unsure, say N.
 
+config I2C_PCA_PLATFORM
+	tristate "PCA9564 as platform device"
+	select I2C_ALGOPCA
+	default n
+	help
+	  This driver supports a memory mapped Philips PCA9564
+	  parallel bus to I2C bus controller.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-pca-platform.
+
 config I2C_MV64XXX
 	tristate "Marvell mv64xxx I2C Controller"
 	depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL
@@ -672,4 +686,23 @@
 	  This driver can also be built as module. If so, the module
 	  will be called i2c-pmcmsp.
 
+config I2C_SH7760
+	tristate "Renesas SH7760 I2C Controller"
+	depends on CPU_SUBTYPE_SH7760
+	help
+	  This driver supports the 2 I2C interfaces on the Renesas SH7760.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-sh7760.
+
+config I2C_SH_MOBILE
+	tristate "SuperH Mobile I2C Controller"
+	depends on SUPERH
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in I2C interface on the Renesas SH-Mobile processor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-sh_mobile.
+
 endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index ea7068f..e8c882a 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@
 obj-$(CONFIG_I2C_PARPORT_LIGHT)	+= i2c-parport-light.o
 obj-$(CONFIG_I2C_PASEMI)	+= i2c-pasemi.o
 obj-$(CONFIG_I2C_PCA_ISA)	+= i2c-pca-isa.o
+obj-$(CONFIG_I2C_PCA_PLATFORM)	+= i2c-pca-platform.o
 obj-$(CONFIG_I2C_PIIX4)		+= i2c-piix4.o
 obj-$(CONFIG_I2C_PMCMSP)	+= i2c-pmcmsp.o
 obj-$(CONFIG_I2C_PNX)		+= i2c-pnx.o
@@ -37,6 +38,8 @@
 obj-$(CONFIG_I2C_PXA)		+= i2c-pxa.o
 obj-$(CONFIG_I2C_S3C2410)	+= i2c-s3c2410.o
 obj-$(CONFIG_I2C_SAVAGE4)	+= i2c-savage4.o
+obj-$(CONFIG_I2C_SH7760)	+= i2c-sh7760.o
+obj-$(CONFIG_I2C_SH_MOBILE)	+= i2c-sh_mobile.o
 obj-$(CONFIG_I2C_SIBYTE)	+= i2c-sibyte.o
 obj-$(CONFIG_I2C_SIMTEC)	+= i2c-simtec.o
 obj-$(CONFIG_I2C_SIS5595)	+= i2c-sis5595.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c09b036..73d6194 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -298,7 +298,7 @@
 #endif
 
 /* work with "modprobe at91_i2c" from hotplugging or coldplugging */
-MODULE_ALIAS("at91_i2c");
+MODULE_ALIAS("platform:at91_i2c");
 
 static struct platform_driver at91_i2c_driver = {
 	.probe		= at91_i2c_probe,
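
The MODULE_ALIAS("platform:...") lines added throughout this series let udev coldplug load the module from the MODALIAS uevent that the platform bus emits for the device; the alias suffix therefore has to match the name under which board code registers the device. Roughly, as an illustration only (the device registration below is not part of this patch):

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device at91_i2c_device = {
	.name	= "at91_i2c",	/* uevent carries MODALIAS=platform:at91_i2c */
	.id	= -1,
};

static int __init board_i2c_init(void)
{
	return platform_device_register(&at91_i2c_device);
}
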
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 1953b26..491718f 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -472,6 +472,7 @@
 MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
 MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:au1xpsc_smbus");
 
 module_init (i2c_au1550_init);
 module_exit (i2c_au1550_exit);
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 7dbdaeb..48d084b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -1,25 +1,11 @@
 /*
- * drivers/i2c/busses/i2c-bfin-twi.c
+ * Blackfin On-Chip Two Wire Interface Driver
  *
- * Description: Driver for Blackfin Two Wire Interface
+ * Copyright 2005-2007 Analog Devices Inc.
  *
- * Author:      sonicz  <sonic.zhang@analog.com>
+ * Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (c) 2005-2007 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/module.h>
@@ -34,14 +20,16 @@
 #include <linux/platform_device.h>
 
 #include <asm/blackfin.h>
+#include <asm/portmux.h>
 #include <asm/irq.h>
 
 #define POLL_TIMEOUT       (2 * HZ)
 
 /* SMBus mode*/
-#define TWI_I2C_MODE_STANDARD		0x01
-#define TWI_I2C_MODE_STANDARDSUB	0x02
-#define TWI_I2C_MODE_COMBINED		0x04
+#define TWI_I2C_MODE_STANDARD		1
+#define TWI_I2C_MODE_STANDARDSUB	2
+#define TWI_I2C_MODE_COMBINED		3
+#define TWI_I2C_MODE_REPEAT		4
 
 struct bfin_twi_iface {
 	int			irq;
@@ -58,39 +46,74 @@
 	struct timer_list	timeout_timer;
 	struct i2c_adapter	adap;
 	struct completion	complete;
+	struct i2c_msg 		*pmsg;
+	int			msg_num;
+	int			cur_msg;
+	void __iomem		*regs_base;
 };
 
-static struct bfin_twi_iface twi_iface;
+
+#define DEFINE_TWI_REG(reg, off) \
+static inline u16 read_##reg(struct bfin_twi_iface *iface) \
+	{ return bfin_read16(iface->regs_base + (off)); } \
+static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \
+	{ bfin_write16(iface->regs_base + (off), v); }
+
+DEFINE_TWI_REG(CLKDIV, 0x00)
+DEFINE_TWI_REG(CONTROL, 0x04)
+DEFINE_TWI_REG(SLAVE_CTL, 0x08)
+DEFINE_TWI_REG(SLAVE_STAT, 0x0C)
+DEFINE_TWI_REG(SLAVE_ADDR, 0x10)
+DEFINE_TWI_REG(MASTER_CTL, 0x14)
+DEFINE_TWI_REG(MASTER_STAT, 0x18)
+DEFINE_TWI_REG(MASTER_ADDR, 0x1C)
+DEFINE_TWI_REG(INT_STAT, 0x20)
+DEFINE_TWI_REG(INT_MASK, 0x24)
+DEFINE_TWI_REG(FIFO_CTL, 0x28)
+DEFINE_TWI_REG(FIFO_STAT, 0x2C)
+DEFINE_TWI_REG(XMT_DATA8, 0x80)
+DEFINE_TWI_REG(XMT_DATA16, 0x84)
+DEFINE_TWI_REG(RCV_DATA8, 0x88)
+DEFINE_TWI_REG(RCV_DATA16, 0x8C)
+
+static const u16 pin_req[2][3] = {
+	{P_TWI0_SCL, P_TWI0_SDA, 0},
+	{P_TWI1_SCL, P_TWI1_SDA, 0},
+};
 
 static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
 {
-	unsigned short twi_int_status = bfin_read_TWI_INT_STAT();
-	unsigned short mast_stat = bfin_read_TWI_MASTER_STAT();
+	unsigned short twi_int_status = read_INT_STAT(iface);
+	unsigned short mast_stat = read_MASTER_STAT(iface);
 
 	if (twi_int_status & XMTSERV) {
 		/* Transmit next data */
 		if (iface->writeNum > 0) {
-			bfin_write_TWI_XMT_DATA8(*(iface->transPtr++));
+			write_XMT_DATA8(iface, *(iface->transPtr++));
 			iface->writeNum--;
 		}
 		/* start receive immediately after complete sending in
 		 * combine mode.
 		 */
-		else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
-			bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL()
-				| MDIR | RSTART);
-		} else if (iface->manual_stop)
-			bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL()
-				| STOP);
+		else if (iface->cur_mode == TWI_I2C_MODE_COMBINED)
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | MDIR | RSTART);
+		else if (iface->manual_stop)
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | STOP);
+		else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
+				iface->cur_msg+1 < iface->msg_num)
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | RSTART);
 		SSYNC();
 		/* Clear status */
-		bfin_write_TWI_INT_STAT(XMTSERV);
+		write_INT_STAT(iface, XMTSERV);
 		SSYNC();
 	}
 	if (twi_int_status & RCVSERV) {
 		if (iface->readNum > 0) {
 			/* Receive next data */
-			*(iface->transPtr) = bfin_read_TWI_RCV_DATA8();
+			*(iface->transPtr) = read_RCV_DATA8(iface);
 			if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
 				/* Change combine mode into sub mode after
 				 * read first data.
@@ -105,28 +128,33 @@
 			iface->transPtr++;
 			iface->readNum--;
 		} else if (iface->manual_stop) {
-			bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL()
-				| STOP);
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | STOP);
+			SSYNC();
+		} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
+				iface->cur_msg+1 < iface->msg_num) {
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | RSTART);
 			SSYNC();
 		}
 		/* Clear interrupt source */
-		bfin_write_TWI_INT_STAT(RCVSERV);
+		write_INT_STAT(iface, RCVSERV);
 		SSYNC();
 	}
 	if (twi_int_status & MERR) {
-		bfin_write_TWI_INT_STAT(MERR);
-		bfin_write_TWI_INT_MASK(0);
-		bfin_write_TWI_MASTER_STAT(0x3e);
-		bfin_write_TWI_MASTER_CTL(0);
+		write_INT_STAT(iface, MERR);
+		write_INT_MASK(iface, 0);
+		write_MASTER_STAT(iface, 0x3e);
+		write_MASTER_CTL(iface, 0);
 		SSYNC();
-		iface->result = -1;
+		iface->result = -EIO;
 		/* if both err and complete int stats are set, return proper
 		 * results.
 		 */
 		if (twi_int_status & MCOMP) {
-			bfin_write_TWI_INT_STAT(MCOMP);
-			bfin_write_TWI_INT_MASK(0);
-			bfin_write_TWI_MASTER_CTL(0);
+			write_INT_STAT(iface, MCOMP);
+			write_INT_MASK(iface, 0);
+			write_MASTER_CTL(iface, 0);
 			SSYNC();
 			/* If it is a quick transfer, only address but no data,
 			 * not an err, return 1.
@@ -143,7 +171,7 @@
 		return;
 	}
 	if (twi_int_status & MCOMP) {
-		bfin_write_TWI_INT_STAT(MCOMP);
+		write_INT_STAT(iface, MCOMP);
 		SSYNC();
 		if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
 			if (iface->readNum == 0) {
@@ -152,28 +180,63 @@
 				 */
 				iface->readNum = 1;
 				iface->manual_stop = 1;
-				bfin_write_TWI_MASTER_CTL(
-					bfin_read_TWI_MASTER_CTL()
-					| (0xff << 6));
+				write_MASTER_CTL(iface,
+					read_MASTER_CTL(iface) | (0xff << 6));
 			} else {
 				/* set the read number in other
 				 * combine mode.
 				 */
-				bfin_write_TWI_MASTER_CTL(
-					(bfin_read_TWI_MASTER_CTL() &
+				write_MASTER_CTL(iface,
+					(read_MASTER_CTL(iface) &
 					(~(0xff << 6))) |
-					( iface->readNum << 6));
+					(iface->readNum << 6));
 			}
 			/* remove restart bit and enable master receive */
-			bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() &
-				~RSTART);
-			bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() |
-				MEN | MDIR);
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) & ~RSTART);
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) | MEN | MDIR);
+			SSYNC();
+		} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
+				iface->cur_msg+1 < iface->msg_num) {
+			iface->cur_msg++;
+			iface->transPtr = iface->pmsg[iface->cur_msg].buf;
+			iface->writeNum = iface->readNum =
+				iface->pmsg[iface->cur_msg].len;
+			/* Set Transmit device address */
+			write_MASTER_ADDR(iface,
+				iface->pmsg[iface->cur_msg].addr);
+			if (iface->pmsg[iface->cur_msg].flags & I2C_M_RD)
+				iface->read_write = I2C_SMBUS_READ;
+			else {
+				iface->read_write = I2C_SMBUS_WRITE;
+				/* Transmit first data */
+				if (iface->writeNum > 0) {
+					write_XMT_DATA8(iface,
+						*(iface->transPtr++));
+					iface->writeNum--;
+					SSYNC();
+				}
+			}
+
+			if (iface->pmsg[iface->cur_msg].len <= 255)
+				write_MASTER_CTL(iface,
+				iface->pmsg[iface->cur_msg].len << 6);
+			else {
+				write_MASTER_CTL(iface, 0xff << 6);
+				iface->manual_stop = 1;
+			}
+			/* remove restart bit and enable master receive */
+			write_MASTER_CTL(iface,
+				read_MASTER_CTL(iface) & ~RSTART);
+			write_MASTER_CTL(iface, read_MASTER_CTL(iface) |
+				MEN | ((iface->read_write == I2C_SMBUS_READ) ?
+				MDIR : 0));
 			SSYNC();
 		} else {
 			iface->result = 1;
-			bfin_write_TWI_INT_MASK(0);
-			bfin_write_TWI_MASTER_CTL(0);
+			write_INT_MASK(iface, 0);
+			write_MASTER_CTL(iface, 0);
 			SSYNC();
 			complete(&iface->complete);
 		}
@@ -221,91 +284,85 @@
 {
 	struct bfin_twi_iface *iface = adap->algo_data;
 	struct i2c_msg *pmsg;
-	int i, ret;
 	int rc = 0;
 
-	if (!(bfin_read_TWI_CONTROL() & TWI_ENA))
+	if (!(read_CONTROL(iface) & TWI_ENA))
 		return -ENXIO;
 
-	while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) {
+	while (read_MASTER_STAT(iface) & BUSBUSY)
 		yield();
+
+	iface->pmsg = msgs;
+	iface->msg_num = num;
+	iface->cur_msg = 0;
+
+	pmsg = &msgs[0];
+	if (pmsg->flags & I2C_M_TEN) {
+		dev_err(&adap->dev, "10 bits addr not supported!\n");
+		return -EINVAL;
 	}
 
-	ret = 0;
-	for (i = 0; rc >= 0 && i < num; i++) {
-		pmsg = &msgs[i];
-		if (pmsg->flags & I2C_M_TEN) {
-			dev_err(&(adap->dev), "i2c-bfin-twi: 10 bits addr "
-				"not supported !\n");
-			rc = -EINVAL;
-			break;
+	iface->cur_mode = TWI_I2C_MODE_REPEAT;
+	iface->manual_stop = 0;
+	iface->transPtr = pmsg->buf;
+	iface->writeNum = iface->readNum = pmsg->len;
+	iface->result = 0;
+	iface->timeout_count = 10;
+	init_completion(&(iface->complete));
+	/* Set Transmit device address */
+	write_MASTER_ADDR(iface, pmsg->addr);
+
+	/* FIFO Initiation. Data in FIFO should be
+	 *  discarded before starting a new operation.
+	 */
+	write_FIFO_CTL(iface, 0x3);
+	SSYNC();
+	write_FIFO_CTL(iface, 0);
+	SSYNC();
+
+	if (pmsg->flags & I2C_M_RD)
+		iface->read_write = I2C_SMBUS_READ;
+	else {
+		iface->read_write = I2C_SMBUS_WRITE;
+		/* Transmit first data */
+		if (iface->writeNum > 0) {
+			write_XMT_DATA8(iface, *(iface->transPtr++));
+			iface->writeNum--;
+			SSYNC();
 		}
-
-		iface->cur_mode = TWI_I2C_MODE_STANDARD;
-		iface->manual_stop = 0;
-		iface->transPtr = pmsg->buf;
-		iface->writeNum = iface->readNum = pmsg->len;
-		iface->result = 0;
-		iface->timeout_count = 10;
-		/* Set Transmit device address */
-		bfin_write_TWI_MASTER_ADDR(pmsg->addr);
-
-		/* FIFO Initiation. Data in FIFO should be
-		 *  discarded before start a new operation.
-		 */
-		bfin_write_TWI_FIFO_CTL(0x3);
-		SSYNC();
-		bfin_write_TWI_FIFO_CTL(0);
-		SSYNC();
-
-		if (pmsg->flags & I2C_M_RD)
-			iface->read_write = I2C_SMBUS_READ;
-		else {
-			iface->read_write = I2C_SMBUS_WRITE;
-			/* Transmit first data */
-			if (iface->writeNum > 0) {
-				bfin_write_TWI_XMT_DATA8(*(iface->transPtr++));
-				iface->writeNum--;
-				SSYNC();
-			}
-		}
-
-		/* clear int stat */
-		bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV);
-
-		/* Interrupt mask . Enable XMT, RCV interrupt */
-		bfin_write_TWI_INT_MASK(MCOMP | MERR |
-			((iface->read_write == I2C_SMBUS_READ)?
-			RCVSERV : XMTSERV));
-		SSYNC();
-
-		if (pmsg->len > 0 && pmsg->len <= 255)
-			bfin_write_TWI_MASTER_CTL(pmsg->len << 6);
-		else if (pmsg->len > 255) {
-			bfin_write_TWI_MASTER_CTL(0xff << 6);
-			iface->manual_stop = 1;
-		} else
-			break;
-
-		iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
-		add_timer(&iface->timeout_timer);
-
-		/* Master enable */
-		bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN |
-			((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
-			((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
-		SSYNC();
-
-		wait_for_completion(&iface->complete);
-
-		rc = iface->result;
-		if (rc == 1)
-			ret++;
-		else if (rc == -1)
-			break;
 	}
 
-	return ret;
+	/* clear int stat */
+	write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV);
+
+	/* Interrupt mask. Enable XMT, RCV interrupts */
+	write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
+	SSYNC();
+
+	if (pmsg->len <= 255)
+		write_MASTER_CTL(iface, pmsg->len << 6);
+	else {
+		write_MASTER_CTL(iface, 0xff << 6);
+		iface->manual_stop = 1;
+	}
+
+	iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
+	add_timer(&iface->timeout_timer);
+
+	/* Master enable */
+	write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
+		((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
+		((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
+	SSYNC();
+
+	wait_for_completion(&iface->complete);
+
+	rc = iface->result;
+
+	if (rc == 1)
+		return num;
+	else
+		return rc;
 }
 
 /*
@@ -319,12 +376,11 @@
 	struct bfin_twi_iface *iface = adap->algo_data;
 	int rc = 0;
 
-	if (!(bfin_read_TWI_CONTROL() & TWI_ENA))
+	if (!(read_CONTROL(iface) & TWI_ENA))
 		return -ENXIO;
 
-	while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) {
+	while (read_MASTER_STAT(iface) & BUSBUSY)
 		yield();
-	}
 
 	iface->writeNum = 0;
 	iface->readNum = 0;
@@ -392,19 +448,20 @@
 	iface->read_write = read_write;
 	iface->command = command;
 	iface->timeout_count = 10;
+	init_completion(&(iface->complete));
 
 	/* FIFO Initiation. Data in FIFO should be discarded before
 	 * start a new operation.
 	 */
-	bfin_write_TWI_FIFO_CTL(0x3);
+	write_FIFO_CTL(iface, 0x3);
 	SSYNC();
-	bfin_write_TWI_FIFO_CTL(0);
+	write_FIFO_CTL(iface, 0);
 
 	/* clear int stat */
-	bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV);
+	write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV);
 
 	/* Set Transmit device address */
-	bfin_write_TWI_MASTER_ADDR(addr);
+	write_MASTER_ADDR(iface, addr);
 	SSYNC();
 
 	iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
@@ -412,60 +469,64 @@
 
 	switch (iface->cur_mode) {
 	case TWI_I2C_MODE_STANDARDSUB:
-		bfin_write_TWI_XMT_DATA8(iface->command);
-		bfin_write_TWI_INT_MASK(MCOMP | MERR |
+		write_XMT_DATA8(iface, iface->command);
+		write_INT_MASK(iface, MCOMP | MERR |
 			((iface->read_write == I2C_SMBUS_READ) ?
 			RCVSERV : XMTSERV));
 		SSYNC();
 
 		if (iface->writeNum + 1 <= 255)
-			bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6);
+			write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
 		else {
-			bfin_write_TWI_MASTER_CTL(0xff << 6);
+			write_MASTER_CTL(iface, 0xff << 6);
 			iface->manual_stop = 1;
 		}
 		/* Master enable */
-		bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN |
+		write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
 			((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
 		break;
 	case TWI_I2C_MODE_COMBINED:
-		bfin_write_TWI_XMT_DATA8(iface->command);
-		bfin_write_TWI_INT_MASK(MCOMP | MERR | RCVSERV | XMTSERV);
+		write_XMT_DATA8(iface, iface->command);
+		write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
 		SSYNC();
 
 		if (iface->writeNum > 0)
-			bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6);
+			write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
 		else
-			bfin_write_TWI_MASTER_CTL(0x1 << 6);
+			write_MASTER_CTL(iface, 0x1 << 6);
 		/* Master enable */
-		bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN |
+		write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
 			((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
 		break;
 	default:
-		bfin_write_TWI_MASTER_CTL(0);
+		write_MASTER_CTL(iface, 0);
 		if (size != I2C_SMBUS_QUICK) {
 			/* Don't access xmit data register when this is a
 			 * read operation.
 			 */
 			if (iface->read_write != I2C_SMBUS_READ) {
 				if (iface->writeNum > 0) {
-					bfin_write_TWI_XMT_DATA8(*(iface->transPtr++));
+					write_XMT_DATA8(iface,
+						*(iface->transPtr++));
 					if (iface->writeNum <= 255)
-						bfin_write_TWI_MASTER_CTL(iface->writeNum << 6);
+						write_MASTER_CTL(iface,
+							iface->writeNum << 6);
 					else {
-						bfin_write_TWI_MASTER_CTL(0xff << 6);
+						write_MASTER_CTL(iface,
+							0xff << 6);
 						iface->manual_stop = 1;
 					}
 					iface->writeNum--;
 				} else {
-					bfin_write_TWI_XMT_DATA8(iface->command);
-					bfin_write_TWI_MASTER_CTL(1 << 6);
+					write_XMT_DATA8(iface, iface->command);
+					write_MASTER_CTL(iface, 1 << 6);
 				}
 			} else {
 				if (iface->readNum > 0 && iface->readNum <= 255)
-					bfin_write_TWI_MASTER_CTL(iface->readNum << 6);
+					write_MASTER_CTL(iface,
+						iface->readNum << 6);
 				else if (iface->readNum > 255) {
-					bfin_write_TWI_MASTER_CTL(0xff << 6);
+					write_MASTER_CTL(iface, 0xff << 6);
 					iface->manual_stop = 1;
 				} else {
 					del_timer(&iface->timeout_timer);
@@ -473,13 +534,13 @@
 				}
 			}
 		}
-		bfin_write_TWI_INT_MASK(MCOMP | MERR |
+		write_INT_MASK(iface, MCOMP | MERR |
 			((iface->read_write == I2C_SMBUS_READ) ?
 			RCVSERV : XMTSERV));
 		SSYNC();
 
 		/* Master enable */
-		bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN |
+		write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
 			((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
 			((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
 		break;
@@ -514,10 +575,10 @@
 
 static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state)
 {
-/*	struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/
+	struct bfin_twi_iface *iface = platform_get_drvdata(dev);
 
 	/* Disable TWI */
-	bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() & ~TWI_ENA);
+	write_CONTROL(iface, read_CONTROL(iface) & ~TWI_ENA);
 	SSYNC();
 
 	return 0;
@@ -525,24 +586,52 @@
 
 static int i2c_bfin_twi_resume(struct platform_device *dev)
 {
-/*	struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/
+	struct bfin_twi_iface *iface = platform_get_drvdata(dev);
 
 	/* Enable TWI */
-	bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA);
+	write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
 	SSYNC();
 
 	return 0;
 }
 
-static int i2c_bfin_twi_probe(struct platform_device *dev)
+static int i2c_bfin_twi_probe(struct platform_device *pdev)
 {
-	struct bfin_twi_iface *iface = &twi_iface;
+	struct bfin_twi_iface *iface;
 	struct i2c_adapter *p_adap;
+	struct resource *res;
 	int rc;
 
+	iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL);
+	if (!iface) {
+		dev_err(&pdev->dev, "Cannot allocate memory\n");
+		rc = -ENOMEM;
+		goto out_error_nomem;
+	}
+
 	spin_lock_init(&(iface->lock));
-	init_completion(&(iface->complete));
-	iface->irq = IRQ_TWI;
+
+	/* Find and map our resources */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		rc = -ENOENT;
+		goto out_error_get_res;
+	}
+
+	iface->regs_base = ioremap(res->start, res->end - res->start + 1);
+	if (iface->regs_base == NULL) {
+		dev_err(&pdev->dev, "Cannot map IO\n");
+		rc = -ENXIO;
+		goto out_error_ioremap;
+	}
+
+	iface->irq = platform_get_irq(pdev, 0);
+	if (iface->irq < 0) {
+		dev_err(&pdev->dev, "No IRQ specified\n");
+		rc = -ENOENT;
+		goto out_error_no_irq;
+	}
 
 	init_timer(&(iface->timeout_timer));
 	iface->timeout_timer.function = bfin_twi_timeout;
@@ -550,39 +639,63 @@
 
 	p_adap = &iface->adap;
 	p_adap->id = I2C_HW_BLACKFIN;
-	p_adap->nr = dev->id;
-	strlcpy(p_adap->name, dev->name, sizeof(p_adap->name));
+	p_adap->nr = pdev->id;
+	strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
 	p_adap->algo = &bfin_twi_algorithm;
 	p_adap->algo_data = iface;
 	p_adap->class = I2C_CLASS_ALL;
-	p_adap->dev.parent = &dev->dev;
+	p_adap->dev.parent = &pdev->dev;
+
+	rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
+	if (rc) {
+		dev_err(&pdev->dev, "Can't setup pin mux!\n");
+		goto out_error_pin_mux;
+	}
 
 	rc = request_irq(iface->irq, bfin_twi_interrupt_entry,
-		IRQF_DISABLED, dev->name, iface);
+		IRQF_DISABLED, pdev->name, iface);
 	if (rc) {
-		dev_err(&(p_adap->dev), "i2c-bfin-twi: can't get IRQ %d !\n",
-			iface->irq);
-		return -ENODEV;
+		dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq);
+		rc = -ENODEV;
+		goto out_error_req_irq;
 	}
 
 	/* Set TWI internal clock as 10MHz */
-	bfin_write_TWI_CONTROL(((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
+	write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
 
 	/* Set Twi interface clock as specified */
-	bfin_write_TWI_CLKDIV((( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ )
-			<< 8) | (( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ )
+	write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
+			<< 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
 			& 0xFF));
 
 	/* Enable TWI */
-	bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA);
+	write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
 	SSYNC();
 
 	rc = i2c_add_numbered_adapter(p_adap);
-	if (rc < 0)
-		free_irq(iface->irq, iface);
-	else
-		platform_set_drvdata(dev, iface);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Can't add i2c adapter!\n");
+		goto out_error_add_adapter;
+	}
 
+	platform_set_drvdata(pdev, iface);
+
+	dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
+		"regs_base@%p\n", iface->regs_base);
+
+	return 0;
+
+out_error_add_adapter:
+	free_irq(iface->irq, iface);
+out_error_req_irq:
+out_error_no_irq:
+	peripheral_free_list(pin_req[pdev->id]);
+out_error_pin_mux:
+	iounmap(iface->regs_base);
+out_error_ioremap:
+out_error_get_res:
+	kfree(iface);
+out_error_nomem:
 	return rc;
 }
 
@@ -594,6 +707,9 @@
 
 	i2c_del_adapter(&(iface->adap));
 	free_irq(iface->irq, iface);
+	peripheral_free_list(pin_req[pdev->id]);
+	iounmap(iface->regs_base);
+	kfree(iface);
 
 	return 0;
 }
@@ -611,8 +727,6 @@
 
 static int __init i2c_bfin_twi_init(void)
 {
-	pr_info("I2C: Blackfin I2C TWI driver\n");
-
 	return platform_driver_register(&i2c_bfin_twi_driver);
 }
 
@@ -621,9 +735,10 @@
 	platform_driver_unregister(&i2c_bfin_twi_driver);
 }
 
-MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("I2C-Bus adapter routines for Blackfin TWI");
-MODULE_LICENSE("GPL");
-
 module_init(i2c_bfin_twi_init);
 module_exit(i2c_bfin_twi_exit);
+
+MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index fde2634..7ecbfc4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -328,7 +328,7 @@
 	int i;
 	int ret;
 
-	dev_dbg(dev->dev, "%s: msgs: %d\n", __FUNCTION__, num);
+	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
 
 	ret = i2c_davinci_wait_bus_not_busy(dev, 1);
 	if (ret < 0) {
@@ -342,7 +342,7 @@
 			return ret;
 	}
 
-	dev_dbg(dev->dev, "%s:%d ret: %d\n", __FUNCTION__, __LINE__, ret);
+	dev_dbg(dev->dev, "%s:%d ret: %d\n", __func__, __LINE__, ret);
 
 	return num;
 }
@@ -364,7 +364,7 @@
 	u16 w;
 
 	while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
-		dev_dbg(dev->dev, "%s: stat=0x%x\n", __FUNCTION__, stat);
+		dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
 		if (count++ == 100) {
 			dev_warn(dev->dev, "Too much work in one IRQ\n");
 			break;
@@ -553,6 +553,9 @@
 	return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c_davinci");
+
 static struct platform_driver davinci_i2c_driver = {
 	.probe		= davinci_i2c_probe,
 	.remove		= davinci_i2c_remove,
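
The __FUNCTION__ to __func__ conversions here (and in i2c-pmcmsp, i2c-pnx, i2c-pxa and i2c-s3c2410 further down) replace the old GCC-specific spelling with the C99 predefined identifier. A short illustration, not taken from the patch:

#include <linux/device.h>

static void example(struct device *dev)
{
	/*
	 * __func__ behaves like a local 'static const char __func__[]',
	 * so unlike a string literal it cannot be pasted into the format
	 * string and is passed through "%s" instead.
	 */
	dev_dbg(dev, "%s: entered\n", __func__);
}
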
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 3ca19fc..7c1b762 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -220,3 +220,4 @@
 MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
 MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c-gpio");
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 22bb247..85dbf34 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -6,6 +6,9 @@
  * Copyright (c) 2003, 2004 Zultys Technologies.
  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
  *
+ * Copyright (c) 2008 PIKA Technologies
+ * Sean MacLennan <smaclennan@pikatech.com>
+ *
  * Based on original work by
  * 	Ian DaSilva  <idasilva@mvista.com>
  *      Armin Kuster <akuster@mvista.com>
@@ -39,12 +42,17 @@
 #include <asm/io.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
+
+#ifdef CONFIG_IBM_OCP
 #include <asm/ocp.h>
 #include <asm/ibm4xx.h>
+#else
+#include <linux/of_platform.h>
+#endif
 
 #include "i2c-ibm_iic.h"
 
-#define DRIVER_VERSION "2.1"
+#define DRIVER_VERSION "2.2"
 
 MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION);
 MODULE_LICENSE("GPL");
@@ -650,13 +658,14 @@
 	opb /= 1000000;
 
 	if (opb < 20 || opb > 150){
-		printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n",
+		printk(KERN_WARNING "ibm-iic: invalid OPB clock frequency %u MHz\n",
 			opb);
 		opb = opb < 20 ? 20 : 150;
 	}
 	return (u8)((opb + 9) / 10 - 1);
 }
 
+#ifdef CONFIG_IBM_OCP
 /*
  * Register single IIC interface
  */
@@ -672,7 +681,7 @@
 			ocp->def->index);
 
 	if (!(dev = kzalloc(sizeof(*dev), GFP_KERNEL))) {
-		printk(KERN_CRIT "ibm-iic%d: failed to allocate device data\n",
+		printk(KERN_ERR "ibm-iic%d: failed to allocate device data\n",
 			ocp->def->index);
 		return -ENOMEM;
 	}
@@ -687,7 +696,7 @@
 	}
 
 	if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){
-		printk(KERN_CRIT "ibm-iic%d: failed to ioremap device registers\n",
+		printk(KERN_ERR "ibm-iic%d: failed to ioremap device registers\n",
 			dev->idx);
 		ret = -ENXIO;
 		goto fail2;
@@ -745,7 +754,7 @@
 	adap->nr = dev->idx >= 0 ? dev->idx : 0;
 
 	if ((ret = i2c_add_numbered_adapter(adap)) < 0) {
-		printk(KERN_CRIT "ibm-iic%d: failed to register i2c adapter\n",
+		printk(KERN_ERR "ibm-iic%d: failed to register i2c adapter\n",
 			dev->idx);
 		goto fail;
 	}
@@ -778,7 +787,7 @@
 	struct ibm_iic_private* dev = (struct ibm_iic_private*)ocp_get_drvdata(ocp);
 	BUG_ON(dev == NULL);
 	if (i2c_del_adapter(&dev->adap)){
-		printk(KERN_CRIT "ibm-iic%d: failed to delete i2c adapter :(\n",
+		printk(KERN_ERR "ibm-iic%d: failed to delete i2c adapter :(\n",
 			dev->idx);
 		/* That's *very* bad, just shutdown IRQ ... */
 		if (dev->irq >= 0){
@@ -828,5 +837,181 @@
 	ocp_unregister_driver(&ibm_iic_driver);
 }
 
+#else  /* !CONFIG_IBM_OCP */
+
+static int __devinit iic_request_irq(struct of_device *ofdev,
+				     struct ibm_iic_private *dev)
+{
+	struct device_node *np = ofdev->node;
+	int irq;
+
+	if (iic_force_poll)
+		return NO_IRQ;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
+		return NO_IRQ;
+	}
+
+	/* Disable interrupts until we finish initialization, assumes
+	 *  level-sensitive IRQ setup...
+	 */
+	iic_interrupt_mode(dev, 0);
+	if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) {
+		dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
+		/* Fallback to the polling mode */
+		return NO_IRQ;
+	}
+
+	return irq;
+}
+
+/*
+ * Register single IIC interface
+ */
+static int __devinit iic_probe(struct of_device *ofdev,
+			       const struct of_device_id *match)
+{
+	struct device_node *np = ofdev->node;
+	struct ibm_iic_private *dev;
+	struct i2c_adapter *adap;
+	const u32 *indexp, *freq;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&ofdev->dev, "failed to allocate device data\n");
+		return -ENOMEM;
+	}
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	indexp = of_get_property(np, "index", NULL);
+	if (!indexp) {
+		dev_err(&ofdev->dev, "no index specified\n");
+		ret = -EINVAL;
+		goto error_cleanup;
+	}
+	dev->idx = *indexp;
+
+	dev->vaddr = of_iomap(np, 0);
+	if (dev->vaddr == NULL) {
+		dev_err(&ofdev->dev, "failed to iomap device\n");
+		ret = -ENXIO;
+		goto error_cleanup;
+	}
+
+	init_waitqueue_head(&dev->wq);
+
+	dev->irq = iic_request_irq(ofdev, dev);
+	if (dev->irq == NO_IRQ)
+		dev_warn(&ofdev->dev, "using polling mode\n");
+
+	/* Board specific settings */
+	if (iic_force_fast || of_get_property(np, "fast-mode", NULL))
+		dev->fast_mode = 1;
+
+	freq = of_get_property(np, "clock-frequency", NULL);
+	if (freq == NULL) {
+		freq = of_get_property(np->parent, "clock-frequency", NULL);
+		if (freq == NULL) {
+			dev_err(&ofdev->dev, "Unable to get bus frequency\n");
+			ret = -EINVAL;
+			goto error_cleanup;
+		}
+	}
+
+	dev->clckdiv = iic_clckdiv(*freq);
+	dev_dbg(&ofdev->dev, "clckdiv = %d\n", dev->clckdiv);
+
+	/* Initialize IIC interface */
+	iic_dev_init(dev);
+
+	/* Register it with i2c layer */
+	adap = &dev->adap;
+	adap->dev.parent = &ofdev->dev;
+	strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
+	i2c_set_adapdata(adap, dev);
+	adap->id = I2C_HW_OCP;
+	adap->class = I2C_CLASS_HWMON;
+	adap->algo = &iic_algo;
+	adap->timeout = 1;
+	adap->nr = dev->idx;
+
+	ret = i2c_add_numbered_adapter(adap);
+	if (ret  < 0) {
+		dev_err(&ofdev->dev, "failed to register i2c adapter\n");
+		goto error_cleanup;
+	}
+
+	dev_info(&ofdev->dev, "using %s mode\n",
+		 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
+
+	return 0;
+
+error_cleanup:
+	if (dev->irq != NO_IRQ) {
+		iic_interrupt_mode(dev, 0);
+		free_irq(dev->irq, dev);
+	}
+
+	if (dev->vaddr)
+		iounmap(dev->vaddr);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+	kfree(dev);
+	return ret;
+}
+
+/*
+ * Cleanup initialized IIC interface
+ */
+static int __devexit iic_remove(struct of_device *ofdev)
+{
+	struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	i2c_del_adapter(&dev->adap);
+
+	if (dev->irq != NO_IRQ) {
+		iic_interrupt_mode(dev, 0);
+		free_irq(dev->irq, dev);
+	}
+
+	iounmap(dev->vaddr);
+	kfree(dev);
+
+	return 0;
+}
+
+static const struct of_device_id ibm_iic_match[] = {
+	{ .compatible = "ibm,iic-405ex", },
+	{ .compatible = "ibm,iic-405gp", },
+	{ .compatible = "ibm,iic-440gp", },
+	{ .compatible = "ibm,iic-440gpx", },
+	{ .compatible = "ibm,iic-440grx", },
+	{}
+};
+
+static struct of_platform_driver ibm_iic_driver = {
+	.name	= "ibm-iic",
+	.match_table = ibm_iic_match,
+	.probe	= iic_probe,
+	.remove	= __devexit_p(iic_remove),
+};
+
+static int __init iic_init(void)
+{
+	return of_register_platform_driver(&ibm_iic_driver);
+}
+
+static void __exit iic_exit(void)
+{
+	of_unregister_platform_driver(&ibm_iic_driver);
+}
+#endif /* CONFIG_IBM_OCP */
+
 module_init(iic_init);
 module_exit(iic_exit);
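
The message downgraded from KERN_CRIT above sits next to unchanged math: iic_clckdiv() rounds the OPB frequency (in MHz) up to the next 10 MHz step and turns it into a zero-based divisor, after clamping out-of-range values into the 20..150 MHz window with just a warning. A worked example, assuming a 66 MHz OPB clock:

	clckdiv = (opb + 9) / 10 - 1
	        = (66 + 9) / 10 - 1	/* integer division: 75 / 10 = 7 */
	        = 6
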
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index ab41400..39884e7 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -550,3 +550,4 @@
 MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>");
 MODULE_DESCRIPTION("IOP3xx iic algorithm and driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:IOP3xx-I2C");
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 6352121..5af9e65 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -164,4 +164,5 @@
 MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
 MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:IXP2000-I2C");
 
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index bbe787b..18beb0a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -392,6 +392,9 @@
 	return 0;
 };
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:fsl-i2c");
+
 /* Structure for a device driver */
 static struct platform_driver fsl_i2c_driver = {
 	.probe = fsl_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e417c2c..f145692 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -312,6 +312,9 @@
 	return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:ocores-i2c");
+
 static struct platform_driver ocores_i2c_driver = {
 	.probe  = ocores_i2c_probe,
 	.remove = __devexit_p(ocores_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7ba3177..e7eb7bf 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -693,3 +693,4 @@
 MODULE_AUTHOR("MontaVista Software, Inc. (and others)");
 MODULE_DESCRIPTION("TI OMAP I2C bus adapter");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:i2c_omap");
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 496ee87..a119784 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -1,6 +1,7 @@
 /*
  *  i2c-pca-isa.c driver for PCA9564 on ISA boards
  *    Copyright (C) 2004 Arcom Control Systems
+ *    Copyright (C) 2008 Pengutronix
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -22,11 +23,9 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
-
 #include <linux/isa.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pca.h>
@@ -34,13 +33,9 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
-#include "../algos/i2c-algo-pca.h"
-
+#define DRIVER "i2c-pca-isa"
 #define IO_SIZE 4
 
-#undef DEBUG_IO
-//#define DEBUG_IO
-
 static unsigned long base   = 0x330;
 static int irq 	  = 10;
 
@@ -48,22 +43,9 @@
  * in the actual clock rate */
 static int clock  = I2C_PCA_CON_59kHz;
 
-static int own    = 0x55;
-
 static wait_queue_head_t pca_wait;
 
-static int pca_isa_getown(struct i2c_algo_pca_data *adap)
-{
-	return (own);
-}
-
-static int pca_isa_getclock(struct i2c_algo_pca_data *adap)
-{
-	return (clock);
-}
-
-static void
-pca_isa_writebyte(struct i2c_algo_pca_data *adap, int reg, int val)
+static void pca_isa_writebyte(void *pd, int reg, int val)
 {
 #ifdef DEBUG_IO
 	static char *names[] = { "T/O", "DAT", "ADR", "CON" };
@@ -72,44 +54,49 @@
 	outb(val, base+reg);
 }
 
-static int
-pca_isa_readbyte(struct i2c_algo_pca_data *adap, int reg)
+static int pca_isa_readbyte(void *pd, int reg)
 {
 	int res = inb(base+reg);
 #ifdef DEBUG_IO
 	{
-		static char *names[] = { "STA", "DAT", "ADR", "CON" };	
+		static char *names[] = { "STA", "DAT", "ADR", "CON" };
 		printk("*** read  %s => %#04x\n", names[reg], res);
 	}
 #endif
 	return res;
 }
 
-static int pca_isa_waitforinterrupt(struct i2c_algo_pca_data *adap)
+static int pca_isa_waitforcompletion(void *pd)
 {
 	int ret = 0;
 
 	if (irq > -1) {
 		ret = wait_event_interruptible(pca_wait,
-					       pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI);
+					       pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI);
 	} else {
-		while ((pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) 
+		while ((pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
 			udelay(100);
 	}
 	return ret;
 }
 
+static void pca_isa_resetchip(void *pd)
+{
+	/* Apparently only an external reset will do it; not a lot can be done. */
+	printk(KERN_WARNING DRIVER ": Haven't figured out how to do a reset yet\n");
+}
+
 static irqreturn_t pca_handler(int this_irq, void *dev_id) {
 	wake_up_interruptible(&pca_wait);
 	return IRQ_HANDLED;
 }
 
 static struct i2c_algo_pca_data pca_isa_data = {
-	.get_own		= pca_isa_getown,
-	.get_clock		= pca_isa_getclock,
+	/* .data intentionally left NULL, not needed with ISA */
 	.write_byte		= pca_isa_writebyte,
 	.read_byte		= pca_isa_readbyte,
-	.wait_for_interrupt	= pca_isa_waitforinterrupt,
+	.wait_for_completion	= pca_isa_waitforcompletion,
+	.reset_chip		= pca_isa_resetchip,
 };
 
 static struct i2c_adapter pca_isa_ops = {
@@ -117,6 +104,7 @@
 	.id		= I2C_HW_A_ISA,
 	.algo_data	= &pca_isa_data,
 	.name		= "PCA9564 ISA Adapter",
+	.timeout	= 100,
 };
 
 static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
@@ -144,6 +132,7 @@
 		}
 	}
 
+	pca_isa_data.i2c_clock = clock;
 	if (i2c_pca_add_bus(&pca_isa_ops) < 0) {
 		dev_err(dev, "Failed to add i2c bus\n");
 		goto out_irq;
@@ -178,7 +167,7 @@
 	.remove		= __devexit_p(pca_isa_remove),
 	.driver = {
 		.owner	= THIS_MODULE,
-		.name	= "i2c-pca-isa",
+		.name	= DRIVER,
 	}
 };
 
@@ -204,7 +193,5 @@
 module_param(clock, int, 0);
 MODULE_PARM_DESC(clock, "Clock rate as described in table 1 of PCA9564 datasheet");
 
-module_param(own, int, 0); /* the driver can't do slave mode, so there's no real point in this */
-
 module_init(pca_isa_init);
 module_exit(pca_isa_exit);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
new file mode 100644
index 0000000..9d75f51
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -0,0 +1,298 @@
+/*
+ *  i2c_pca_platform.c
+ *
+ *  Platform driver for the PCA9564 I2C controller.
+ *
+ *  Copyright (C) 2008 Pengutronix
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c-algo-pca.h>
+#include <linux/i2c-pca-platform.h>
+#include <linux/gpio.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#define res_len(r)		((r)->end - (r)->start + 1)
+
+struct i2c_pca_pf_data {
+	void __iomem			*reg_base;
+	int				irq;	/* if 0, use polling */
+	int				gpio;
+	wait_queue_head_t		wait;
+	struct i2c_adapter		adap;
+	struct i2c_algo_pca_data	algo_data;
+	unsigned long			io_base;
+	unsigned long			io_size;
+};
+
+/* Read/Write functions for different register alignments */
+
+static int i2c_pca_pf_readbyte8(void *pd, int reg)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	return ioread8(i2c->reg_base + reg);
+}
+
+static int i2c_pca_pf_readbyte16(void *pd, int reg)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	return ioread8(i2c->reg_base + reg * 2);
+}
+
+static int i2c_pca_pf_readbyte32(void *pd, int reg)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	return ioread8(i2c->reg_base + reg * 4);
+}
+
+static void i2c_pca_pf_writebyte8(void *pd, int reg, int val)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	iowrite8(val, i2c->reg_base + reg);
+}
+
+static void i2c_pca_pf_writebyte16(void *pd, int reg, int val)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	iowrite8(val, i2c->reg_base + reg * 2);
+}
+
+static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	iowrite8(val, i2c->reg_base + reg * 4);
+}
+
+
+static int i2c_pca_pf_waitforcompletion(void *pd)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	int ret = 0;
+
+	if (i2c->irq) {
+		ret = wait_event_interruptible(i2c->wait,
+			i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+			& I2C_PCA_CON_SI);
+	} else {
+		/*
+		 * Do polling...
+		 * XXX: Could get stuck in extreme cases!
+		 *      Maybe add timeout, but using irqs is preferred anyhow.
+		 */
+		while ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+				& I2C_PCA_CON_SI) == 0)
+			udelay(100);
+	}
+
+	return ret;
+}
+
+static void i2c_pca_pf_dummyreset(void *pd)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+	printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n",
+		i2c->adap.name);
+}
+
+static void i2c_pca_pf_resetchip(void *pd)
+{
+	struct i2c_pca_pf_data *i2c = pd;
+
+	gpio_set_value(i2c->gpio, 0);
+	ndelay(100);
+	gpio_set_value(i2c->gpio, 1);
+}
+
+static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
+{
+	struct i2c_pca_pf_data *i2c = dev_id;
+
+	if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
+		return IRQ_NONE;
+
+	wake_up_interruptible(&i2c->wait);
+
+	return IRQ_HANDLED;
+}
+
+
+static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
+{
+	struct i2c_pca_pf_data *i2c;
+	struct resource *res;
+	struct i2c_pca9564_pf_platform_data *platform_data =
+				pdev->dev.platform_data;
+	int ret = 0;
+	int irq;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	/* If irq is 0, we do polling. */
+
+	if (res == NULL) {
+		ret = -ENODEV;
+		goto e_print;
+	}
+
+	if (!request_mem_region(res->start, res_len(res), res->name)) {
+		ret = -ENOMEM;
+		goto e_print;
+	}
+
+	i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL);
+	if (!i2c) {
+		ret = -ENOMEM;
+		goto e_alloc;
+	}
+
+	init_waitqueue_head(&i2c->wait);
+
+	i2c->reg_base = ioremap(res->start, res_len(res));
+	if (!i2c->reg_base) {
+		ret = -EIO;
+		goto e_remap;
+	}
+	i2c->io_base = res->start;
+	i2c->io_size = res_len(res);
+	i2c->irq = irq;
+
+	i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+	i2c->adap.owner = THIS_MODULE;
+	snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564 at 0x%08lx",
+		(unsigned long) res->start);
+	i2c->adap.algo_data = &i2c->algo_data;
+	i2c->adap.dev.parent = &pdev->dev;
+	i2c->adap.timeout = platform_data->timeout;
+
+	i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed;
+	i2c->algo_data.data = i2c;
+
+	switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
+	case IORESOURCE_MEM_32BIT:
+		i2c->algo_data.write_byte = i2c_pca_pf_writebyte32;
+		i2c->algo_data.read_byte = i2c_pca_pf_readbyte32;
+		break;
+	case IORESOURCE_MEM_16BIT:
+		i2c->algo_data.write_byte = i2c_pca_pf_writebyte16;
+		i2c->algo_data.read_byte = i2c_pca_pf_readbyte16;
+		break;
+	case IORESOURCE_MEM_8BIT:
+	default:
+		i2c->algo_data.write_byte = i2c_pca_pf_writebyte8;
+		i2c->algo_data.read_byte = i2c_pca_pf_readbyte8;
+		break;
+	}
+
+	i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion;
+
+	i2c->gpio = platform_data->gpio;
+	i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset;
+
+	/* Use gpio_is_valid() when in mainline */
+	if (i2c->gpio > -1) {
+		ret = gpio_request(i2c->gpio, i2c->adap.name);
+		if (ret == 0) {
+			gpio_direction_output(i2c->gpio, 1);
+			i2c->algo_data.reset_chip = i2c_pca_pf_resetchip;
+		} else {
+			printk(KERN_WARNING "%s: Registering gpio failed!\n",
+				i2c->adap.name);
+			i2c->gpio = ret;
+		}
+	}
+
+	if (irq) {
+		ret = request_irq(irq, i2c_pca_pf_handler,
+			IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
+		if (ret)
+			goto e_reqirq;
+	}
+
+	if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) {
+		ret = -ENODEV;
+		goto e_adapt;
+	}
+
+	platform_set_drvdata(pdev, i2c);
+
+	printk(KERN_INFO "%s registered.\n", i2c->adap.name);
+
+	return 0;
+
+e_adapt:
+	if (irq)
+		free_irq(irq, i2c);
+e_reqirq:
+	if (i2c->gpio > -1)
+		gpio_free(i2c->gpio);
+
+	iounmap(i2c->reg_base);
+e_remap:
+	kfree(i2c);
+e_alloc:
+	release_mem_region(res->start, res_len(res));
+e_print:
+	printk(KERN_ERR "Registering PCA9564 FAILED! (%d)\n", ret);
+	return ret;
+}
+
+static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
+{
+	struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
+	platform_set_drvdata(pdev, NULL);
+
+	i2c_del_adapter(&i2c->adap);
+
+	if (i2c->irq)
+		free_irq(i2c->irq, i2c);
+
+	if (i2c->gpio > -1)
+		gpio_free(i2c->gpio);
+
+	iounmap(i2c->reg_base);
+	release_mem_region(i2c->io_base, i2c->io_size);
+	kfree(i2c);
+
+	return 0;
+}
+
+static struct platform_driver i2c_pca_pf_driver = {
+	.probe = i2c_pca_pf_probe,
+	.remove = __devexit_p(i2c_pca_pf_remove),
+	.driver = {
+		.name = "i2c-pca-platform",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init i2c_pca_pf_init(void)
+{
+	return platform_driver_register(&i2c_pca_pf_driver);
+}
+
+static void __exit i2c_pca_pf_exit(void)
+{
+	platform_driver_unregister(&i2c_pca_pf_driver);
+}
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("I2C-PCA9564 platform driver");
+MODULE_LICENSE("GPL");
+
+module_init(i2c_pca_pf_init);
+module_exit(i2c_pca_pf_exit);
+
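A sketch of the board-side half that the new i2c-pca-platform driver expects; the address, IRQ number and device id below are made up, and only the structure and field names come from the probe code above, which reads gpio, i2c_clock_speed and timeout from the platform data and picks 8/16/32-bit register spacing from the memory resource's IORESOURCE_MEM_TYPE flags:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/i2c-algo-pca.h>
#include <linux/i2c-pca-platform.h>

static struct resource board_pca9564_resources[] = {
	{
		.start	= 0x10000000,	/* made-up chip-select address */
		.end	= 0x10000003,	/* four register locations */
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
	}, {
		.start	= 42,		/* made-up IRQ; omit to use polling */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct i2c_pca9564_pf_platform_data board_pca9564_data = {
	.gpio		 = -1,			/* no reset line wired up */
	.i2c_clock_speed = I2C_PCA_CON_59kHz,
	.timeout	 = 100,
};

static struct platform_device board_pca9564_device = {
	.name		= "i2c-pca-platform",	/* matches the driver name */
	.id		= 0,
	.resource	= board_pca9564_resources,
	.num_resources	= ARRAY_SIZE(board_pca9564_resources),
	.dev		= {
		.platform_data	= &board_pca9564_data,
	},
};
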
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index b03af56..63b3e2c 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -467,7 +467,7 @@
 	    (cmd->read_len == 0 || cmd->write_len == 0))) {
 		dev_err(&pmcmsptwi_adapter.dev,
 			"%s: Cannot transfer less than 1 byte\n",
-			__FUNCTION__);
+			__func__);
 		return -EINVAL;
 	}
 
@@ -475,7 +475,7 @@
 	    cmd->write_len > MSP_MAX_BYTES_PER_RW) {
 		dev_err(&pmcmsptwi_adapter.dev,
 			"%s: Cannot transfer more than %d bytes\n",
-			__FUNCTION__, MSP_MAX_BYTES_PER_RW);
+			__func__, MSP_MAX_BYTES_PER_RW);
 		return -EINVAL;
 	}
 
@@ -627,6 +627,9 @@
 	.name		= DRV_NAME,
 };
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" DRV_NAME);
+
 static struct platform_driver pmcmsptwi_driver = {
 	.probe  = pmcmsptwi_probe,
 	.remove	= __devexit_p(pmcmsptwi_remove),
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index f8d0dff..1ca2108 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -76,7 +76,7 @@
 {
 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
 
-	dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __FUNCTION__,
+	dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __func__,
 		slave_addr, alg_data->mif.mode);
 
 	/* Check for 7 bit slave addresses only */
@@ -110,14 +110,14 @@
 	iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi,
 		  I2C_REG_STS(alg_data));
 
-	dev_dbg(&adap->dev, "%s(): sending %#x\n", __FUNCTION__,
+	dev_dbg(&adap->dev, "%s(): sending %#x\n", __func__,
 		(slave_addr << 1) | start_bit | alg_data->mif.mode);
 
 	/* Write the slave address, START bit and R/W bit */
 	iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode,
 		  I2C_REG_TX(alg_data));
 
-	dev_dbg(&adap->dev, "%s(): exit\n", __FUNCTION__);
+	dev_dbg(&adap->dev, "%s(): exit\n", __func__);
 
 	return 0;
 }
@@ -135,7 +135,7 @@
 	long timeout = 1000;
 
 	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	/* Write a STOP bit to TX FIFO */
 	iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data));
@@ -149,7 +149,7 @@
 	}
 
 	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 }
 
 /**
@@ -164,7 +164,7 @@
 	u32 val;
 
 	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	if (alg_data->mif.len > 0) {
 		/* We still have something to talk about... */
@@ -179,7 +179,7 @@
 		alg_data->mif.len--;
 		iowrite32(val, I2C_REG_TX(alg_data));
 
-		dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __FUNCTION__,
+		dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__,
 			val, alg_data->mif.len + 1);
 
 		if (alg_data->mif.len == 0) {
@@ -197,7 +197,7 @@
 			del_timer_sync(&alg_data->mif.timer);
 
 			dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
-				__FUNCTION__);
+				__func__);
 
 			complete(&alg_data->mif.complete);
 		}
@@ -213,13 +213,13 @@
 		/* Stop timer. */
 		del_timer_sync(&alg_data->mif.timer);
 		dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
-			"zero-xfer.\n", __FUNCTION__);
+			"zero-xfer.\n", __func__);
 
 		complete(&alg_data->mif.complete);
 	}
 
 	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	return 0;
 }
@@ -237,14 +237,14 @@
 	u32 ctl = 0;
 
 	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	/* Check, whether there is already data,
 	 * or we didn't 'ask' for it yet.
 	 */
 	if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
 		dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
-			"Rx-fifo...\n", __FUNCTION__);
+			"Rx-fifo...\n", __func__);
 
 		if (alg_data->mif.len == 1) {
 			/* Last byte, do not acknowledge next rcv. */
@@ -276,7 +276,7 @@
 	if (alg_data->mif.len > 0) {
 		val = ioread32(I2C_REG_RX(alg_data));
 		*alg_data->mif.buf++ = (u8) (val & 0xff);
-		dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __FUNCTION__, val,
+		dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val,
 			alg_data->mif.len);
 
 		alg_data->mif.len--;
@@ -300,7 +300,7 @@
 	}
 
 	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	return 0;
 }
@@ -312,7 +312,7 @@
 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
 
 	dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
-		__FUNCTION__,
+		__func__,
 		ioread32(I2C_REG_STS(alg_data)),
 		ioread32(I2C_REG_CTL(alg_data)),
 		alg_data->mif.mode);
@@ -336,7 +336,7 @@
 		/* Slave did not acknowledge, generate a STOP */
 		dev_dbg(&adap->dev, "%s(): "
 			"Slave did not acknowledge, generating a STOP.\n",
-			__FUNCTION__);
+			__func__);
 		i2c_pnx_stop(adap);
 
 		/* Disable master interrupts. */
@@ -375,7 +375,7 @@
 	iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));
 
 	dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
-		 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)),
+		 __func__, ioread32(I2C_REG_STS(alg_data)),
 		 ioread32(I2C_REG_CTL(alg_data)));
 
 	return IRQ_HANDLED;
@@ -447,7 +447,7 @@
 	u32 stat = ioread32(I2C_REG_STS(alg_data));
 
 	dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
-		__FUNCTION__, num, ioread32(I2C_REG_STS(alg_data)));
+		__func__, num, ioread32(I2C_REG_STS(alg_data)));
 
 	bus_reset_if_active(adap);
 
@@ -473,7 +473,7 @@
 		alg_data->mif.ret = 0;
 		alg_data->last = (i == num - 1);
 
-		dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __FUNCTION__,
+		dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __func__,
 			alg_data->mif.mode,
 			alg_data->mif.len);
 
@@ -498,7 +498,7 @@
 		if (!(rc = alg_data->mif.ret))
 			completed++;
 		dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
-			__FUNCTION__, rc);
+			__func__, rc);
 
 		/* Clear TDI and AFI bits in case they are set. */
 		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
@@ -522,7 +522,7 @@
 	alg_data->mif.len = 0;
 
 	dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
-		__FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
+		__func__, ioread32(I2C_REG_STS(alg_data)));
 
 	if (completed != num)
 		return ((rc < 0) ? rc : -EREMOTEIO);
@@ -563,7 +563,7 @@
 
 	if (!i2c_pnx || !i2c_pnx->adapter) {
 		dev_err(&pdev->dev, "%s: no platform data supplied\n",
-		       __FUNCTION__);
+		       __func__);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -697,6 +697,7 @@
 MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
 MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pnx-i2c");
 
 /* We need to make sure I2C is initialized before USB */
 subsys_initcall(i2c_adap_pnx_init);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 7813127..22f6d5c 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -263,6 +263,9 @@
 }
 
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c-powermac");
+
 static struct platform_driver i2c_powermac_driver = {
 	.probe = i2c_powermac_probe,
 	.remove = __devexit_p(i2c_powermac_remove),
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 6fd2d6a..eb69fba 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -155,7 +155,7 @@
 		readl(_ISR(i2c)), readl(_ICR(i2c)), readl(_IBMR(i2c)));
 }
 
-#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __FUNCTION__)
+#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__)
 #else
 #define i2c_debug	0
 
@@ -1132,6 +1132,7 @@
 }
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-i2c");
 
 module_init(i2c_adap_pxa_init);
 module_exit(i2c_adap_pxa_exit);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c44ada5..1305ef1 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -276,12 +276,12 @@
 	switch (i2c->state) {
 
 	case STATE_IDLE:
-		dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __FUNCTION__);
+		dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
 		goto out;
 		break;
 
 	case STATE_STOP:
-		dev_err(i2c->dev, "%s: called in STATE_STOP\n", __FUNCTION__);
+		dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
 		s3c24xx_i2c_disable_irq(i2c);		
 		goto out_ack;
 
@@ -948,3 +948,4 @@
 MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c2410-i2c");
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
new file mode 100644
index 0000000..5e0e254
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -0,0 +1,577 @@
+/*
+ * I2C bus driver for the SH7760 I2C Interfaces.
+ *
+ * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
+ *
+ * licensed under the terms outlined in the file COPYING.
+ *
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/clock.h>
+#include <asm/i2c-sh7760.h>
+#include <asm/io.h>
+
+/* register offsets */
+#define I2CSCR		0x0		/* slave ctrl		*/
+#define I2CMCR		0x4		/* master ctrl		*/
+#define I2CSSR		0x8		/* slave status		*/
+#define I2CMSR		0xC		/* master status	*/
+#define I2CSIER		0x10		/* slave irq enable	*/
+#define I2CMIER		0x14		/* master irq enable	*/
+#define I2CCCR		0x18		/* clock dividers	*/
+#define I2CSAR		0x1c		/* slave address	*/
+#define I2CMAR		0x20		/* master address	*/
+#define I2CRXTX		0x24		/* data port		*/
+#define I2CFCR		0x28		/* fifo control		*/
+#define I2CFSR		0x2C		/* fifo status		*/
+#define I2CFIER		0x30		/* fifo irq enable	*/
+#define I2CRFDR		0x34		/* rx fifo count	*/
+#define I2CTFDR		0x38		/* tx fifo count	*/
+
+#define REGSIZE		0x3C
+
+#define MCR_MDBS	0x80		/* non-fifo mode switch	*/
+#define MCR_FSCL	0x40		/* override SCL pin	*/
+#define MCR_FSDA	0x20		/* override SDA pin	*/
+#define MCR_OBPC	0x10		/* override pins	*/
+#define MCR_MIE		0x08		/* master if enable	*/
+#define MCR_TSBE	0x04
+#define MCR_FSB		0x02		/* force stop bit	*/
+#define MCR_ESG		0x01		/* en startbit gen.	*/
+
+#define MSR_MNR		0x40		/* nack received	*/
+#define MSR_MAL		0x20		/* arbitration lost	*/
+#define MSR_MST		0x10		/* sent a stop		*/
+#define MSR_MDE		0x08
+#define MSR_MDT		0x04
+#define MSR_MDR		0x02
+#define MSR_MAT		0x01		/* slave addr xfer done	*/
+
+#define MIE_MNRE	0x40		/* nack irq en		*/
+#define MIE_MALE	0x20		/* arblos irq en	*/
+#define MIE_MSTE	0x10		/* stop irq en		*/
+#define MIE_MDEE	0x08
+#define MIE_MDTE	0x04
+#define MIE_MDRE	0x02
+#define MIE_MATE	0x01		/* address sent irq en	*/
+
+#define FCR_RFRST	0x02		/* reset rx fifo	*/
+#define FCR_TFRST	0x01		/* reset tx fifo	*/
+
+#define FSR_TEND	0x04		/* last byte sent	*/
+#define FSR_RDF		0x02		/* rx fifo trigger	*/
+#define FSR_TDFE	0x01		/* tx fifo empty	*/
+
+#define FIER_TEIE	0x04		/* tx fifo empty irq en	*/
+#define FIER_RXIE	0x02		/* rx fifo trig irq en	*/
+#define FIER_TXIE	0x01		/* tx fifo trig irq en	*/
+
+#define FIFO_SIZE	16
+
+struct cami2c {
+	void __iomem *iobase;
+	struct i2c_adapter adap;
+
+	/* message processing */
+	struct i2c_msg	*msg;
+#define IDF_SEND	1
+#define IDF_RECV	2
+#define IDF_STOP	4
+	int		flags;
+
+#define IDS_DONE	1
+#define IDS_ARBLOST	2
+#define IDS_NACK	4
+	int		status;
+	struct completion xfer_done;
+
+	int irq;
+	struct resource *ioarea;
+};
+
+static inline void OUT32(struct cami2c *cam, int reg, unsigned long val)
+{
+	ctrl_outl(val, (unsigned long)cam->iobase + reg);
+}
+
+static inline unsigned long IN32(struct cami2c *cam, int reg)
+{
+	return ctrl_inl((unsigned long)cam->iobase + reg);
+}
+
+static irqreturn_t sh7760_i2c_irq(int irq, void *ptr)
+{
+	struct cami2c *id = ptr;
+	struct i2c_msg *msg = id->msg;
+	char *data = msg->buf;
+	unsigned long msr, fsr, fier, len;
+
+	msr = IN32(id, I2CMSR);
+	fsr = IN32(id, I2CFSR);
+
+	/* arbitration lost */
+	if (msr & MSR_MAL) {
+		OUT32(id, I2CMCR, 0);
+		OUT32(id, I2CSCR, 0);
+		OUT32(id, I2CSAR, 0);
+		id->status |= IDS_DONE | IDS_ARBLOST;
+		goto out;
+	}
+
+	if (msr & MSR_MNR) {
+		/* NACK handling is very screwed up.  After receiving a
+		 * NAK IRQ one has to wait a bit before writing to any
+		 * registers, or the ctl will lock up. After that delay,
+		 * do a normal i2c stop. Then wait at least 1 ms before
+		 * attempting another transfer, or the ctl will stop working.
+		 */
+		udelay(100);	/* wait or risk ctl hang */
+		OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+		OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+		OUT32(id, I2CFIER, 0);
+		OUT32(id, I2CMIER, MIE_MSTE);
+		OUT32(id, I2CSCR, 0);
+		OUT32(id, I2CSAR, 0);
+		id->status |= IDS_NACK;
+		msr &= ~MSR_MAT;
+		fsr = 0;
+		/* In some cases the MST bit is also set. */
+	}
+
+	/* i2c-stop was sent */
+	if (msr & MSR_MST) {
+		id->status |= IDS_DONE;
+		goto out;
+	}
+
+	/* i2c slave addr was sent; set to "normal" operation */
+	if (msr & MSR_MAT)
+		OUT32(id, I2CMCR, MCR_MIE);
+
+	fier = IN32(id, I2CFIER);
+
+	if (fsr & FSR_RDF) {
+		len = IN32(id, I2CRFDR);
+		if (msg->len <= len) {
+			if (id->flags & IDF_STOP) {
+				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+				OUT32(id, I2CFIER, 0);
+				/* manual says: wait >= 0.5 SCL times */
+				udelay(5);
+				/* next int should be MST */
+			} else {
+				id->status |= IDS_DONE;
+				/* keep the RDF bit: ctrl holds SCL low
+				 * until the setup for the next i2c_msg
+				 * clears this bit.
+				 */
+				fsr &= ~FSR_RDF;
+			}
+		}
+		while (msg->len && len) {
+			*data++ = IN32(id, I2CRXTX);
+			msg->len--;
+			len--;
+		}
+
+		if (msg->len) {
+			len = (msg->len >= FIFO_SIZE) ? FIFO_SIZE - 1
+						      : msg->len - 1;
+
+			OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xf) << 4));
+		}
+
+	} else if (id->flags & IDF_SEND) {
+		if ((fsr & FSR_TEND) && (msg->len < 1)) {
+			if (id->flags & IDF_STOP) {
+				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+			} else {
+				id->status |= IDS_DONE;
+				/* keep the TEND bit: ctl holds SCL low
+				 * until the setup for the next i2c_msg
+				 * clears this bit.
+				 */
+				fsr &= ~FSR_TEND;
+			}
+		}
+		if (fsr & FSR_TDFE) {
+			while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) {
+				OUT32(id, I2CRXTX, *data++);
+				msg->len--;
+			}
+
+			if (msg->len < 1) {
+				fier &= ~FIER_TXIE;
+				OUT32(id, I2CFIER, fier);
+			} else {
+				len = (msg->len >= FIFO_SIZE) ? 2 : 0;
+				OUT32(id, I2CFCR,
+					  FCR_RFRST | ((len & 3) << 2));
+			}
+		}
+	}
+out:
+	if (id->status & IDS_DONE) {
+		OUT32(id, I2CMIER, 0);
+		OUT32(id, I2CFIER, 0);
+		id->msg = NULL;
+		complete(&id->xfer_done);
+	}
+	/* clear status flags and ctrl resumes work */
+	OUT32(id, I2CMSR, ~msr);
+	OUT32(id, I2CFSR, ~fsr);
+	OUT32(id, I2CSSR, 0);
+
+	return IRQ_HANDLED;
+}
+
+
+/* prepare and start a master receive operation */
+static void sh7760_i2c_mrecv(struct cami2c *id)
+{
+	int len;
+
+	id->flags |= IDF_RECV;
+
+	/* set the slave addr reg; otherwise rcv won't work! */
+	OUT32(id, I2CSAR, 0xfe);
+	OUT32(id, I2CMAR, (id->msg->addr << 1) | 1);
+
+	/* adjust rx fifo trigger */
+	if (id->msg->len >= FIFO_SIZE)
+		len = FIFO_SIZE - 1;	/* trigger at fifo full */
+	else
+		len = id->msg->len - 1;	/* trigger before all received */
+
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xF) << 4));
+
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
+	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
+	OUT32(id, I2CFIER, FIER_RXIE);
+}
+
+/* prepare and start a master send operation */
+static void sh7760_i2c_msend(struct cami2c *id)
+{
+	int len;
+
+	id->flags |= IDF_SEND;
+
+	/* set the slave addr reg; otherwise xmit won't work! */
+	OUT32(id, I2CSAR, 0xfe);
+	OUT32(id, I2CMAR, (id->msg->addr << 1) | 0);
+
+	/* adjust tx fifo trigger */
+	if (id->msg->len >= FIFO_SIZE)
+		len = 2;	/* trig: 2 bytes left in TX fifo */
+	else
+		len = 0;	/* trig: 8 bytes left in TX fifo */
+
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2));
+
+	while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) {
+		OUT32(id, I2CRXTX, *(id->msg->buf));
+		(id->msg->len)--;
+		(id->msg->buf)++;
+	}
+
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
+	OUT32(id, I2CFSR, 0);
+	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
+	OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0));
+}
+
+static inline int sh7760_i2c_busy_check(struct cami2c *id)
+{
+	return (IN32(id, I2CMCR) & MCR_FSDA);
+}
+
+static int sh7760_i2c_master_xfer(struct i2c_adapter *adap,
+				  struct i2c_msg *msgs,
+				  int num)
+{
+	struct cami2c *id = adap->algo_data;
+	int i, retr;
+
+	if (sh7760_i2c_busy_check(id)) {
+		dev_err(&adap->dev, "sh7760-i2c%d: bus busy!\n", adap->nr);
+		return -EBUSY;
+	}
+
+	i = 0;
+	while (i < num) {
+		retr = adap->retries;
+retry:
+		id->flags = ((i == (num-1)) ? IDF_STOP : 0);
+		id->status = 0;
+		id->msg = msgs;
+		init_completion(&id->xfer_done);
+
+		if (msgs->flags & I2C_M_RD)
+			sh7760_i2c_mrecv(id);
+		else
+			sh7760_i2c_msend(id);
+
+		wait_for_completion(&id->xfer_done);
+
+		if (id->status == 0) {
+			num = -EIO;
+			break;
+		}
+
+		if (id->status & IDS_NACK) {
+			/* wait a bit or i2c module stops working */
+			mdelay(1);
+			num = -EREMOTEIO;
+			break;
+		}
+
+		if (id->status & IDS_ARBLOST) {
+			if (retr--) {
+				mdelay(2);
+				goto retry;
+			}
+			num = -EREMOTEIO;
+			break;
+		}
+
+		msgs++;
+		i++;
+	}
+
+	id->msg = NULL;
+	id->flags = 0;
+	id->status = 0;
+
+	OUT32(id, I2CMCR, 0);
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMIER, 0);
+	OUT32(id, I2CFIER, 0);
+
+	/* reset slave module registers too: master mode enables slave
+	 * module for receive ops (ack, data). Without this reset,
+	 * eternal bus activity might be reported after NACK / ARBLOST.
+	 */
+	OUT32(id, I2CSCR, 0);
+	OUT32(id, I2CSAR, 0);
+	OUT32(id, I2CSSR, 0);
+
+	return num;
+}
+
+static u32 sh7760_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm sh7760_i2c_algo = {
+	.master_xfer	= sh7760_i2c_master_xfer,
+	.functionality	= sh7760_i2c_func,
+};
+
+/* calculate CCR register setting for a desired scl clock.  SCL clock is
+ * derived from I2C module clock  (iclk)  which in turn is derived from
+ * peripheral module clock (mclk, usually around 33MHz):
+ * iclk = mclk/(CDF + 1).  iclk must be < 20MHz.
+ * scl = iclk/(SCGD*8 + 20).
+ */
+static int __devinit calc_CCR(unsigned long scl_hz)
+{
+	struct clk *mclk;
+	unsigned long mck, m1, dff, odff, iclk;
+	signed char cdf, cdfm;
+	int scgd, scgdm, scgds;
+
+	mclk = clk_get(NULL, "module_clk");
+	if (IS_ERR(mclk)) {
+		return PTR_ERR(mclk);
+	} else {
+		mck = mclk->rate;
+		clk_put(mclk);
+	}
+
+	odff = scl_hz;
+	scgdm = cdfm = m1 = 0;
+	for (cdf = 3; cdf >= 0; cdf--) {
+		iclk = mck / (1 + cdf);
+		if (iclk >= 20000000)
+			continue;
+		scgds = ((iclk / scl_hz) - 20) >> 3;
+		for (scgd = scgds; (scgd < 63) && scgd <= scgds + 1; scgd++) {
+			m1 = iclk / (20 + (scgd << 3));
+			dff = abs(scl_hz - m1);
+			if (dff < odff) {
+				odff = dff;
+				cdfm = cdf;
+				scgdm = scgd;
+			}
+		}
+	}
+	/* fail if more than 25% off of requested SCL */
+	if (odff > (scl_hz >> 2))
+		return -EINVAL;
+
+	/* create a CCR register value */
+	return ((scgdm << 2) | cdfm);
+}
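
To make the CDF/SCGD search above concrete, here is a rough stand-alone sketch of the same loop, assuming a 33 MHz module clock and a 100 kHz target SCL (both numbers chosen purely for illustration, not taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned long mck = 33000000;	/* assumed module clock (Hz) */
	unsigned long scl_hz = 100000;	/* assumed target SCL (Hz)   */
	unsigned long iclk, m1, dff, odff = scl_hz;
	int cdf, scgd, scgds, cdfm = 0, scgdm = 0;

	for (cdf = 3; cdf >= 0; cdf--) {
		iclk = mck / (1 + cdf);
		if (iclk >= 20000000)	/* iclk must stay below 20 MHz */
			continue;
		scgds = ((iclk / scl_hz) - 20) >> 3;
		for (scgd = scgds; scgd < 63 && scgd <= scgds + 1; scgd++) {
			m1 = iclk / (20 + (scgd << 3));
			dff = (m1 > scl_hz) ? m1 - scl_hz : scl_hz - m1;
			if (dff < odff) {
				odff = dff;
				cdfm = cdf;
				scgdm = scgd;
			}
		}
	}
	/* for 33 MHz / 100 kHz this settles on CDF = 1, SCGD = 18:
	 * iclk = 16.5 MHz, scl = 16500000 / (20 + 18 * 8) ~= 100.6 kHz,
	 * so the resulting CCR value is (18 << 2) | 1 = 0x49 */
	printf("CCR = 0x%02x\n", (scgdm << 2) | cdfm);
	return 0;
}
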
+
+static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
+{
+	struct sh7760_i2c_platdata *pd;
+	struct resource *res;
+	struct cami2c *id;
+	int ret;
+
+	pd = pdev->dev.platform_data;
+	if (!pd) {
+		dev_err(&pdev->dev, "no platform_data!\n");
+		ret = -ENODEV;
+		goto out0;
+	}
+
+	id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
+	if (!id) {
+		dev_err(&pdev->dev, "no mem for private data\n");
+		ret = -ENOMEM;
+		goto out0;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no mmio resources\n");
+		ret = -ENODEV;
+		goto out1;
+	}
+
+	id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name);
+	if (!id->ioarea) {
+		dev_err(&pdev->dev, "mmio already reserved\n");
+		ret = -EBUSY;
+		goto out1;
+	}
+
+	id->iobase = ioremap(res->start, REGSIZE);
+	if (!id->iobase) {
+		dev_err(&pdev->dev, "cannot ioremap\n");
+		ret = -ENODEV;
+		goto out2;
+	}
+
+	id->irq = platform_get_irq(pdev, 0);
+
+	id->adap.nr = pdev->id;
+	id->adap.algo = &sh7760_i2c_algo;
+	id->adap.class = I2C_CLASS_ALL;
+	id->adap.retries = 3;
+	id->adap.algo_data = id;
+	id->adap.dev.parent = &pdev->dev;
+	snprintf(id->adap.name, sizeof(id->adap.name),
+		"SH7760 I2C at %08lx", (unsigned long)res->start);
+
+	OUT32(id, I2CMCR, 0);
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMIER, 0);
+	OUT32(id, I2CMAR, 0);
+	OUT32(id, I2CSIER, 0);
+	OUT32(id, I2CSAR, 0);
+	OUT32(id, I2CSCR, 0);
+	OUT32(id, I2CSSR, 0);
+	OUT32(id, I2CFIER, 0);
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFSR, 0);
+
+	ret = calc_CCR(pd->speed_khz * 1000);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n",
+			pd->speed_khz);
+		goto out3;
+	}
+	OUT32(id, I2CCCR, ret);
+
+	if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED,
+			SH7760_I2C_DEVNAME, id)) {
+		dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
+		ret = -EBUSY;
+		goto out3;
+	}
+
+	ret = i2c_add_numbered_adapter(&id->adap);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+		goto out4;
+	}
+
+	platform_set_drvdata(pdev, id);
+
+	dev_info(&pdev->dev, "%d kHz mmio %08x irq %d\n",
+		 pd->speed_khz, res->start, id->irq);
+
+	return 0;
+
+out4:
+	free_irq(id->irq, id);
+out3:
+	iounmap(id->iobase);
+out2:
+	release_resource(id->ioarea);
+	kfree(id->ioarea);
+out1:
+	kfree(id);
+out0:
+	return ret;
+}
+
+static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
+{
+	struct cami2c *id = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&id->adap);
+	free_irq(id->irq, id);
+	iounmap(id->iobase);
+	release_resource(id->ioarea);
+	kfree(id->ioarea);
+	kfree(id);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver sh7760_i2c_drv = {
+	.driver	= {
+		.name	= SH7760_I2C_DEVNAME,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sh7760_i2c_probe,
+	.remove		= __devexit_p(sh7760_i2c_remove),
+};
+
+static int __init sh7760_i2c_init(void)
+{
+	return platform_driver_register(&sh7760_i2c_drv);
+}
+
+static void __exit sh7760_i2c_exit(void)
+{
+	platform_driver_unregister(&sh7760_i2c_drv);
+}
+
+module_init(sh7760_i2c_init);
+module_exit(sh7760_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SH7760 I2C bus driver");
+MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
new file mode 100644
index 0000000..840e634f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -0,0 +1,500 @@
+/*
+ * SuperH Mobile I2C Controller
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Portions of the code based on out-of-tree driver i2c-sh7343.c
+ * Copyright (c) 2006 Carlos Munoz <carlos@kenati.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+enum sh_mobile_i2c_op {
+	OP_START = 0,
+	OP_TX_ONLY,
+	OP_TX_STOP,
+	OP_TX_TO_RX,
+	OP_RX_ONLY,
+	OP_RX_STOP,
+};
+
+struct sh_mobile_i2c_data {
+	struct device *dev;
+	void __iomem *reg;
+	struct i2c_adapter adap;
+
+	struct clk *clk;
+	u_int8_t iccl;
+	u_int8_t icch;
+
+	spinlock_t lock;
+	wait_queue_head_t wait;
+	struct i2c_msg *msg;
+	int pos;
+	int sr;
+};
+
+#define NORMAL_SPEED		100000 /* FAST_SPEED 400000 */
+
+/* Register offsets */
+#define ICDR(pd)		(pd->reg + 0x00)
+#define ICCR(pd)		(pd->reg + 0x04)
+#define ICSR(pd)		(pd->reg + 0x08)
+#define ICIC(pd)		(pd->reg + 0x0c)
+#define ICCL(pd)		(pd->reg + 0x10)
+#define ICCH(pd)		(pd->reg + 0x14)
+
+/* Register bits */
+#define ICCR_ICE		0x80
+#define ICCR_RACK		0x40
+#define ICCR_TRS		0x10
+#define ICCR_BBSY		0x04
+#define ICCR_SCP		0x01
+
+#define ICSR_SCLM		0x80
+#define ICSR_SDAM		0x40
+#define SW_DONE			0x20
+#define ICSR_BUSY		0x10
+#define ICSR_AL			0x08
+#define ICSR_TACK		0x04
+#define ICSR_WAIT		0x02
+#define ICSR_DTE		0x01
+
+#define ICIC_ALE		0x08
+#define ICIC_TACKE		0x04
+#define ICIC_WAITE		0x02
+#define ICIC_DTEE		0x01
+
+static void activate_ch(struct sh_mobile_i2c_data *pd)
+{
+	/* Make sure the clock is enabled */
+	clk_enable(pd->clk);
+
+	/* Enable channel and configure rx ack */
+	iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+
+	/* Mask all interrupts */
+	iowrite8(0, ICIC(pd));
+
+	/* Set the clock */
+	iowrite8(pd->iccl, ICCL(pd));
+	iowrite8(pd->icch, ICCH(pd));
+}
+
+static void deactivate_ch(struct sh_mobile_i2c_data *pd)
+{
+	/* Clear/disable interrupts */
+	iowrite8(0, ICSR(pd));
+	iowrite8(0, ICIC(pd));
+
+	/* Disable channel */
+	iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+
+	/* Disable clock */
+	clk_disable(pd->clk);
+}
+
+static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
+			    enum sh_mobile_i2c_op op, unsigned char data)
+{
+	unsigned char ret = 0;
+	unsigned long flags;
+
+	dev_dbg(pd->dev, "op %d, data in 0x%02x\n", op, data);
+
+	spin_lock_irqsave(&pd->lock, flags);
+
+	switch (op) {
+	case OP_START:
+		iowrite8(0x94, ICCR(pd));
+		break;
+	case OP_TX_ONLY:
+		iowrite8(data, ICDR(pd));
+		break;
+	case OP_TX_STOP:
+		iowrite8(data, ICDR(pd));
+		iowrite8(0x90, ICCR(pd));
+		iowrite8(ICIC_ALE | ICIC_TACKE, ICIC(pd));
+		break;
+	case OP_TX_TO_RX:
+		iowrite8(data, ICDR(pd));
+		iowrite8(0x81, ICCR(pd));
+		break;
+	case OP_RX_ONLY:
+		ret = ioread8(ICDR(pd));
+		break;
+	case OP_RX_STOP:
+		ret = ioread8(ICDR(pd));
+		iowrite8(0xc0, ICCR(pd));
+		break;
+	}
+
+	spin_unlock_irqrestore(&pd->lock, flags);
+
+	dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret);
+	return ret;
+}
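
Decoded against the ICCR bit definitions earlier in this file, the raw control values written above appear to break down as follows (the semantic glosses are inferred from the op names, not stated in the patch): 0x94 = ICCR_ICE | ICCR_TRS | ICCR_BBSY (issue a start in transmit mode), 0x90 = ICCR_ICE | ICCR_TRS (stop after the pending transmit), 0x81 = ICCR_ICE | ICCR_SCP (turn the bus around for a receive), and 0xc0 = ICCR_ICE | ICCR_RACK (NAK and stop after the final received byte).
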
+
+static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
+{
+	struct platform_device *dev = dev_id;
+	struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
+	struct i2c_msg *msg = pd->msg;
+	unsigned char data, sr;
+	int wakeup = 0;
+
+	sr = ioread8(ICSR(pd));
+	pd->sr |= sr;
+
+	dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
+	       (msg->flags & I2C_M_RD) ? "read" : "write",
+	       pd->pos, msg->len);
+
+	if (sr & (ICSR_AL | ICSR_TACK)) {
+		iowrite8(0, ICIC(pd)); /* disable interrupts */
+		wakeup = 1;
+		goto do_wakeup;
+	}
+
+	if (pd->pos == msg->len) {
+		i2c_op(pd, OP_RX_ONLY, 0);
+		wakeup = 1;
+		goto do_wakeup;
+	}
+
+	if (pd->pos == -1) {
+		data = (msg->addr & 0x7f) << 1;
+		data |= (msg->flags & I2C_M_RD) ? 1 : 0;
+	} else
+		data = msg->buf[pd->pos];
+
+	if ((pd->pos == -1) || !(msg->flags & I2C_M_RD)) {
+		if (msg->flags & I2C_M_RD)
+			i2c_op(pd, OP_TX_TO_RX, data);
+		else if (pd->pos == (msg->len - 1)) {
+			i2c_op(pd, OP_TX_STOP, data);
+			wakeup = 1;
+		} else
+			i2c_op(pd, OP_TX_ONLY, data);
+	} else {
+		if (pd->pos == (msg->len - 1))
+			data = i2c_op(pd, OP_RX_STOP, 0);
+		else
+			data = i2c_op(pd, OP_RX_ONLY, 0);
+
+		msg->buf[pd->pos] = data;
+	}
+	pd->pos++;
+
+ do_wakeup:
+	if (wakeup) {
+		pd->sr |= SW_DONE;
+		wake_up(&pd->wait);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
+{
+	/* Initialize channel registers */
+	iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
+
+	/* Enable channel and configure rx ack */
+	iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));
+
+	/* Set the clock */
+	iowrite8(pd->iccl, ICCL(pd));
+	iowrite8(pd->icch, ICCH(pd));
+
+	pd->msg = usr_msg;
+	pd->pos = -1;
+	pd->sr = 0;
+
+	/* Enable all interrupts except wait */
+	iowrite8(ioread8(ICIC(pd)) | ICIC_ALE | ICIC_TACKE | ICIC_DTEE,
+		 ICIC(pd));
+	return 0;
+}
+
+static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+			      struct i2c_msg *msgs,
+			      int num)
+{
+	struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+	struct i2c_msg	*msg;
+	int err = 0;
+	u_int8_t val;
+	int i, k, retry_count;
+
+	activate_ch(pd);
+
+	/* Process all messages */
+	for (i = 0; i < num; i++) {
+		msg = &msgs[i];
+
+		err = start_ch(pd, msg);
+		if (err)
+			break;
+
+		i2c_op(pd, OP_START, 0);
+
+		/* The interrupt handler takes care of the rest... */
+		k = wait_event_timeout(pd->wait,
+				       pd->sr & (ICSR_TACK | SW_DONE),
+				       5 * HZ);
+		if (!k)
+			dev_err(pd->dev, "Transfer request timed out\n");
+
+		retry_count = 10;
+again:
+		val = ioread8(ICSR(pd));
+
+		dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
+
+		if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
+			err = -EIO;
+			break;
+		}
+
+		/* the interrupt handler may wake us up before the
+		 * transfer is finished, so poll the hardware
+		 * until we're done.
+		 */
+
+		if (!(!(val & ICSR_BUSY) && (val & ICSR_SCLM) &&
+		      (val & ICSR_SDAM))) {
+			msleep(1);
+			if (retry_count--)
+				goto again;
+
+			err = -EIO;
+			dev_err(pd->dev, "Polling timed out\n");
+			break;
+		}
+	}
+
+	deactivate_ch(pd);
+
+	if (!err)
+		err = num;
+	return err;
+}
+
+static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm sh_mobile_i2c_algorithm = {
+	.functionality	= sh_mobile_i2c_func,
+	.master_xfer	= sh_mobile_i2c_xfer,
+};
+
+static void sh_mobile_i2c_setup_channel(struct platform_device *dev)
+{
+	struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
+	unsigned long peripheral_clk = clk_get_rate(pd->clk);
+	u_int32_t num;
+	u_int32_t denom;
+	u_int32_t tmp;
+
+	spin_lock_init(&pd->lock);
+	init_waitqueue_head(&pd->wait);
+
+	/* Calculate the value for iccl. From the data sheet:
+	 * iccl = (p clock / transfer rate) * (L / (L + H))
+	 * where L and H are the SCL low/high ratio (5/4 in this case).
+	 * We also round off the result.
+	 */
+	num = peripheral_clk * 5;
+	denom = NORMAL_SPEED * 9;
+	tmp = num * 10 / denom;
+	if (tmp % 10 >= 5)
+		pd->iccl = (u_int8_t)((num/denom) + 1);
+	else
+		pd->iccl = (u_int8_t)(num/denom);
+
+	/* Calculate the value for icch. From the data sheet:
+	   icch = (p clock / transfer rate) * (H / (L + H)) */
+	num = peripheral_clk * 4;
+	tmp = num * 10 / denom;
+	if (tmp % 10 >= 5)
+		pd->icch = (u_int8_t)((num/denom) + 1);
+	else
+		pd->icch = (u_int8_t)(num/denom);
+}
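
For the same illustrative 33 MHz peripheral clock (again an assumed value, not taken from the patch) and the 100 kHz NORMAL_SPEED above, the ICCL/ICCH arithmetic works out as in this small stand-alone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long pclk = 33000000;	/* assumed peripheral clock (Hz) */
	unsigned long rate = 100000;	/* NORMAL_SPEED                  */
	unsigned long num, denom, tmp;
	unsigned int iccl, icch;

	/* low period: L/(L+H) = 5/9 of the bit time, rounded to nearest */
	num = pclk * 5;
	denom = rate * 9;
	tmp = num * 10 / denom;
	iccl = num / denom + (tmp % 10 >= 5 ? 1 : 0);

	/* high period: H/(L+H) = 4/9 of the bit time, rounded the same way */
	num = pclk * 4;
	tmp = num * 10 / denom;
	icch = num / denom + (tmp % 10 >= 5 ? 1 : 0);

	/* with these assumed numbers: ICCL = 183, ICCH = 147 */
	printf("ICCL = %u, ICCH = %u\n", iccl, icch);
	return 0;
}
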
+
+static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
+{
+	struct resource *res;
+	int ret = -ENXIO;
+	int q, m;
+	int k = 0;
+	int n = 0;
+
+	while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
+		for (n = res->start; hook && n <= res->end; n++) {
+			if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED,
+					dev->dev.bus_id, dev))
+				goto rollback;
+		}
+		k++;
+	}
+
+	if (hook)
+		return k > 0 ? 0 : -ENOENT;
+
+	k--;
+	ret = 0;
+
+ rollback:
+	for (q = k; k >= 0; k--) {
+		for (m = n; m >= res->start; m--)
+			free_irq(m, dev);
+
+		res = platform_get_resource(dev, IORESOURCE_IRQ, k - 1);
+		m = res->end;
+	}
+
+	return ret;
+}
+
+static int sh_mobile_i2c_probe(struct platform_device *dev)
+{
+	struct sh_mobile_i2c_data *pd;
+	struct i2c_adapter *adap;
+	struct resource *res;
+	int size;
+	int ret;
+
+	pd = kzalloc(sizeof(struct sh_mobile_i2c_data), GFP_KERNEL);
+	if (pd == NULL) {
+		dev_err(&dev->dev, "cannot allocate private data\n");
+		return -ENOMEM;
+	}
+
+	pd->clk = clk_get(&dev->dev, "peripheral_clk");
+	if (IS_ERR(pd->clk)) {
+		dev_err(&dev->dev, "cannot get peripheral clock\n");
+		ret = PTR_ERR(pd->clk);
+		goto err;
+	}
+
+	ret = sh_mobile_i2c_hook_irqs(dev, 1);
+	if (ret) {
+		dev_err(&dev->dev, "cannot request IRQ\n");
+		goto err_clk;
+	}
+
+	pd->dev = &dev->dev;
+	platform_set_drvdata(dev, pd);
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&dev->dev, "cannot find IO resource\n");
+		ret = -ENOENT;
+		goto err_irq;
+	}
+
+	size = (res->end - res->start) + 1;
+
+	pd->reg = ioremap(res->start, size);
+	if (pd->reg == NULL) {
+		dev_err(&dev->dev, "cannot map IO\n");
+		ret = -ENXIO;
+		goto err_irq;
+	}
+
+	/* setup the private data */
+	adap = &pd->adap;
+	i2c_set_adapdata(adap, pd);
+
+	adap->owner = THIS_MODULE;
+	adap->algo = &sh_mobile_i2c_algorithm;
+	adap->dev.parent = &dev->dev;
+	adap->retries = 5;
+	adap->nr = dev->id;
+
+	strlcpy(adap->name, dev->name, sizeof(adap->name));
+
+	sh_mobile_i2c_setup_channel(dev);
+
+	ret = i2c_add_numbered_adapter(adap);
+	if (ret < 0) {
+		dev_err(&dev->dev, "cannot add numbered adapter\n");
+		goto err_all;
+	}
+
+	return 0;
+
+ err_all:
+	iounmap(pd->reg);
+ err_irq:
+	sh_mobile_i2c_hook_irqs(dev, 0);
+ err_clk:
+	clk_put(pd->clk);
+ err:
+	kfree(pd);
+	return ret;
+}
+
+static int sh_mobile_i2c_remove(struct platform_device *dev)
+{
+	struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
+
+	i2c_del_adapter(&pd->adap);
+	iounmap(pd->reg);
+	sh_mobile_i2c_hook_irqs(dev, 0);
+	clk_put(pd->clk);
+	kfree(pd);
+	return 0;
+}
+
+static struct platform_driver sh_mobile_i2c_driver = {
+	.driver		= {
+		.name		= "i2c-sh_mobile",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= sh_mobile_i2c_probe,
+	.remove		= sh_mobile_i2c_remove,
+};
+
+static int __init sh_mobile_i2c_adap_init(void)
+{
+	return platform_driver_register(&sh_mobile_i2c_driver);
+}
+
+static void __exit sh_mobile_i2c_adap_exit(void)
+{
+	platform_driver_unregister(&sh_mobile_i2c_driver);
+}
+
+module_init(sh_mobile_i2c_adap_init);
+module_exit(sh_mobile_i2c_adap_exit);
+
+MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 10af8d3..042fda2 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -159,6 +159,9 @@
 
 /* device driver */
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:simtec-i2c");
+
 static struct platform_driver simtec_i2c_driver = {
 	.driver		= {
 		.name		= "simtec-i2c",
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 081d957..4678bab 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -151,3 +151,4 @@
 
 MODULE_DESCRIPTION("ARM Versatile I2C bus driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:versatile-i2c");
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index f5e7a70..61abe0f 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -527,7 +527,7 @@
 	if (iface == NULL)
 		return -ENOMEM;
 
-	if (request_region(base, 8, iface->adapter.name) == 0) {
+	if (!request_region(base, 8, iface->adapter.name)) {
 		printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
 		       base, base + 8 - 1);
 		rc = -EBUSY;
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 2a31601..b1b45dd 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -658,7 +658,7 @@
 		OTG_CTRL_REG |= OTG_PULLUP;
 	}
 
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 	dump_regs(isp, "otg->isp1301");
 }
 
@@ -782,7 +782,7 @@
 		if (otg_ctrl & OTG_DRIVER_SEL) {
 			switch (isp->otg.state) {
 			case OTG_STATE_A_IDLE:
-				b_idle(isp, __FUNCTION__);
+				b_idle(isp, __func__);
 				break;
 			default:
 				break;
@@ -826,7 +826,7 @@
 						isp->otg.host->otg_port);
 	}
 
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 	return ret;
 }
 
@@ -837,7 +837,7 @@
 	if (!otg_dev)
 		return -ENODEV;
 
-	dump_regs(isp, __FUNCTION__);
+	dump_regs(isp, __func__);
 	/* some of these values are board-specific... */
 	OTG_SYSCON_2_REG |= OTG_EN
 		/* for B-device: */
@@ -853,9 +853,9 @@
 	update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE));
 	update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
 
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 	pr_debug("otg: %s, %s %06x\n",
-			state_name(isp), __FUNCTION__, OTG_CTRL_REG);
+			state_name(isp), __func__, OTG_CTRL_REG);
 
 	OTG_IRQ_EN_REG = DRIVER_SWITCH | OPRT_CHG
 			| B_SRP_TMROUT | B_HNP_FAIL
@@ -1041,11 +1041,11 @@
 						OTG1_DP_PULLDOWN);
 			isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1,
 						OTG1_DP_PULLUP);
-			dump_regs(isp, __FUNCTION__);
+			dump_regs(isp, __func__);
 #endif
 			/* FALLTHROUGH */
 		case OTG_STATE_B_SRP_INIT:
-			b_idle(isp, __FUNCTION__);
+			b_idle(isp, __func__);
 			OTG_CTRL_REG &= OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
 			/* FALLTHROUGH */
 		case OTG_STATE_B_IDLE:
@@ -1077,7 +1077,7 @@
 	 */
 	update_otg1(isp, isp_stat);
 	update_otg2(isp, isp_bstat);
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 #endif
 
 	dump_regs(isp, "isp1301->otg");
@@ -1310,7 +1310,7 @@
 	 */
 	isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV);
 
-	dump_regs(isp, __FUNCTION__);
+	dump_regs(isp, __func__);
 
 	return 0;
 
@@ -1365,7 +1365,7 @@
 	isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING,
 		INTR_VBUS_VLD);
 	dev_info(&isp->client.dev, "B-Peripheral sessions ok\n");
-	dump_regs(isp, __FUNCTION__);
+	dump_regs(isp, __func__);
 
 	/* If this has a Mini-AB connector, this mode is highly
 	 * nonstandard ... but can be handy for testing, so long
@@ -1416,7 +1416,7 @@
 
 	pr_debug("otg: SRP, %s ... %06x\n", state_name(isp), OTG_CTRL_REG);
 #ifdef	CONFIG_USB_OTG
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 #endif
 	return 0;
 }
@@ -1463,7 +1463,7 @@
 	}
 	pr_debug("otg: HNP %s, %06x ...\n",
 		state_name(isp), OTG_CTRL_REG);
-	check_state(isp, __FUNCTION__);
+	check_state(isp, __func__);
 	return 0;
 #else
 	/* srp-only */
@@ -1601,7 +1601,7 @@
 	update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
 #endif
 
-	dump_regs(isp, __FUNCTION__);
+	dump_regs(isp, __func__);
 
 #ifdef	VERBOSE
 	mod_timer(&isp->timer, jiffies + TIMER_JIFFIES);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e186df6..6c7fa8d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1506,7 +1506,7 @@
 		read_write = I2C_SMBUS_READ;
 		if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
 			dev_err(&adapter->dev, "%s called with invalid "
-				"block proc call size (%d)\n", __FUNCTION__,
+				"block proc call size (%d)\n", __func__,
 				data->block[0]);
 			return -1;
 		}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 393e679..d34c14c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -200,16 +200,176 @@
 	return device_for_each_child(&adapter->dev, &addr, i2cdev_check);
 }
 
+static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
+		unsigned long arg)
+{
+	struct i2c_rdwr_ioctl_data rdwr_arg;
+	struct i2c_msg *rdwr_pa;
+	u8 __user **data_ptrs;
+	int i, res;
+
+	if (copy_from_user(&rdwr_arg,
+			   (struct i2c_rdwr_ioctl_data __user *)arg,
+			   sizeof(rdwr_arg)))
+		return -EFAULT;
+
+	/* Put an arbitrary limit on the number of messages that can
+	 * be sent at once */
+	if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
+		return -EINVAL;
+
+	rdwr_pa = (struct i2c_msg *)
+		kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
+		GFP_KERNEL);
+	if (!rdwr_pa)
+		return -ENOMEM;
+
+	if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
+			   rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
+		kfree(rdwr_pa);
+		return -EFAULT;
+	}
+
+	data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
+	if (data_ptrs == NULL) {
+		kfree(rdwr_pa);
+		return -ENOMEM;
+	}
+
+	res = 0;
+	for (i = 0; i < rdwr_arg.nmsgs; i++) {
+		/* Limit the size of the message to a sane amount;
+		 * and don't let length change either. */
+		if ((rdwr_pa[i].len > 8192) ||
+		    (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+			res = -EINVAL;
+			break;
+		}
+		data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
+		rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
+		if (rdwr_pa[i].buf == NULL) {
+			res = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(rdwr_pa[i].buf, data_ptrs[i],
+				   rdwr_pa[i].len)) {
+				++i; /* Needs to be kfreed too */
+				res = -EFAULT;
+			break;
+		}
+	}
+	if (res < 0) {
+		int j;
+		for (j = 0; j < i; ++j)
+			kfree(rdwr_pa[j].buf);
+		kfree(data_ptrs);
+		kfree(rdwr_pa);
+		return res;
+	}
+
+	res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
+	while (i-- > 0) {
+		if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
+			if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
+					 rdwr_pa[i].len))
+				res = -EFAULT;
+		}
+		kfree(rdwr_pa[i].buf);
+	}
+	kfree(data_ptrs);
+	kfree(rdwr_pa);
+	return res;
+}
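
The handler above preserves the existing I2C_RDWR user-space contract. A minimal user-space sketch of that contract, assuming the usual <linux/i2c.h> and <linux/i2c-dev.h> headers and a purely hypothetical device at 0x50 on /dev/i2c-0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, buf[4];
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = buf  },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus node */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		return 1;
	}
	/* one write message followed by one read message, issued as a
	 * single transfer with a repeated start in between */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	close(fd);
	return 0;
}
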
+
+static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
+		unsigned long arg)
+{
+	struct i2c_smbus_ioctl_data data_arg;
+	union i2c_smbus_data temp;
+	int datasize, res;
+
+	if (copy_from_user(&data_arg,
+			   (struct i2c_smbus_ioctl_data __user *) arg,
+			   sizeof(struct i2c_smbus_ioctl_data)))
+		return -EFAULT;
+	if ((data_arg.size != I2C_SMBUS_BYTE) &&
+	    (data_arg.size != I2C_SMBUS_QUICK) &&
+	    (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
+	    (data_arg.size != I2C_SMBUS_WORD_DATA) &&
+	    (data_arg.size != I2C_SMBUS_PROC_CALL) &&
+	    (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
+	    (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
+	    (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
+	    (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
+		dev_dbg(&client->adapter->dev,
+			"size out of range (%x) in ioctl I2C_SMBUS.\n",
+			data_arg.size);
+		return -EINVAL;
+	}
+	/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
+	   so the check is valid if size==I2C_SMBUS_QUICK too. */
+	if ((data_arg.read_write != I2C_SMBUS_READ) &&
+	    (data_arg.read_write != I2C_SMBUS_WRITE)) {
+		dev_dbg(&client->adapter->dev,
+			"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
+			data_arg.read_write);
+		return -EINVAL;
+	}
+
+	/* Note that command values are always valid! */
+
+	if ((data_arg.size == I2C_SMBUS_QUICK) ||
+	    ((data_arg.size == I2C_SMBUS_BYTE) &&
+	    (data_arg.read_write == I2C_SMBUS_WRITE)))
+		/* These are special: we do not use data */
+		return i2c_smbus_xfer(client->adapter, client->addr,
+				      client->flags, data_arg.read_write,
+				      data_arg.command, data_arg.size, NULL);
+
+	if (data_arg.data == NULL) {
+		dev_dbg(&client->adapter->dev,
+			"data is NULL pointer in ioctl I2C_SMBUS.\n");
+		return -EINVAL;
+	}
+
+	if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
+	    (data_arg.size == I2C_SMBUS_BYTE))
+		datasize = sizeof(data_arg.data->byte);
+	else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
+		 (data_arg.size == I2C_SMBUS_PROC_CALL))
+		datasize = sizeof(data_arg.data->word);
+	else /* size == smbus block, i2c block, or block proc. call */
+		datasize = sizeof(data_arg.data->block);
+
+	if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
+	    (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+	    (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
+	    (data_arg.read_write == I2C_SMBUS_WRITE)) {
+		if (copy_from_user(&temp, data_arg.data, datasize))
+			return -EFAULT;
+	}
+	if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
+		/* Convert old I2C block commands to the new
+		   convention. This preserves binary compatibility. */
+		data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
+		if (data_arg.read_write == I2C_SMBUS_READ)
+			temp.block[0] = I2C_SMBUS_BLOCK_MAX;
+	}
+	res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+	      data_arg.read_write, data_arg.command, data_arg.size, &temp);
+	if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
+		     (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+		     (data_arg.read_write == I2C_SMBUS_READ))) {
+		if (copy_to_user(data_arg.data, &temp, datasize))
+			return -EFAULT;
+	}
+	return res;
+}
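
Likewise for the SMBus path, a minimal user-space sketch, with the bus node, the 0x48 slave address and register 0x01 all chosen only for illustration:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_READ,
		.command    = 0x01,		/* hypothetical register */
		.size       = I2C_SMBUS_BYTE_DATA,
		.data       = &data,
	};
	int fd = open("/dev/i2c-0", O_RDWR);	/* hypothetical bus node */

	/* bind the fd to a (hypothetical) slave at 0x48, then run an
	 * SMBus "read byte data" transaction through the ioctl */
	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x48) < 0 ||
	    ioctl(fd, I2C_SMBUS, &args) < 0) {
		perror("I2C_SMBUS");
		return 1;
	}
	printf("reg 0x01 = 0x%02x\n", data.byte);
	close(fd);
	return 0;
}
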
+
 static int i2cdev_ioctl(struct inode *inode, struct file *file,
 		unsigned int cmd, unsigned long arg)
 {
 	struct i2c_client *client = (struct i2c_client *)file->private_data;
-	struct i2c_rdwr_ioctl_data rdwr_arg;
-	struct i2c_smbus_ioctl_data data_arg;
-	union i2c_smbus_data temp;
-	struct i2c_msg *rdwr_pa;
-	u8 __user **data_ptrs;
-	int i,datasize,res;
 	unsigned long funcs;
 
 	dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
@@ -253,164 +413,11 @@
 		return put_user(funcs, (unsigned long __user *)arg);
 
 	case I2C_RDWR:
-		if (copy_from_user(&rdwr_arg,
-				   (struct i2c_rdwr_ioctl_data __user *)arg,
-				   sizeof(rdwr_arg)))
-			return -EFAULT;
-
-		/* Put an arbitrary limit on the number of messages that can
-		 * be sent at once */
-		if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
-			return -EINVAL;
-
-		rdwr_pa = (struct i2c_msg *)
-			kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
-			GFP_KERNEL);
-
-		if (rdwr_pa == NULL) return -ENOMEM;
-
-		if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
-				   rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
-			kfree(rdwr_pa);
-			return -EFAULT;
-		}
-
-		data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
-		if (data_ptrs == NULL) {
-			kfree(rdwr_pa);
-			return -ENOMEM;
-		}
-
-		res = 0;
-		for( i=0; i<rdwr_arg.nmsgs; i++ ) {
-			/* Limit the size of the message to a sane amount;
-			 * and don't let length change either. */
-			if ((rdwr_pa[i].len > 8192) ||
-			    (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
-				res = -EINVAL;
-				break;
-			}
-			data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
-			rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
-			if(rdwr_pa[i].buf == NULL) {
-				res = -ENOMEM;
-				break;
-			}
-			if(copy_from_user(rdwr_pa[i].buf,
-				data_ptrs[i],
-				rdwr_pa[i].len)) {
-					++i; /* Needs to be kfreed too */
-					res = -EFAULT;
-				break;
-			}
-		}
-		if (res < 0) {
-			int j;
-			for (j = 0; j < i; ++j)
-				kfree(rdwr_pa[j].buf);
-			kfree(data_ptrs);
-			kfree(rdwr_pa);
-			return res;
-		}
-
-		res = i2c_transfer(client->adapter,
-			rdwr_pa,
-			rdwr_arg.nmsgs);
-		while(i-- > 0) {
-			if( res>=0 && (rdwr_pa[i].flags & I2C_M_RD)) {
-				if(copy_to_user(
-					data_ptrs[i],
-					rdwr_pa[i].buf,
-					rdwr_pa[i].len)) {
-					res = -EFAULT;
-				}
-			}
-			kfree(rdwr_pa[i].buf);
-		}
-		kfree(data_ptrs);
-		kfree(rdwr_pa);
-		return res;
+		return i2cdev_ioctl_rdrw(client, arg);
 
 	case I2C_SMBUS:
-		if (copy_from_user(&data_arg,
-		                   (struct i2c_smbus_ioctl_data __user *) arg,
-		                   sizeof(struct i2c_smbus_ioctl_data)))
-			return -EFAULT;
-		if ((data_arg.size != I2C_SMBUS_BYTE) &&
-		    (data_arg.size != I2C_SMBUS_QUICK) &&
-		    (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
-		    (data_arg.size != I2C_SMBUS_WORD_DATA) &&
-		    (data_arg.size != I2C_SMBUS_PROC_CALL) &&
-		    (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
-		    (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
-		    (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
-		    (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
-			dev_dbg(&client->adapter->dev,
-				"size out of range (%x) in ioctl I2C_SMBUS.\n",
-				data_arg.size);
-			return -EINVAL;
-		}
-		/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
-		   so the check is valid if size==I2C_SMBUS_QUICK too. */
-		if ((data_arg.read_write != I2C_SMBUS_READ) &&
-		    (data_arg.read_write != I2C_SMBUS_WRITE)) {
-			dev_dbg(&client->adapter->dev,
-				"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
-				data_arg.read_write);
-			return -EINVAL;
-		}
+		return i2cdev_ioctl_smbus(client, arg);
 
-		/* Note that command values are always valid! */
-
-		if ((data_arg.size == I2C_SMBUS_QUICK) ||
-		    ((data_arg.size == I2C_SMBUS_BYTE) &&
-		    (data_arg.read_write == I2C_SMBUS_WRITE)))
-			/* These are special: we do not use data */
-			return i2c_smbus_xfer(client->adapter, client->addr,
-					      client->flags,
-					      data_arg.read_write,
-					      data_arg.command,
-					      data_arg.size, NULL);
-
-		if (data_arg.data == NULL) {
-			dev_dbg(&client->adapter->dev,
-				"data is NULL pointer in ioctl I2C_SMBUS.\n");
-			return -EINVAL;
-		}
-
-		if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
-		    (data_arg.size == I2C_SMBUS_BYTE))
-			datasize = sizeof(data_arg.data->byte);
-		else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
-		         (data_arg.size == I2C_SMBUS_PROC_CALL))
-			datasize = sizeof(data_arg.data->word);
-		else /* size == smbus block, i2c block, or block proc. call */
-			datasize = sizeof(data_arg.data->block);
-
-		if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
-		    (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
-		    (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
-		    (data_arg.read_write == I2C_SMBUS_WRITE)) {
-			if (copy_from_user(&temp, data_arg.data, datasize))
-				return -EFAULT;
-		}
-		if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
-			/* Convert old I2C block commands to the new
-			   convention. This preserves binary compatibility. */
-			data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
-			if (data_arg.read_write == I2C_SMBUS_READ)
-				temp.block[0] = I2C_SMBUS_BLOCK_MAX;
-		}
-		res = i2c_smbus_xfer(client->adapter,client->addr,client->flags,
-		      data_arg.read_write,
-		      data_arg.command,data_arg.size,&temp);
-		if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
-		              (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
-			      (data_arg.read_write == I2C_SMBUS_READ))) {
-			if (copy_to_user(data_arg.data, &temp, datasize))
-				return -EFAULT;
-		}
-		return res;
 	case I2C_RETRIES:
 		client->adapter->retries = arg;
 		break;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bb94ce7..297a48f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,4 +360,16 @@
 	  driver (SCSI/ATA) which supports enclosures
 	  or a SCSI enclosure device (SES) to use these services.
 
+config SGI_XP
+	tristate "Support communication between SGI SSIs"
+	depends on IA64_GENERIC || IA64_SGI_SN2
+	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	---help---
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others.  Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4581b25..5914da4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -24,3 +24,4 @@
 obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_SGI_XP)		+= sgi-xp/
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 0000000..b6e40a7
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for SGI's XP devices.
+#
+
+obj-$(CONFIG_SGI_XP)		+= xp.o
+xp-y				:= xp_main.o xp_nofault.o
+
+obj-$(CONFIG_SGI_XP)		+= xpc.o
+xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
+
+obj-$(CONFIG_SGI_XP)		+= xpnet.o
diff --git a/include/asm-ia64/sn/xp.h b/drivers/misc/sgi-xp/xp.h
similarity index 91%
rename from include/asm-ia64/sn/xp.h
rename to drivers/misc/sgi-xp/xp.h
index f7711b3..5515234 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -3,18 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * External Cross Partition (XP) structures and defines.
  */
 
-
-#ifndef _ASM_IA64_SN_XP_H
-#define _ASM_IA64_SN_XP_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XP_H
+#define _DRIVERS_MISC_SGIXP_XP_H
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
@@ -22,14 +19,12 @@
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
 #define DBUG_ON(condition)
 #endif
 
-
 /*
  * Define the maximum number of logically defined partitions the system
  * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
  */
 #define XP_MAX_PARTITIONS	64
 
-
 /*
  * Define the number of u64s required to represent all the C-brick nasids
  * as a bitmap.  The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
 #define XP_NASID_MASK_BYTES	((XP_MAX_PHYSNODE_ID + 7) / 8)
 #define XP_NASID_MASK_WORDS	((XP_MAX_PHYSNODE_ID + 63) / 64)
 
-
 /*
  * Wrapper for bte_copy() that should it return a failure status will retry
  * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@
 	bte_result_t ret;
 	u64 pdst = ia64_tpa(vdst);
 
-
 	/*
 	 * Ensure that the physically mapped memory is contiguous.
 	 *
@@ -87,16 +79,15 @@
 
 	ret = bte_copy(src, pdst, len, mode, notification);
 	if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
-		if (!in_interrupt()) {
+		if (!in_interrupt())
 			cond_resched();
-		}
+
 		ret = bte_copy(src, pdst, len, mode, notification);
 	}
 
 	return ret;
 }
 
-
 /*
  * XPC establishes channel connections between the local partition and any
  * other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@
 #error	XPC_NCHANNELS exceeds MAXIMUM allowed.
 #endif
 
-
 /*
  * The format of an XPC message is as follows:
  *
@@ -160,12 +150,10 @@
 	u64 payload;		/* user defined portion of message */
 };
 
-
 #define XPC_MSG_PAYLOAD_OFFSET	(u64) (&((struct xpc_msg *)0)->payload)
 #define XPC_MSG_SIZE(_payload_size) \
 		L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
 
-
 /*
  * Define the return values and values passed to user's callout functions.
  * (It is important to add new value codes at the end just preceding
@@ -267,10 +255,9 @@
 				/* 115: BTE end */
 	xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
 
-	xpcUnknownReason	/* 116: unknown reason -- must be last in list */
+	xpcUnknownReason	/* 116: unknown reason - must be last in enum */
 };
 
-
 /*
  * Define the callout function types used by XPC to update the user on
  * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@
  * =====================+================================+=====================
  */
 
-typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *data, void *key);
+typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+				  int ch_number, void *data, void *key);
 
-typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
-		int ch_number, void *key);
-
+typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+				 int ch_number, void *key);
 
 /*
  * The following is a registration entry. There is a global array of these,
@@ -398,50 +384,45 @@
  */
 struct xpc_registration {
 	struct mutex mutex;
-	xpc_channel_func func;		/* function to call */
-	void *key;			/* pointer to user's key */
-	u16 nentries;			/* #of msg entries in local msg queue */
-	u16 msg_size;			/* message queue's message size */
-	u32 assigned_limit;		/* limit on #of assigned kthreads */
-	u32 idle_limit;			/* limit on #of idle kthreads */
+	xpc_channel_func func;	/* function to call */
+	void *key;		/* pointer to user's key */
+	u16 nentries;		/* #of msg entries in local msg queue */
+	u16 msg_size;		/* message queue's message size */
+	u32 assigned_limit;	/* limit on #of assigned kthreads */
+	u32 idle_limit;		/* limit on #of idle kthreads */
 } ____cacheline_aligned;
 
-
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
-
 /* the following are valid xpc_allocate() flags */
-#define XPC_WAIT	0		/* wait flag */
-#define XPC_NOWAIT	1		/* no wait flag */
-
+#define XPC_WAIT	0	/* wait flag */
+#define XPC_NOWAIT	1	/* no wait flag */
 
 struct xpc_interface {
-	void (*connect)(int);
-	void (*disconnect)(int);
-	enum xpc_retval (*allocate)(partid_t, int, u32, void **);
-	enum xpc_retval (*send)(partid_t, int, void *);
-	enum xpc_retval (*send_notify)(partid_t, int, void *,
-						xpc_notify_func, void *);
-	void (*received)(partid_t, int, void *);
-	enum xpc_retval (*partid_to_nasids)(partid_t, void *);
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xpc_retval (*allocate) (partid_t, int, u32, void **);
+	enum xpc_retval (*send) (partid_t, int, void *);
+	enum xpc_retval (*send_notify) (partid_t, int, void *,
+					xpc_notify_func, void *);
+	void (*received) (partid_t, int, void *);
+	enum xpc_retval (*partid_to_nasids) (partid_t, void *);
 };
 
-
 extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
-		void (*)(int),
-		enum xpc_retval (*)(partid_t, int, u32, void **),
-		enum xpc_retval (*)(partid_t, int, void *),
-		enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
-								void *),
-		void (*)(partid_t, int, void *),
-		enum xpc_retval (*)(partid_t, void *));
+			      void (*)(int),
+			      enum xpc_retval (*)(partid_t, int, u32, void **),
+			      enum xpc_retval (*)(partid_t, int, void *),
+			      enum xpc_retval (*)(partid_t, int, void *,
+						  xpc_notify_func, void *),
+			      void (*)(partid_t, int, void *),
+			      enum xpc_retval (*)(partid_t, void *));
 extern void xpc_clear_interface(void);
 
-
 extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
-						u16, u32, u32);
+				   u16, u32, u32);
 extern void xpc_disconnect(int);
 
 static inline enum xpc_retval
@@ -458,7 +439,7 @@
 
 static inline enum xpc_retval
 xpc_send_notify(partid_t partid, int ch_number, void *payload,
-			xpc_notify_func func, void *key)
+		xpc_notify_func func, void *key)
 {
 	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
 }
@@ -475,11 +456,8 @@
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
-
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);
 
-
-#endif /* _ASM_IA64_SN_XP_H */
-
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/arch/ia64/sn/kernel/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
similarity index 69%
rename from arch/ia64/sn/kernel/xp_main.c
rename to drivers/misc/sgi-xp/xp_main.c
index b7ea466..1fbf99b 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition (XP) base.
  *
@@ -15,58 +14,64 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
- * Target of nofault PIO read.
+ * The export of xp_nofault_PIOR needs to happen here since it is defined
+ * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
+ * defined here.
  */
-u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
 
+u64 xp_nofault_PIOR_target;
+EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
 struct xpc_registration xpc_registrations[XPC_NCHANNELS];
-
+EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
  * Initialize the XPC interface to indicate that XPC isn't loaded.
  */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+static enum xpc_retval
+xpc_notloaded(void)
+{
+	return xpcNotLoaded;
+}
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int)) xpc_notloaded,
-	(void (*)(int)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
-							xpc_notloaded,
-	(void (*)(partid_t, int, void *)) xpc_notloaded,
-	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+	(void (*)(int))xpc_notloaded,
+	(void (*)(int))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+	    xpc_notloaded,
+	(void (*)(partid_t, int, void *))xpc_notloaded,
+	(enum xpc_retval(*)(partid_t, void *))xpc_notloaded
 };
-
+EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
  * XPC calls this when it (the XPC module) has been loaded.
  */
 void
-xpc_set_interface(void (*connect)(int),
-		void (*disconnect)(int),
-		enum xpc_retval (*allocate)(partid_t, int, u32, void **),
-		enum xpc_retval (*send)(partid_t, int, void *),
-		enum xpc_retval (*send_notify)(partid_t, int, void *,
-						xpc_notify_func, void *),
-		void (*received)(partid_t, int, void *),
-		enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+xpc_set_interface(void (*connect) (int),
+		  void (*disconnect) (int),
+		  enum xpc_retval (*allocate) (partid_t, int, u32, void **),
+		  enum xpc_retval (*send) (partid_t, int, void *),
+		  enum xpc_retval (*send_notify) (partid_t, int, void *,
+						  xpc_notify_func, void *),
+		  void (*received) (partid_t, int, void *),
+		  enum xpc_retval (*partid_to_nasids) (partid_t, void *))
 {
 	xpc_interface.connect = connect;
 	xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@
 	xpc_interface.received = received;
 	xpc_interface.partid_to_nasids = partid_to_nasids;
 }
-
+EXPORT_SYMBOL_GPL(xpc_set_interface);
 
 /*
  * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
-	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
-					void **)) xpc_notloaded;
-	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
-				    xpc_notify_func, void *)) xpc_notloaded;
+	xpc_interface.connect = (void (*)(int))xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+						     void **))xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+	    xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+							xpc_notify_func,
+							void *))xpc_notloaded;
 	xpc_interface.received = (void (*)(partid_t, int, void *))
-					xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
-					xpc_notloaded;
+	    xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+	    xpc_notloaded;
 }
-
+EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
 /*
  * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@
  */
 enum xpc_retval
 xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
-		u16 nentries, u32 assigned_limit, u32 idle_limit)
+	    u16 nentries, u32 assigned_limit, u32 idle_limit)
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 	DBUG_ON(payload_size == 0 || nentries == 0);
 	DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@
 
 	registration = &xpc_registrations[ch_number];
 
-	if (mutex_lock_interruptible(&registration->mutex) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
 		return xpcInterrupted;
-	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
@@ -161,7 +165,7 @@
 
 	return xpcSuccess;
 }
-
+EXPORT_SYMBOL_GPL(xpc_connect);
 
 /*
  * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@
 {
 	struct xpc_registration *registration;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@
 
 	return;
 }
-
+EXPORT_SYMBOL_GPL(xpc_disconnect);
 
 int __init
 xp_init(void)
 {
 	int ret, ch_number;
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
-
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	/*
 	 * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@
 	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
 	 * work around).
 	 */
-	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
-						err_func_addr, 1, 1)) != 0) {
+	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
+				       1, 1);
+	if (ret != 0) {
 		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
-			ret);
+		       ret);
 	}
 	/*
 	 * Setup the nofault PIO read target. (There is no special reason why
 	 * SH_IPI_ACCESS was selected.)
 	 */
-	if (is_shub2()) {
+	if (is_shub2())
 		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-	} else {
+	else
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
-	}
 
 	/* initialize the connection registration mutex */
-	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);
-	}
 
 	return 0;
 }
-module_init(xp_init);
 
+module_init(xp_init);
 
 void __exit
 xp_exit(void)
 {
-	u64 func_addr = *(u64 *) xp_nofault_PIOR;
-	u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
+	u64 func_addr = *(u64 *)xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *)xp_error_PIOR;
 
 	/* unregister the PIO read nofault code region */
-	(void) sn_register_nofault_code(func_addr, err_func_addr,
-					err_func_addr, 1, 0);
+	(void)sn_register_nofault_code(func_addr, err_func_addr,
+				       err_func_addr, 1, 0);
 }
-module_exit(xp_exit);
 
+module_exit(xp_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition (XP) base");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
-
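
To make the registration interface exported above concrete, here is a minimal, hypothetical kernel-level consumer of XP. It is only a sketch: the channel number, payload size, queue depth and kthread limits are illustrative, and the xpc_channel_func callback signature is assumed from "xp.h", which is not part of this hunk.

/*
 * Hypothetical consumer of the XP registration interface exported above.
 * Channel number, payload size, queue depth and kthread limits are
 * illustrative only; the callback signature is assumed from "xp.h".
 */
#include <linux/module.h>
#include "xp.h"

#define DEMO_CH		3	/* example channel#, must be < XPC_NCHANNELS */

static void
demo_channel_handler(enum xpc_retval reason, partid_t partid, int ch_number,
		     void *data, void *key)
{
	/* invoked for connect, disconnect and message-delivery events */
}

static int __init
demo_init(void)
{
	enum xpc_retval ret;

	ret = xpc_connect(DEMO_CH, demo_channel_handler, NULL,
			  128,	/* payload_size (u16) */
			  64,	/* nentries (u16) */
			  4,	/* assigned_limit (u32) */
			  4);	/* idle_limit (u32) */
	return (ret == xpcSuccess) ? 0 : -EBUSY;
}
module_init(demo_init);

static void __exit
demo_exit(void)
{
	xpc_disconnect(DEMO_CH);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

Note that xpc_connect() takes the per-channel registration mutex with mutex_lock_interruptible(), so it can sleep and returns xpcInterrupted if the caller is signaled while waiting (visible in the hunk above), and a channel that already has a registered function is rejected.
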
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
similarity index 94%
rename from arch/ia64/sn/kernel/xp_nofault.S
rename to drivers/misc/sgi-xp/xp_nofault.S
index 98e7c7d..e38d433 100644
--- a/arch/ia64/sn/kernel/xp_nofault.S
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * The xp_nofault_PIOR function takes a pointer to a remote PIO register
  * and attempts to load and consume a value from it.  This function
diff --git a/include/asm-ia64/sn/xpc.h b/drivers/misc/sgi-xp/xpc.h
similarity index 75%
rename from include/asm-ia64/sn/xpc.h
rename to drivers/misc/sgi-xp/xpc.h
index 3c0900a..9eb6d4a 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -3,17 +3,15 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) structures and macros.
  */
 
-#ifndef _ASM_IA64_SN_XPC_H
-#define _ASM_IA64_SN_XPC_H
-
+#ifndef _DRIVERS_MISC_SGIXP_XPC_H
+#define _DRIVERS_MISC_SGIXP_XPC_H
 
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
@@ -27,8 +25,7 @@
 #include <asm/sn/addrs.h>
 #include <asm/sn/mspec.h>
 #include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
  * XPC Version numbers consist of a major and minor number. XPC can always
@@ -39,7 +36,6 @@
 #define XPC_VERSION_MAJOR(_v)		((_v) >> 4)
 #define XPC_VERSION_MINOR(_v)		((_v) & 0xf)
 
-
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
 /* define the process name of the discovery thread */
 #define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
 
-
 /*
  * the reserved page
  *
@@ -115,16 +110,16 @@
 	u8 partid;		/* SAL: partition ID */
 	u8 version;
 	u8 pad1[6];		/* align to next u64 in cacheline */
-	volatile u64 vars_pa;
+	u64 vars_pa;		/* physical address of struct xpc_vars */
 	struct timespec stamp;	/* time when reserved page was setup by XPC */
 	u64 pad2[9];		/* align to last u64 in cacheline */
 	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
 
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
 
 #define XPC_SUPPORTS_RP_STAMP(_version) \
-			(_version >= _XPC_VERSION(1,1))
+			(_version >= _XPC_VERSION(1, 1))
 
 /*
  * compare stamps - the return value is:
@@ -138,14 +133,13 @@
 {
 	int ret;
 
-
-	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+	ret = stamp1->tv_sec - stamp2->tv_sec;
+	if (ret == 0)
 		ret = stamp1->tv_nsec - stamp2->tv_nsec;
-	}
+
 	return ret;
 }
 
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,11 +166,10 @@
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
 };
 
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+#define XPC_V_VERSION _XPC_VERSION(3, 1)    /* version 3.1 of the cross vars */
 
 #define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-			(_version >= _XPC_VERSION(3,1))
-
+			(_version >= _XPC_VERSION(3, 1))
 
 static inline int
 xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -193,7 +186,7 @@
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask | (1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
 static inline void
@@ -205,10 +198,9 @@
 		old_mask = vars->heartbeating_to_mask;
 		new_mask = (old_mask & ~(1UL << partid));
 	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-							old_mask);
+		 old_mask);
 }
 
-
 /*
  * The AMOs page consists of a number of AMO variables which are divided into
  * four groups. The first two groups are used to identify an IRQ's sender.
@@ -222,7 +214,6 @@
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
 #define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
-
 /*
  * The following structure describes the per partition specific variables.
  *
@@ -234,7 +225,7 @@
  * occupies half a cacheline.
  */
 struct xpc_vars_part {
-	volatile u64 magic;
+	u64 magic;
 
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
@@ -257,20 +248,20 @@
  * MAGIC2 indicates that this partition has pulled the remote partition's
  * per partition variables that pertain to this partition.
  */
-#define XPC_VP_MAGIC1	0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2	0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
-
+#define XPC_VP_MAGIC1	0x0053524156435058L   /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2	0x0073726176435058L   /* 'XPCvars\0'L (little endian) */
 
 /* the reserved page sizes and offsets */
 
 #define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
-#define XPC_RP_VARS_SIZE 	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
 
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
 #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)	((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)	(struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
-
+#define XPC_RP_VARS(_rp)	((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
+				    xp_nasid_mask_words))
+#define XPC_RP_VARS_PART(_rp)	((struct xpc_vars_part *) \
+				    ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
 
 /*
  * Functions registered by add_timer() or called by kernel_thread() only
@@ -285,21 +276,17 @@
 #define XPC_UNPACK_ARG1(_args)	(((u64) _args) & 0xffffffff)
 #define XPC_UNPACK_ARG2(_args)	((((u64) _args) >> 32) & 0xffffffff)
 
-
-
 /*
  * Define a Get/Put value pair (pointers) used with a message queue.
  */
 struct xpc_gp {
-	volatile s64 get;	/* Get value */
-	volatile s64 put;	/* Put value */
+	s64 get;		/* Get value */
+	s64 put;		/* Put value */
 };
 
 #define XPC_GP_SIZE \
 		L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
 
-
-
 /*
  * Define a structure that contains arguments associated with opening and
  * closing a channel.
@@ -315,20 +302,15 @@
 #define XPC_OPENCLOSE_ARGS_SIZE \
 	      L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
 
-
-
 /* struct xpc_msg flags */
 
 #define	XPC_M_DONE		0x01	/* msg has been received/consumed */
 #define	XPC_M_READY		0x02	/* msg is ready to be sent */
 #define	XPC_M_INTERRUPT		0x04	/* send interrupt when msg consumed */
 
-
 #define XPC_MSG_ADDRESS(_payload) \
 		((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
 
-
-
 /*
  * Defines notify entry.
  *
@@ -336,19 +318,17 @@
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-	volatile u8 type;		/* type of notification */
+	u8 type;		/* type of notification */
 
 	/* the following two fields are only used if type == XPC_N_CALL */
-	xpc_notify_func func;		/* user's notify function */
-	void *key;			/* pointer to user's key */
+	xpc_notify_func func;	/* user's notify function */
+	void *key;		/* pointer to user's key */
 };
 
 /* struct xpc_notify type of notification */
 
 #define	XPC_N_CALL		0x01	/* notify function provided by user */
 
-
-
 /*
  * Define the structure that manages all the stuff required by a channel. In
  * particular, they are used to manage the messages sent across the channel.
@@ -428,48 +408,48 @@
  *	messages.
  */
 struct xpc_channel {
-	partid_t partid;		/* ID of remote partition connected */
-	spinlock_t lock;		/* lock for updating this structure */
-	u32 flags;			/* general flags */
+	partid_t partid;	/* ID of remote partition connected */
+	spinlock_t lock;	/* lock for updating this structure */
+	u32 flags;		/* general flags */
 
-	enum xpc_retval reason;		/* reason why channel is disconnect'g */
-	int reason_line;		/* line# disconnect initiated from */
+	enum xpc_retval reason;	/* reason why channel is disconnect'g */
+	int reason_line;	/* line# disconnect initiated from */
 
-	u16 number;			/* channel # */
+	u16 number;		/* channel # */
 
-	u16 msg_size;			/* sizeof each msg entry */
-	u16 local_nentries;		/* #of msg entries in local msg queue */
-	u16 remote_nentries;		/* #of msg entries in remote msg queue*/
+	u16 msg_size;		/* sizeof each msg entry */
+	u16 local_nentries;	/* #of msg entries in local msg queue */
+	u16 remote_nentries;	/* #of msg entries in remote msg queue */
 
 	void *local_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *local_msgqueue;	/* local message queue */
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
-	struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
-					/* local message queue */
-	u64 remote_msgqueue_pa;		/* phys addr of remote partition's */
-					/* local message queue */
+	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
+					 /* local message queue */
+	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
+				/* local message queue */
 
-	atomic_t references;		/* #of external references to queues */
+	atomic_t references;	/* #of external references to queues */
 
-	atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
-	wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
+	wait_queue_head_t msg_allocate_wq;	/* msg allocation wait queue */
 
-	u8 delayed_IPI_flags;		/* IPI flags received, but delayed */
-					/* action until channel disconnected */
+	u8 delayed_IPI_flags;	/* IPI flags received, but delayed */
+				/* action until channel disconnected */
 
 	/* queue of msg senders who want to be notified when msg received */
 
-	atomic_t n_to_notify;		/* #of msg senders to notify */
-	struct xpc_notify *notify_queue;/* notify queue for messages sent */
+	atomic_t n_to_notify;	/* #of msg senders to notify */
+	struct xpc_notify *notify_queue;    /* notify queue for messages sent */
 
-	xpc_channel_func func;		/* user's channel function */
-	void *key;			/* pointer to user's key */
+	xpc_channel_func func;	/* user's channel function */
+	void *key;		/* pointer to user's key */
 
 	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
-	struct completion wdisconnect_wait; /* wait for channel disconnect */
+	struct completion wdisconnect_wait;    /* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
-					/* opening or closing of channel */
+					     /* opening or closing of channel */
 
 	/* various flavors of local and remote Get/Put values */
 
@@ -477,56 +457,48 @@
 	struct xpc_gp remote_GP;	/* remote Get/Put values */
 	struct xpc_gp w_local_GP;	/* working local Get/Put values */
 	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
-	s64 next_msg_to_pull;		/* Put value of next msg to pull */
+	s64 next_msg_to_pull;	/* Put value of next msg to pull */
 
 	/* kthread management related fields */
 
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
 	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
-	u32 kthreads_assigned_limit; 	/* limit on #of kthreads assigned */
-	atomic_t kthreads_idle;		/* #of kthreads idle waiting for work */
+	u32 kthreads_assigned_limit;	/* limit on #of kthreads assigned */
+	atomic_t kthreads_idle;	/* #of kthreads idle waiting for work */
 	u32 kthreads_idle_limit;	/* limit on #of kthreads idle */
 	atomic_t kthreads_active;	/* #of kthreads actively working */
-	// >>> following field is temporary
-	u32 kthreads_created;		/* total #of kthreads created */
 
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_channel flags */
 
-#define	XPC_C_WASCONNECTED	0x00000001 /* channel was connected */
+#define	XPC_C_WASCONNECTED	0x00000001	/* channel was connected */
 
-#define	XPC_C_ROPENREPLY	0x00000002 /* remote open channel reply */
-#define	XPC_C_OPENREPLY		0x00000004 /* local open channel reply */
-#define	XPC_C_ROPENREQUEST	0x00000008 /* remote open channel request */
-#define	XPC_C_OPENREQUEST	0x00000010 /* local open channel request */
+#define	XPC_C_ROPENREPLY	0x00000002	/* remote open channel reply */
+#define	XPC_C_OPENREPLY		0x00000004	/* local open channel reply */
+#define	XPC_C_ROPENREQUEST	0x00000008     /* remote open channel request */
+#define	XPC_C_OPENREQUEST	0x00000010	/* local open channel request */
 
 #define	XPC_C_SETUP		0x00000020 /* channel's msgqueues are alloc'd */
-#define	XPC_C_CONNECTEDCALLOUT	0x00000040 /* connected callout initiated */
+#define	XPC_C_CONNECTEDCALLOUT	0x00000040     /* connected callout initiated */
 #define	XPC_C_CONNECTEDCALLOUT_MADE \
-				0x00000080 /* connected callout completed */
-#define	XPC_C_CONNECTED		0x00000100 /* local channel is connected */
-#define	XPC_C_CONNECTING	0x00000200 /* channel is being connected */
+				0x00000080     /* connected callout completed */
+#define	XPC_C_CONNECTED		0x00000100	/* local channel is connected */
+#define	XPC_C_CONNECTING	0x00000200	/* channel is being connected */
 
-#define	XPC_C_RCLOSEREPLY	0x00000400 /* remote close channel reply */
-#define	XPC_C_CLOSEREPLY	0x00000800 /* local close channel reply */
-#define	XPC_C_RCLOSEREQUEST	0x00001000 /* remote close channel request */
-#define	XPC_C_CLOSEREQUEST	0x00002000 /* local close channel request */
+#define	XPC_C_RCLOSEREPLY	0x00000400	/* remote close channel reply */
+#define	XPC_C_CLOSEREPLY	0x00000800	/* local close channel reply */
+#define	XPC_C_RCLOSEREQUEST	0x00001000    /* remote close channel request */
+#define	XPC_C_CLOSEREQUEST	0x00002000     /* local close channel request */
 
-#define	XPC_C_DISCONNECTED	0x00004000 /* channel is disconnected */
-#define	XPC_C_DISCONNECTING	0x00008000 /* channel is being disconnected */
+#define	XPC_C_DISCONNECTED	0x00004000	/* channel is disconnected */
+#define	XPC_C_DISCONNECTING	0x00008000   /* channel is being disconnected */
 #define	XPC_C_DISCONNECTINGCALLOUT \
 				0x00010000 /* disconnecting callout initiated */
 #define	XPC_C_DISCONNECTINGCALLOUT_MADE \
 				0x00020000 /* disconnecting callout completed */
-#define	XPC_C_WDISCONNECT	0x00040000 /* waiting for channel disconnect */
-
-
+#define	XPC_C_WDISCONNECT	0x00040000  /* waiting for channel disconnect */
 
 /*
  * Manages channels on a partition basis. There is one of these structures
@@ -537,33 +509,31 @@
 
 	/* XPC HB infrastructure */
 
-	u8 remote_rp_version;		/* version# of partition's rsvd pg */
-	struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
-	u64 remote_rp_pa;		/* phys addr of partition's rsvd pg */
-	u64 remote_vars_pa;		/* phys addr of partition's vars */
+	u8 remote_rp_version;	/* version# of partition's rsvd pg */
+	struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
+	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
+	u64 remote_vars_pa;	/* phys addr of partition's vars */
 	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
-	u64 last_heartbeat;		/* HB at last read */
+	u64 last_heartbeat;	/* HB at last read */
 	u64 remote_amos_page_pa;	/* phys addr of partition's amos page */
-	int remote_act_nasid;		/* active part's act/deact nasid */
+	int remote_act_nasid;	/* active part's act/deact nasid */
 	int remote_act_phys_cpuid;	/* active part's act/deact phys cpuid */
-	u32 act_IRQ_rcvd;		/* IRQs since activation */
-	spinlock_t act_lock;		/* protect updating of act_state */
-	u8 act_state;			/* from XPC HB viewpoint */
-	u8 remote_vars_version;		/* version# of partition's vars */
-	enum xpc_retval reason;		/* reason partition is deactivating */
-	int reason_line;		/* line# deactivation initiated from */
-	int reactivate_nasid;		/* nasid in partition to reactivate */
+	u32 act_IRQ_rcvd;	/* IRQs since activation */
+	spinlock_t act_lock;	/* protect updating of act_state */
+	u8 act_state;		/* from XPC HB viewpoint */
+	u8 remote_vars_version;	/* version# of partition's vars */
+	enum xpc_retval reason;	/* reason partition is deactivating */
+	int reason_line;	/* line# deactivation initiated from */
+	int reactivate_nasid;	/* nasid in partition to reactivate */
 
-	unsigned long disengage_request_timeout; /* timeout in jiffies */
+	unsigned long disengage_request_timeout;	/* timeout in jiffies */
 	struct timer_list disengage_request_timer;
 
-
 	/* XPC infrastructure referencing and teardown control */
 
-	volatile u8 setup_state;	/* infrastructure setup state */
+	u8 setup_state;		/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
-	atomic_t references;		/* #of references to infrastructure */
-
+	atomic_t references;	/* #of references to infrastructure */
 
 	/*
 	 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -572,53 +542,48 @@
 	 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
 	 */
 
+	u8 nchannels;		/* #of defined channels supported */
+	atomic_t nchannels_active;  /* #of channels that are not DISCONNECTED */
+	atomic_t nchannels_engaged;  /* #of channels engaged with remote part */
+	struct xpc_channel *channels;	/* array of channel structures */
 
-	u8 nchannels;		   /* #of defined channels supported */
-	atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
-	atomic_t nchannels_engaged;/* #of channels engaged with remote part */
-	struct xpc_channel *channels;/* array of channel structures */
-
-	void *local_GPs_base;	  /* base address of kmalloc'd space */
-	struct xpc_gp *local_GPs; /* local Get/Put values */
-	void *remote_GPs_base;    /* base address of kmalloc'd space */
-	struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
-				  /* values */
-	u64 remote_GPs_pa;	  /* phys address of remote partition's local */
-				  /* Get/Put values */
-
+	void *local_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *local_GPs;	/* local Get/Put values */
+	void *remote_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *remote_GPs;	/* copy of remote partition's local */
+					/* Get/Put values */
+	u64 remote_GPs_pa;	/* phys address of remote partition's local */
+				/* Get/Put values */
 
 	/* fields used to pass args when opening or closing a channel */
 
-	void *local_openclose_args_base;  /* base address of kmalloc'd space */
-	struct xpc_openclose_args *local_openclose_args;  /* local's args */
-	void *remote_openclose_args_base; /* base address of kmalloc'd space */
+	void *local_openclose_args_base;   /* base address of kmalloc'd space */
+	struct xpc_openclose_args *local_openclose_args;      /* local's args */
+	void *remote_openclose_args_base;  /* base address of kmalloc'd space */
 	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
-					  /* args */
-	u64 remote_openclose_args_pa;	  /* phys addr of remote's args */
-
+							  /* args */
+	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
 
 	/* IPI sending, receiving and handling related fields */
 
-	int remote_IPI_nasid;	    /* nasid of where to send IPIs */
-	int remote_IPI_phys_cpuid;  /* phys CPU ID of where to send IPIs */
-	AMO_t *remote_IPI_amo_va;   /* address of remote IPI AMO_t structure */
+	int remote_IPI_nasid;	/* nasid of where to send IPIs */
+	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
+	AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
 
-	AMO_t *local_IPI_amo_va;    /* address of IPI AMO_t structure */
-	u64 local_IPI_amo;	    /* IPI amo flags yet to be handled */
-	char IPI_owner[8];	    /* IPI owner's name */
-	struct timer_list dropped_IPI_timer; /* dropped IPI timer */
+	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
+	u64 local_IPI_amo;	/* IPI amo flags yet to be handled */
+	char IPI_owner[8];	/* IPI owner's name */
+	struct timer_list dropped_IPI_timer;	/* dropped IPI timer */
 
-	spinlock_t IPI_lock;	    /* IPI handler lock */
-
+	spinlock_t IPI_lock;	/* IPI handler lock */
 
 	/* channel manager related fields */
 
 	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
-	wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+	wait_queue_head_t channel_mgr_wq;	/* channel mgr's wait queue */
 
 } ____cacheline_aligned;
 
-
 /* struct xpc_partition act_state values (for XPC HB) */
 
 #define	XPC_P_INACTIVE		0x00	/* partition is not active */
@@ -627,11 +592,9 @@
 #define XPC_P_ACTIVE		0x03	/* xpc_partition_up() was called */
 #define XPC_P_DEACTIVATING	0x04	/* partition deactivation initiated */
 
-
 #define XPC_DEACTIVATE_PARTITION(_p, _reason) \
 			xpc_deactivate_partition(__LINE__, (_p), (_reason))
 
-
 /* struct xpc_partition setup_state values */
 
 #define XPC_P_UNSET		0x00	/* infrastructure was never setup */
@@ -639,8 +602,6 @@
 #define XPC_P_WTEARDOWN		0x02	/* waiting to teardown infrastructure */
 #define XPC_P_TORNDOWN		0x03	/* infrastructure is torndown */
 
-
-
 /*
  * struct xpc_partition IPI_timer #of seconds to wait before checking for
  * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +609,17 @@
  */
 #define XPC_P_DROPPED_IPI_WAIT	(0.25 * HZ)
 
-
 /* number of seconds to wait for other partitions to disengage */
 #define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT	90
 
 /* interval in seconds to print 'waiting disengagement' messages */
 #define XPC_DISENGAGE_PRINTMSG_INTERVAL		10
 
-
 #define XPC_PARTID(_p)	((partid_t) ((_p) - &xpc_partitions[0]))
 
-
-
 /* found in xp_main.c */
 extern struct xpc_registration xpc_registrations[];
 
-
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
@@ -676,7 +632,6 @@
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
 
-
 /* found in xpc_partition.c */
 extern int xpc_exiting;
 extern struct xpc_vars *xpc_vars;
@@ -696,10 +651,9 @@
 extern void xpc_discovery(void);
 extern void xpc_check_remote_hb(void);
 extern void xpc_deactivate_partition(const int, struct xpc_partition *,
-						enum xpc_retval);
+				     enum xpc_retval);
 extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
 
-
 /* found in xpc_channel.c */
 extern void xpc_initiate_connect(int);
 extern void xpc_initiate_disconnect(int);
@@ -714,23 +668,18 @@
 extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
-					enum xpc_retval, unsigned long *);
+				   enum xpc_retval, unsigned long *);
 extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
 extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
 
-
-
 static inline void
 xpc_wakeup_channel_mgr(struct xpc_partition *part)
 {
-	if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+	if (atomic_inc_return(&part->channel_mgr_requests) == 1)
 		wake_up(&part->channel_mgr_wq);
-	}
 }
 
-
-
 /*
  * These next two inlines are used to keep us from tearing down a channel's
  * msg queues while a thread may be referencing them.
@@ -747,17 +696,13 @@
 	s32 refs = atomic_dec_return(&ch->references);
 
 	DBUG_ON(refs < 0);
-	if (refs == 0) {
+	if (refs == 0)
 		xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
-	}
 }
 
-
-
 #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
 		xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
 
-
 /*
  * These two inlines are used to keep us from tearing down a partition's
  * setup infrastructure while a thread may be referencing it.
@@ -767,11 +712,9 @@
 {
 	s32 refs = atomic_dec_return(&part->references);
 
-
 	DBUG_ON(refs < 0);
-	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+	if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
 		wake_up(&part->teardown_wq);
-	}
 }
 
 static inline int
@@ -779,17 +722,14 @@
 {
 	int setup;
 
-
 	atomic_inc(&part->references);
 	setup = (part->setup_state == XPC_P_SETUP);
-	if (!setup) {
+	if (!setup)
 		xpc_part_deref(part);
-	}
+
 	return setup;
 }
 
-
-
 /*
  * The following macro is to be used for the setting of the reason and
  * reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -801,8 +741,6 @@
 		(_p)->reason_line = _line; \
 	}
 
-
-
 /*
  * This next set of inlines are used to keep track of when a partition is
  * potentially engaged in accessing memory belonging to another partition.
@@ -812,23 +750,24 @@
 xpc_mark_partition_engaged(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-				(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
+	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+				   (XPC_ENGAGED_PARTITIONS_AMO *
+				    sizeof(AMO_t)));
 
 	local_irq_save(irq_flags);
 
 	/* set bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-						(1UL << sn_partition_id));
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+			 (1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
 	 * keep sending IPIs and AMOs to it until the heartbeat times out.
 	 */
-	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-				variable), xp_nofault_PIOR_target));
+	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+							       variable),
+						     xp_nofault_PIOR_target));
 
 	local_irq_restore(irq_flags);
 }
@@ -837,23 +776,24 @@
 xpc_mark_partition_disengaged(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-				(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
+	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+				   (XPC_ENGAGED_PARTITIONS_AMO *
+				    sizeof(AMO_t)));
 
 	local_irq_save(irq_flags);
 
 	/* clear bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-						~(1UL << sn_partition_id));
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+			 ~(1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
 	 * keep sending IPIs and AMOs to it until the heartbeat times out.
 	 */
-	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-				variable), xp_nofault_PIOR_target));
+	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+							       variable),
+						     xp_nofault_PIOR_target));
 
 	local_irq_restore(irq_flags);
 }
@@ -862,23 +802,23 @@
 xpc_request_partition_disengage(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-				(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
+	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+				   (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
 
 	local_irq_save(irq_flags);
 
 	/* set bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-						(1UL << sn_partition_id));
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
+			 (1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
 	 * keep sending IPIs and AMOs to it until the heartbeat times out.
 	 */
-	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-				variable), xp_nofault_PIOR_target));
+	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+							       variable),
+						     xp_nofault_PIOR_target));
 
 	local_irq_restore(irq_flags);
 }
@@ -887,23 +827,23 @@
 xpc_cancel_partition_disengage_request(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-				(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
+	AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
+				   (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
 
 	local_irq_save(irq_flags);
 
 	/* clear bit corresponding to our partid in remote partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-						~(1UL << sn_partition_id));
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+			 ~(1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
 	 * keep sending IPIs and AMOs to it until the heartbeat times out.
 	 */
-	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-				variable), xp_nofault_PIOR_target));
+	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
+							       variable),
+						     xp_nofault_PIOR_target));
 
 	local_irq_restore(irq_flags);
 }
@@ -913,10 +853,9 @@
 {
 	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-
 	/* return our partition's AMO variable ANDed with partid_mask */
-	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-								partid_mask);
+	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+		partid_mask);
 }
 
 static inline u64
@@ -924,10 +863,9 @@
 {
 	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
 
-
 	/* return our partition's AMO variable ANDed with partid_mask */
-	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-								partid_mask);
+	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
+		partid_mask);
 }
 
 static inline void
@@ -935,10 +873,9 @@
 {
 	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-
 	/* clear bit(s) based on partid_mask in our partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-								~partid_mask);
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+			 ~partid_mask);
 }
 
 static inline void
@@ -946,14 +883,11 @@
 {
 	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
 
-
 	/* clear bit(s) based on partid_mask in our partition's AMO */
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-								~partid_mask);
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
+			 ~partid_mask);
 }
 
-
-
 /*
  * The following set of macros and inlines are used for the sending and
  * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -964,20 +898,18 @@
 static inline u64
 xpc_IPI_receive(AMO_t *amo)
 {
-	return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
+	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
 }
 
-
 static inline enum xpc_retval
 xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 {
 	int ret = 0;
 	unsigned long irq_flags;
 
-
 	local_irq_save(irq_flags);
 
-	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
+	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
 	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
 
 	/*
@@ -986,15 +918,14 @@
 	 * didn't, we'd never know that the other partition is down and would
 	 * keep sending IPIs and AMOs to it until the heartbeat times out.
 	 */
-	ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
-				xp_nofault_PIOR_target));
+	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
+						     xp_nofault_PIOR_target));
 
 	local_irq_restore(irq_flags);
 
 	return ((ret == 0) ? xpcSuccess : xpcPioReadError);
 }
 
-
 /*
  * IPIs associated with SGI_XPC_ACTIVATE IRQ.
  */
@@ -1004,47 +935,47 @@
  */
 static inline void
 xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
-			int to_phys_cpuid)
+		      int to_phys_cpuid)
 {
 	int w_index = XPC_NASID_W_INDEX(from_nasid);
 	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *) __va(amos_page_pa +
-				(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
+	AMO_t *amos = (AMO_t *)__va(amos_page_pa +
+				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
 
-
-	(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
-				to_phys_cpuid, SGI_XPC_ACTIVATE);
+	(void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
+			   to_phys_cpuid, SGI_XPC_ACTIVATE);
 }
 
 static inline void
 xpc_IPI_send_activate(struct xpc_vars *vars)
 {
 	xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
-				vars->act_nasid, vars->act_phys_cpuid);
+			      vars->act_nasid, vars->act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_activated(struct xpc_partition *part)
 {
 	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-			part->remote_act_nasid, part->remote_act_phys_cpuid);
+			      part->remote_act_nasid,
+			      part->remote_act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_reactivate(struct xpc_partition *part)
 {
 	xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
-				xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
+			      xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
 }
 
 static inline void
 xpc_IPI_send_disengage(struct xpc_partition *part)
 {
 	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-			part->remote_act_nasid, part->remote_act_phys_cpuid);
+			      part->remote_act_nasid,
+			      part->remote_act_phys_cpuid);
 }
 
-
 /*
  * IPIs associated with SGI_XPC_NOTIFY IRQ.
  */
@@ -1058,33 +989,28 @@
 
 static inline void
 xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
-			unsigned long *irq_flags)
+		    unsigned long *irq_flags)
 {
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	enum xpc_retval ret;
 
-
 	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
 		ret = xpc_IPI_send(part->remote_IPI_amo_va,
-					(u64) ipi_flag << (ch->number * 8),
-					part->remote_IPI_nasid,
-					part->remote_IPI_phys_cpuid,
-					SGI_XPC_NOTIFY);
+				   (u64)ipi_flag << (ch->number * 8),
+				   part->remote_IPI_nasid,
+				   part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
 		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
 			ipi_flag_string, ch->partid, ch->number, ret);
 		if (unlikely(ret != xpcSuccess)) {
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
 				spin_unlock_irqrestore(&ch->lock, *irq_flags);
-			}
 			XPC_DEACTIVATE_PARTITION(part, ret);
-			if (irq_flags != NULL) {
+			if (irq_flags != NULL)
 				spin_lock_irqsave(&ch->lock, *irq_flags);
-			}
 		}
 	}
 }
 
-
 /*
  * Make it look like the remote partition, which is associated with the
  * specified channel, sent us an IPI. This faked IPI will be handled
@@ -1095,18 +1021,16 @@
 
 static inline void
 xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
-				char *ipi_flag_string)
+			  char *ipi_flag_string)
 {
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 
-
-	FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
-			FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
+	FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
+			 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
 	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
 		ipi_flag_string, ch->partid, ch->number);
 }
 
-
 /*
  * The sending and receiving of IPIs includes the setting of an AMO variable
  * to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1121,21 +1045,18 @@
 #define	XPC_IPI_OPENREPLY	0x08
 #define	XPC_IPI_MSGREQUEST	0x10
 
-
 /* given an AMO variable and a channel#, get its associated IPI flags */
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
 
-#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))
-
+#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010UL)
 
 static inline void
 xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
 {
 	struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
 	args->reason = ch->reason;
 
 	XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
@@ -1152,7 +1073,6 @@
 {
 	struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
 	args->msg_size = ch->msg_size;
 	args->local_nentries = ch->local_nentries;
 
@@ -1164,7 +1084,6 @@
 {
 	struct xpc_openclose_args *args = ch->local_openclose_args;
 
-
 	args->remote_nentries = ch->remote_nentries;
 	args->local_nentries = ch->local_nentries;
 	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1184,7 +1103,6 @@
 	XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
 }
 
-
 /*
  * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
  * pages are located in the lowest granule. The lowest granule uses 4k pages
@@ -1201,13 +1119,10 @@
 {
 	AMO_t *amo = xpc_vars->amos_page + index;
 
-
-	(void) xpc_IPI_receive(amo);	/* clear AMO variable */
+	(void)xpc_IPI_receive(amo);	/* clear AMO variable */
 	return amo;
 }
 
-
-
 static inline enum xpc_retval
 xpc_map_bte_errors(bte_result_t error)
 {
@@ -1220,22 +1135,31 @@
 		return xpcBteUnmappedError;
 	}
 	switch (error) {
-	case BTE_SUCCESS:	return xpcSuccess;
-	case BTEFAIL_DIR:	return xpcBteDirectoryError;
-	case BTEFAIL_POISON:	return xpcBtePoisonError;
-	case BTEFAIL_WERR:	return xpcBteWriteError;
-	case BTEFAIL_ACCESS:	return xpcBteAccessError;
-	case BTEFAIL_PWERR:	return xpcBtePWriteError;
-	case BTEFAIL_PRERR:	return xpcBtePReadError;
-	case BTEFAIL_TOUT:	return xpcBteTimeOutError;
-	case BTEFAIL_XTERR:	return xpcBteXtalkError;
-	case BTEFAIL_NOTAVAIL:	return xpcBteNotAvailable;
-	default:		return xpcBteUnmappedError;
+	case BTE_SUCCESS:
+		return xpcSuccess;
+	case BTEFAIL_DIR:
+		return xpcBteDirectoryError;
+	case BTEFAIL_POISON:
+		return xpcBtePoisonError;
+	case BTEFAIL_WERR:
+		return xpcBteWriteError;
+	case BTEFAIL_ACCESS:
+		return xpcBteAccessError;
+	case BTEFAIL_PWERR:
+		return xpcBtePWriteError;
+	case BTEFAIL_PRERR:
+		return xpcBtePReadError;
+	case BTEFAIL_TOUT:
+		return xpcBteTimeOutError;
+	case BTEFAIL_XTERR:
+		return xpcBteXtalkError;
+	case BTEFAIL_NOTAVAIL:
+		return xpcBteNotAvailable;
+	default:
+		return xpcBteUnmappedError;
 	}
 }
 
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
@@ -1246,11 +1170,9 @@
 	u64 IPI_amo;
 	unsigned long irq_flags;
 
-
 	IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-	if (IPI_amo == 0) {
+	if (IPI_amo == 0)
 		return;
-	}
 
 	spin_lock_irqsave(&part->IPI_lock, irq_flags);
 	part->local_IPI_amo |= IPI_amo;
@@ -1262,6 +1184,4 @@
 	xpc_wakeup_channel_mgr(part);
 }
 
-
-#endif /* _ASM_IA64_SN_XPC_H */
-
+#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
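
The xpc.h diff above documents how a single 64-bit AMO word carries per-channel IPI reason flags: eight 8-bit fields, one per channel, set and queried through XPC_SET_IPI_FLAGS()/XPC_GET_IPI_FLAGS() and tested in bulk with the two _ANY_ masks. The following standalone worked example (user-space C, with the kernel's u8/u64 swapped for stdint equivalents, the macro bodies copied from the header above, and ULL suffixes so it also builds on 32-bit hosts) shows the packing in action.

/*
 * Standalone illustration of the per-channel IPI flag packing used by XPC:
 * the 64-bit AMO word holds one 8-bit flag field per channel.  Macro bodies
 * are copied from xpc.h above; types are stdint equivalents of u8/u64.
 */
#include <stdio.h>
#include <stdint.h>

#define XPC_IPI_OPENREPLY	0x08	/* an "openclose" flag */
#define XPC_IPI_MSGREQUEST	0x10	/* the "message" flag */

#define XPC_GET_IPI_FLAGS(_amo, _c)	((uint8_t)(((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	((_amo) |= ((uint64_t)(_f) << ((_c) * 8)))

#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fULL)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010ULL)

int main(void)
{
	uint64_t amo = 0;

	XPC_SET_IPI_FLAGS(amo, 2, XPC_IPI_OPENREPLY);	/* channel 2 */
	XPC_SET_IPI_FLAGS(amo, 5, XPC_IPI_MSGREQUEST);	/* channel 5 */

	printf("amo word          = %#018llx\n", (unsigned long long)amo);
	printf("channel 2 flags   = %#x\n", XPC_GET_IPI_FLAGS(amo, 2));
	printf("channel 5 flags   = %#x\n", XPC_GET_IPI_FLAGS(amo, 5));
	printf("openclose pending = %s\n",
	       XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo) ? "yes" : "no");
	printf("msg pending       = %s\n",
	       XPC_ANY_MSG_IPI_FLAGS_SET(amo) ? "yes" : "no");
	return 0;
}

Splitting the masks this way is what lets the IPI-handling hunk in xpc_channel.c below pull the remote openclose args only when XPC_ANY_OPENCLOSE_IPI_FLAGS_SET() is true and the remote Get/Put values only when XPC_ANY_MSG_IPI_FLAGS_SET() is true.
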
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
similarity index 86%
rename from arch/ia64/sn/kernel/xpc_channel.c
rename to drivers/misc/sgi-xp/xpc_channel.c
index 44ccc0d..bfcb9ea 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) channel support.
  *
@@ -15,7 +14,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -25,8 +23,7 @@
 #include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /*
  * Guarantee that the kzalloc'd memory is cacheline aligned.
@@ -36,22 +33,21 @@
 {
 	/* see if kzalloc will give us cacheline aligned memory by default */
 	*base = kzalloc(size, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
 		return *base;
-	}
+
 	kfree(*base);
 
 	/* nope, we'll have to do it ourselves */
 	*base = kzalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
 
+	return (void *)L1_CACHE_ALIGN((u64)*base);
+}
 
 /*
  * Set up the initial values for the XPartition Communication channels.
@@ -62,7 +58,6 @@
 	int ch_number;
 	struct xpc_channel *ch;
 
-
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
 		ch = &part->channels[ch_number];
 
@@ -72,7 +67,7 @@
 
 		ch->local_GP = &part->local_GPs[ch_number];
 		ch->local_openclose_args =
-					&part->local_openclose_args[ch_number];
+		    &part->local_openclose_args[ch_number];
 
 		atomic_set(&ch->kthreads_assigned, 0);
 		atomic_set(&ch->kthreads_idle, 0);
@@ -91,7 +86,6 @@
 	}
 }
 
-
 /*
  * Setup the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -103,7 +97,6 @@
 	struct timer_list *timer;
 	partid_t partid = XPC_PARTID(part);
 
-
 	/*
 	 * Zero out MOST of the entry for this partition. Only the fields
 	 * starting with `nchannels' will be zeroed. The preceding fields must
@@ -111,14 +104,14 @@
 	 * referenced during this memset() operation.
 	 */
 	memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
-				offsetof(struct xpc_partition, nchannels));
+	       offsetof(struct xpc_partition, nchannels));
 
 	/*
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
 	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
-								GFP_KERNEL);
+				 GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
 		return xpcNoMemory;
@@ -126,11 +119,11 @@
 
 	part->nchannels = XPC_NCHANNELS;
 
-
 	/* allocate all the required GET/PUT values */
 
 	part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-					GFP_KERNEL, &part->local_GPs_base);
+							GFP_KERNEL,
+							&part->local_GPs_base);
 	if (part->local_GPs == NULL) {
 		kfree(part->channels);
 		part->channels = NULL;
@@ -140,7 +133,9 @@
 	}
 
 	part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
-					GFP_KERNEL, &part->remote_GPs_base);
+							 GFP_KERNEL,
+							 &part->
+							 remote_GPs_base);
 	if (part->remote_GPs == NULL) {
 		dev_err(xpc_chan, "can't get memory for remote get/put "
 			"values\n");
@@ -151,12 +146,11 @@
 		return xpcNoMemory;
 	}
 
-
 	/* allocate all the required open and close args */
 
-	part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
-					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-					&part->local_openclose_args_base);
+	part->local_openclose_args =
+	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					  &part->local_openclose_args_base);
 	if (part->local_openclose_args == NULL) {
 		dev_err(xpc_chan, "can't get memory for local connect args\n");
 		kfree(part->remote_GPs_base);
@@ -168,9 +162,9 @@
 		return xpcNoMemory;
 	}
 
-	part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
-					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
-					&part->remote_openclose_args_base);
+	part->remote_openclose_args =
+	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
+					  &part->remote_openclose_args_base);
 	if (part->remote_openclose_args == NULL) {
 		dev_err(xpc_chan, "can't get memory for remote connect args\n");
 		kfree(part->local_openclose_args_base);
@@ -184,13 +178,11 @@
 		return xpcNoMemory;
 	}
 
-
 	xpc_initialize_channels(part, partid);
 
 	atomic_set(&part->nchannels_active, 0);
 	atomic_set(&part->nchannels_engaged, 0);
 
-
 	/* local_IPI_amo were set to 0 by an earlier memset() */
 
 	/* Initialize this partition's AMO_t structure */
@@ -203,7 +195,7 @@
 
 	sprintf(part->IPI_owner, "xpc%02d", partid);
 	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
-				part->IPI_owner, (void *) (u64) partid);
+			  part->IPI_owner, (void *)(u64)partid);
 	if (ret != 0) {
 		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
 			"errno=%d\n", -ret);
@@ -223,8 +215,8 @@
 	/* Setup a timer to check for dropped IPIs */
 	timer = &part->dropped_IPI_timer;
 	init_timer(timer);
-	timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
-	timer->data = (unsigned long) part;
+	timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
+	timer->data = (unsigned long)part;
 	timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
 	add_timer(timer);
 
@@ -234,7 +226,6 @@
 	 */
 	part->setup_state = XPC_P_SETUP;
 
-
 	/*
 	 * Setup the per partition specific variables required by the
 	 * remote partition to establish channel connections with us.
@@ -244,7 +235,7 @@
 	 */
 	xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
 	xpc_vars_part[partid].openclose_args_pa =
-					__pa(part->local_openclose_args);
+	    __pa(part->local_openclose_args);
 	xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
 	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
 	xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
@@ -255,7 +246,6 @@
 	return xpcSuccess;
 }
 
-
 /*
  * Create a wrapper that hides the underlying mechanism for pulling a cacheline
  * (or multiple cachelines) from a remote partition.
@@ -266,24 +256,21 @@
  */
 static enum xpc_retval
 xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
-				const void *src, size_t cnt)
+			   const void *src, size_t cnt)
 {
 	bte_result_t bte_ret;
 
-
-	DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
-	DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+	DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
+	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
 	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
 
-	if (part->act_state == XPC_P_DEACTIVATING) {
+	if (part->act_state == XPC_P_DEACTIVATING)
 		return part->reason;
-	}
 
-	bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt,
-					(BTE_NORMAL | BTE_WACQUIRE), NULL);
-	if (bte_ret == BTE_SUCCESS) {
+	bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
+			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
+	if (bte_ret == BTE_SUCCESS)
 		return xpcSuccess;
-	}
 
 	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
 		XPC_PARTID(part), bte_ret);
@@ -291,7 +278,6 @@
 	return xpc_map_bte_errors(bte_ret);
 }
 
-
 /*
  * Pull the remote per partition specific variables from the specified
  * partition.
@@ -301,41 +287,40 @@
 {
 	u8 buffer[L1_CACHE_BYTES * 2];
 	struct xpc_vars_part *pulled_entry_cacheline =
-			(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+	    (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
 	struct xpc_vars_part *pulled_entry;
 	u64 remote_entry_cacheline_pa, remote_entry_pa;
 	partid_t partid = XPC_PARTID(part);
 	enum xpc_retval ret;
 
-
 	/* pull the cacheline that contains the variables we're interested in */
 
 	DBUG_ON(part->remote_vars_part_pa !=
-				L1_CACHE_ALIGN(part->remote_vars_part_pa));
+		L1_CACHE_ALIGN(part->remote_vars_part_pa));
 	DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
 
 	remote_entry_pa = part->remote_vars_part_pa +
-			sn_partition_id * sizeof(struct xpc_vars_part);
+	    sn_partition_id * sizeof(struct xpc_vars_part);
 
 	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
 
-	pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
-				(remote_entry_pa & (L1_CACHE_BYTES - 1)));
+	pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
+						(remote_entry_pa &
+						 (L1_CACHE_BYTES - 1)));
 
 	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
-					(void *) remote_entry_cacheline_pa,
-					L1_CACHE_BYTES);
+					 (void *)remote_entry_cacheline_pa,
+					 L1_CACHE_BYTES);
 	if (ret != xpcSuccess) {
 		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
 			"partition %d, ret=%d\n", partid, ret);
 		return ret;
 	}
 
-
 	/* see if they've been set up yet */
 
 	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
-				pulled_entry->magic != XPC_VP_MAGIC2) {
+	    pulled_entry->magic != XPC_VP_MAGIC2) {
 
 		if (pulled_entry->magic != 0) {
 			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +338,8 @@
 		/* validate the variables */
 
 		if (pulled_entry->GPs_pa == 0 ||
-				pulled_entry->openclose_args_pa == 0 ||
-					pulled_entry->IPI_amo_pa == 0) {
+		    pulled_entry->openclose_args_pa == 0 ||
+		    pulled_entry->IPI_amo_pa == 0) {
 
 			dev_err(xpc_chan, "partition %d's XPC vars_part for "
 				"partition %d are not valid\n", partid,
@@ -366,29 +351,26 @@
 
 		part->remote_GPs_pa = pulled_entry->GPs_pa;
 		part->remote_openclose_args_pa =
-					pulled_entry->openclose_args_pa;
+		    pulled_entry->openclose_args_pa;
 		part->remote_IPI_amo_va =
-				      (AMO_t *) __va(pulled_entry->IPI_amo_pa);
+		    (AMO_t *)__va(pulled_entry->IPI_amo_pa);
 		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
 		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
 
-		if (part->nchannels > pulled_entry->nchannels) {
+		if (part->nchannels > pulled_entry->nchannels)
 			part->nchannels = pulled_entry->nchannels;
-		}
 
 		/* let the other side know that we've pulled their variables */
 
 		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
 	}
 
-	if (pulled_entry->magic == XPC_VP_MAGIC1) {
+	if (pulled_entry->magic == XPC_VP_MAGIC1)
 		return xpcRetry;
-	}
 
 	return xpcSuccess;
 }
 
-
 /*
  * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
  */
@@ -399,23 +381,23 @@
 	u64 IPI_amo;
 	enum xpc_retval ret;
 
-
 	/*
 	 * See if there are any IPI flags to be handled.
 	 */
 
 	spin_lock_irqsave(&part->IPI_lock, irq_flags);
-	if ((IPI_amo = part->local_IPI_amo) != 0) {
+	IPI_amo = part->local_IPI_amo;
+	if (IPI_amo != 0)
 		part->local_IPI_amo = 0;
-	}
-	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
 
+	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
 
 	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
 		ret = xpc_pull_remote_cachelines(part,
-					part->remote_openclose_args,
-					(void *) part->remote_openclose_args_pa,
-					XPC_OPENCLOSE_ARGS_SIZE);
+						 part->remote_openclose_args,
+						 (void *)part->
+						 remote_openclose_args_pa,
+						 XPC_OPENCLOSE_ARGS_SIZE);
 		if (ret != xpcSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
 
@@ -430,8 +412,8 @@
 
 	if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
 		ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
-						(void *) part->remote_GPs_pa,
-						XPC_GP_SIZE);
+						 (void *)part->remote_GPs_pa,
+						 XPC_GP_SIZE);
 		if (ret != xpcSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
 
@@ -446,7 +428,6 @@
 	return IPI_amo;
 }
 
-
 /*
  * Allocate the local message queue and the notify queue.
  */
@@ -457,20 +438,14 @@
 	int nentries;
 	size_t nbytes;
 
-
-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
 		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-						GFP_KERNEL,
-						&ch->local_msgqueue_base);
-		if (ch->local_msgqueue == NULL) {
+								   GFP_KERNEL,
+						      &ch->local_msgqueue_base);
+		if (ch->local_msgqueue == NULL)
 			continue;
-		}
 
 		nbytes = nentries * sizeof(struct xpc_notify);
 		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -497,7 +472,6 @@
 	return xpcNoMemory;
 }
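
The loop in the hunk above keeps retrying the queue allocation with one fewer entry each pass until it fits in memory (or gives up). A minimal standalone sketch of that fallback pattern follows; the names are illustrative, it uses plain kzalloc() rather than XPC's cacheline-aligned helper, and it omits the second (notify) queue the real function also allocates per iteration:

#include <linux/slab.h>

/* hedged sketch: shrink the request until the allocation succeeds */
static void *alloc_largest_queue(size_t entry_size, int max_entries,
				 int *nentries_out)
{
	int nentries;
	void *q;

	for (nentries = max_entries; nentries > 0; nentries--) {
		q = kzalloc(nentries * entry_size, GFP_KERNEL);
		if (q != NULL) {
			*nentries_out = nentries; /* caller records shrunken size */
			return q;
		}
	}
	return NULL;	/* analogous to returning xpcNoMemory above */
}
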
 
-
 /*
  * Allocate the cached remote message queue.
  */
@@ -508,22 +482,16 @@
 	int nentries;
 	size_t nbytes;
 
-
 	DBUG_ON(ch->remote_nentries <= 0);
 
-	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
-	// >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
 		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
-						GFP_KERNEL,
-						&ch->remote_msgqueue_base);
-		if (ch->remote_msgqueue == NULL) {
+								    GFP_KERNEL,
+						     &ch->remote_msgqueue_base);
+		if (ch->remote_msgqueue == NULL)
 			continue;
-		}
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->remote_nentries) {
@@ -542,7 +510,6 @@
 	return xpcNoMemory;
 }
 
-
 /*
  * Allocate message queues and other stuff associated with a channel.
  *
@@ -554,14 +521,14 @@
 	unsigned long irq_flags;
 	enum xpc_retval ret;
 
-
 	DBUG_ON(ch->flags & XPC_C_SETUP);
 
-	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_local_msgqueue(ch);
+	if (ret != xpcSuccess)
 		return ret;
-	}
 
-	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
+	ret = xpc_allocate_remote_msgqueue(ch);
+	if (ret != xpcSuccess) {
 		kfree(ch->local_msgqueue_base);
 		ch->local_msgqueue = NULL;
 		kfree(ch->notify_queue);
@@ -576,7 +543,6 @@
 	return xpcSuccess;
 }
 
-
 /*
  * Process a connect message from a remote partition.
  *
@@ -588,11 +554,10 @@
 {
 	enum xpc_retval ret;
 
-
 	DBUG_ON(!spin_is_locked(&ch->lock));
 
 	if (!(ch->flags & XPC_C_OPENREQUEST) ||
-				!(ch->flags & XPC_C_ROPENREQUEST)) {
+	    !(ch->flags & XPC_C_ROPENREQUEST)) {
 		/* nothing more to do for now */
 		return;
 	}
@@ -603,12 +568,11 @@
 		ret = xpc_allocate_msgqueues(ch);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
 
-		if (ret != xpcSuccess) {
+		if (ret != xpcSuccess)
 			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
-		}
-		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
+
+		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
 			return;
-		}
 
 		DBUG_ON(!(ch->flags & XPC_C_SETUP));
 		DBUG_ON(ch->local_msgqueue == NULL);
@@ -620,23 +584,21 @@
 		xpc_IPI_send_openreply(ch, irq_flags);
 	}
 
-	if (!(ch->flags & XPC_C_ROPENREPLY)) {
+	if (!(ch->flags & XPC_C_ROPENREPLY))
 		return;
-	}
 
 	DBUG_ON(ch->remote_msgqueue_pa == 0);
 
 	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
 
 	dev_info(xpc_chan, "channel %d to partition %d connected\n",
-		ch->number, ch->partid);
+		 ch->number, ch->partid);
 
 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
 	xpc_create_kthreads(ch, 1, 0);
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
-
 /*
  * Notify those who wanted to be notified upon delivery of their message.
  */
@@ -647,7 +609,6 @@
 	u8 notify_type;
 	s64 get = ch->w_remote_GP.get - 1;
 
-
 	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
 
 		notify = &ch->notify_queue[get % ch->local_nentries];
@@ -660,8 +621,7 @@
 		 */
 		notify_type = notify->type;
 		if (notify_type == 0 ||
-				cmpxchg(&notify->type, notify_type, 0) !=
-								notify_type) {
+		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
 			continue;
 		}
 
@@ -672,20 +632,19 @@
 		if (notify->func != NULL) {
 			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
 				"msg_number=%ld, partid=%d, channel=%d\n",
-				(void *) notify, get, ch->partid, ch->number);
+				(void *)notify, get, ch->partid, ch->number);
 
 			notify->func(reason, ch->partid, ch->number,
-								notify->key);
+				     notify->key);
 
 			dev_dbg(xpc_chan, "notify->func() returned, "
 				"notify=0x%p, msg_number=%ld, partid=%d, "
-				"channel=%d\n", (void *) notify, get,
+				"channel=%d\n", (void *)notify, get,
 				ch->partid, ch->number);
 		}
 	}
 }
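
The notify walk above relies on cmpxchg(&notify->type, notify_type, 0) so that the callout fires at most once even when another path (the disconnect handling in xpc_send_msg()) is clearing the same entry concurrently. A hedged sketch of that claim-once idea, with made-up types rather than XPC's struct xpc_notify:

#include <linux/types.h>

struct demo_notify {
	u8 type;			/* 0 means "nothing to deliver" */
	void (*func)(void *key);
	void *key;
};

/* whichever path wins the cmpxchg() on ->type runs the callout; the
 * loser simply skips the entry (cmpxchg() comes from the arch headers) */
static void demo_fire_once(struct demo_notify *n)
{
	u8 type = n->type;

	if (type == 0 || cmpxchg(&n->type, type, 0) != type)
		return;		/* already claimed elsewhere */

	if (n->func != NULL)
		n->func(n->key);
}
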
 
-
 /*
  * Free up message queues and other stuff that were allocated for the specified
  * channel.
@@ -733,7 +692,6 @@
 	}
 }
 
-
 /*
  * spin_lock_irqsave() is expected to be held on entry.
  */
@@ -743,46 +701,41 @@
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
 
-
 	DBUG_ON(!spin_is_locked(&ch->lock));
 
-	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+	if (!(ch->flags & XPC_C_DISCONNECTING))
 		return;
-	}
 
 	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
 
 	/* make sure all activity has settled down first */
 
 	if (atomic_read(&ch->kthreads_assigned) > 0 ||
-				atomic_read(&ch->references) > 0) {
+	    atomic_read(&ch->references) > 0) {
 		return;
 	}
 	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
+		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
-		if (xpc_partition_engaged(1UL << ch->partid)) {
+		if (xpc_partition_engaged(1UL << ch->partid))
 			return;
-		}
 
 	} else {
 
 		/* as long as the other side is up do the full protocol */
 
-		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
+		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
 			return;
-		}
 
 		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
 			ch->flags |= XPC_C_CLOSEREPLY;
 			xpc_IPI_send_closereply(ch, irq_flags);
 		}
 
-		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
+		if (!(ch->flags & XPC_C_RCLOSEREPLY))
 			return;
-		}
 	}
 
 	/* wake those waiting for notify completion */
@@ -809,7 +762,7 @@
 
 	if (channel_was_connected) {
 		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
-			"reason=%d\n", ch->number, ch->partid, ch->reason);
+			 "reason=%d\n", ch->number, ch->partid, ch->reason);
 	}
 
 	if (ch->flags & XPC_C_WDISCONNECT) {
@@ -820,35 +773,32 @@
 			/* time to take action on any delayed IPI flags */
 			spin_lock(&part->IPI_lock);
 			XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
-							ch->delayed_IPI_flags);
+					  ch->delayed_IPI_flags);
 			spin_unlock(&part->IPI_lock);
 		}
 		ch->delayed_IPI_flags = 0;
 	}
 }
 
-
 /*
  * Process a change in the channel's remote connection state.
  */
 static void
 xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
-				u8 IPI_flags)
+			  u8 IPI_flags)
 {
 	unsigned long irq_flags;
 	struct xpc_openclose_args *args =
-				&part->remote_openclose_args[ch_number];
+	    &part->remote_openclose_args[ch_number];
 	struct xpc_channel *ch = &part->channels[ch_number];
 	enum xpc_retval reason;
 
-
-
 	spin_lock_irqsave(&ch->lock, irq_flags);
 
 again:
 
 	if ((ch->flags & XPC_C_DISCONNECTED) &&
-					(ch->flags & XPC_C_WDISCONNECT)) {
+	    (ch->flags & XPC_C_WDISCONNECT)) {
 		/*
 		 * Delay processing IPI flags until thread waiting disconnect
 		 * has had a chance to see that the channel is disconnected.
@@ -858,7 +808,6 @@
 		return;
 	}
 
-
 	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
 
 		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -890,13 +839,14 @@
 		if (ch->flags & XPC_C_DISCONNECTED) {
 			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
 				if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
-					 ch_number) & XPC_IPI_OPENREQUEST)) {
+						       ch_number) &
+				     XPC_IPI_OPENREQUEST)) {
 
 					DBUG_ON(ch->delayed_IPI_flags != 0);
 					spin_lock(&part->IPI_lock);
 					XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-							ch_number,
-							XPC_IPI_CLOSEREQUEST);
+							  ch_number,
+							  XPC_IPI_CLOSEREQUEST);
 					spin_unlock(&part->IPI_lock);
 				}
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -921,11 +871,10 @@
 
 		if (!(ch->flags & XPC_C_DISCONNECTING)) {
 			reason = args->reason;
-			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
+			if (reason <= xpcSuccess || reason > xpcUnknownReason)
 				reason = xpcUnknownReason;
-			} else if (reason == xpcUnregistering) {
+			else if (reason == xpcUnregistering)
 				reason = xpcOtherUnregistering;
-			}
 
 			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
 
@@ -937,7 +886,6 @@
 		xpc_process_disconnect(ch, &irq_flags);
 	}
 
-
 	if (IPI_flags & XPC_IPI_CLOSEREPLY) {
 
 		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -953,12 +901,13 @@
 
 		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
 			if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
-						& XPC_IPI_CLOSEREQUEST)) {
+			     & XPC_IPI_CLOSEREQUEST)) {
 
 				DBUG_ON(ch->delayed_IPI_flags != 0);
 				spin_lock(&part->IPI_lock);
 				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-						ch_number, XPC_IPI_CLOSEREPLY);
+						  ch_number,
+						  XPC_IPI_CLOSEREPLY);
 				spin_unlock(&part->IPI_lock);
 			}
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -973,7 +922,6 @@
 		}
 	}
 
-
 	if (IPI_flags & XPC_IPI_OPENREQUEST) {
 
 		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -982,7 +930,7 @@
 			ch->partid, ch->number);
 
 		if (part->act_state == XPC_P_DEACTIVATING ||
-					(ch->flags & XPC_C_ROPENREQUEST)) {
+		    (ch->flags & XPC_C_ROPENREQUEST)) {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
@@ -993,9 +941,9 @@
 			return;
 		}
 		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
-							XPC_C_OPENREQUEST)));
+				       XPC_C_OPENREQUEST)));
 		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-					XPC_C_OPENREPLY | XPC_C_CONNECTED));
+				     XPC_C_OPENREPLY | XPC_C_CONNECTED));
 
 		/*
 		 * The meaningful OPENREQUEST connection state fields are:
@@ -1011,11 +959,10 @@
 		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
 		ch->remote_nentries = args->local_nentries;
 
-
 		if (ch->flags & XPC_C_OPENREQUEST) {
 			if (args->msg_size != ch->msg_size) {
 				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-								&irq_flags);
+						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 				return;
 			}
@@ -1031,7 +978,6 @@
 		xpc_process_connect(ch, &irq_flags);
 	}
 
-
 	if (IPI_flags & XPC_IPI_OPENREPLY) {
 
 		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
@@ -1046,7 +992,7 @@
 		}
 		if (!(ch->flags & XPC_C_OPENREQUEST)) {
 			XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
-								&irq_flags);
+					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
 		}
@@ -1057,7 +1003,7 @@
 		/*
 		 * The meaningful OPENREPLY connection state fields are:
 		 *      local_msgqueue_pa = physical address of remote
-		 *			    partition's local_msgqueue
+		 *                          partition's local_msgqueue
 		 *      local_nentries = remote partition's local_nentries
 		 *      remote_nentries = remote partition's remote_nentries
 		 */
@@ -1093,7 +1039,6 @@
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
-
 /*
  * Attempt to establish a channel connection to a remote partition.
  */
@@ -1103,10 +1048,8 @@
 	unsigned long irq_flags;
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
-
-	if (mutex_trylock(&registration->mutex) == 0) {
+	if (mutex_trylock(&registration->mutex) == 0)
 		return xpcRetry;
-	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
 		mutex_unlock(&registration->mutex);
@@ -1124,7 +1067,6 @@
 		return ch->reason;
 	}
 
-
 	/* add info from the channel connect registration to the channel */
 
 	ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1154,7 +1096,7 @@
 			 */
 			mutex_unlock(&registration->mutex);
 			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
-								&irq_flags);
+					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return xpcUnequalMsgSizes;
 		}
@@ -1169,7 +1111,6 @@
 
 	mutex_unlock(&registration->mutex);
 
-
 	/* initiate the connection */
 
 	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1182,7 +1123,6 @@
 	return xpcSuccess;
 }
 
-
 /*
  * Clear some of the msg flags in the local message queue.
  */
@@ -1192,16 +1132,15 @@
 	struct xpc_msg *msg;
 	s64 get;
 
-
 	get = ch->w_remote_GP.get;
 	do {
-		msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-				(get % ch->local_nentries) * ch->msg_size);
+		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+					 (get % ch->local_nentries) *
+					 ch->msg_size);
 		msg->flags = 0;
-	} while (++get < (volatile s64) ch->remote_GP.get);
+	} while (++get < ch->remote_GP.get);
 }
 
-
 /*
  * Clear some of the msg flags in the remote message queue.
  */
@@ -1211,43 +1150,39 @@
 	struct xpc_msg *msg;
 	s64 put;
 
-
 	put = ch->w_remote_GP.put;
 	do {
-		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-				(put % ch->remote_nentries) * ch->msg_size);
+		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+					 (put % ch->remote_nentries) *
+					 ch->msg_size);
 		msg->flags = 0;
-	} while (++put < (volatile s64) ch->remote_GP.put);
+	} while (++put < ch->remote_GP.put);
 }
 
-
 static void
 xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 {
 	struct xpc_channel *ch = &part->channels[ch_number];
 	int nmsgs_sent;
 
-
 	ch->remote_GP = part->remote_GPs[ch_number];
 
-
 	/* See what, if anything, has changed for each connected channel */
 
 	xpc_msgqueue_ref(ch);
 
 	if (ch->w_remote_GP.get == ch->remote_GP.get &&
-				ch->w_remote_GP.put == ch->remote_GP.put) {
+	    ch->w_remote_GP.put == ch->remote_GP.put) {
 		/* nothing changed since GPs were last pulled */
 		xpc_msgqueue_deref(ch);
 		return;
 	}
 
-	if (!(ch->flags & XPC_C_CONNECTED)){
+	if (!(ch->flags & XPC_C_CONNECTED)) {
 		xpc_msgqueue_deref(ch);
 		return;
 	}
 
-
 	/*
 	 * First check to see if messages recently sent by us have been
 	 * received by the other side. (The remote GET value will have
@@ -1269,7 +1204,7 @@
 			 * received and delivered by the other side.
 			 */
 			xpc_notify_senders(ch, xpcMsgDelivered,
-							ch->remote_GP.get);
+					   ch->remote_GP.get);
 		}
 
 		/*
@@ -1288,12 +1223,10 @@
 		 * If anyone was waiting for message queue entries to become
 		 * available, wake them up.
 		 */
-		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
 			wake_up(&ch->msg_allocate_wq);
-		}
 	}
 
-
 	/*
 	 * Now check for newly sent messages by the other side. (The remote
 	 * PUT value will have changed since we last looked at it.)
@@ -1318,16 +1251,14 @@
 				"delivered=%d, partid=%d, channel=%d\n",
 				nmsgs_sent, ch->partid, ch->number);
 
-			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
 				xpc_activate_kthreads(ch, nmsgs_sent);
-			}
 		}
 	}
 
 	xpc_msgqueue_deref(ch);
 }
 
-
 void
 xpc_process_channel_activity(struct xpc_partition *part)
 {
@@ -1337,7 +1268,6 @@
 	int ch_number;
 	u32 ch_flags;
 
-
 	IPI_amo = xpc_get_IPI_flags(part);
 
 	/*
@@ -1350,7 +1280,6 @@
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
 		ch = &part->channels[ch_number];
 
-
 		/*
 		 * Process any open or close related IPI flags, and then deal
 		 * with connecting or disconnecting the channel as required.
@@ -1358,9 +1287,8 @@
 
 		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
 
-		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
 			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
-		}
 
 		ch_flags = ch->flags;	/* need an atomic snapshot of flags */
 
@@ -1371,14 +1299,13 @@
 			continue;
 		}
 
-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
 			continue;
-		}
 
 		if (!(ch_flags & XPC_C_CONNECTED)) {
 			if (!(ch_flags & XPC_C_OPENREQUEST)) {
 				DBUG_ON(ch_flags & XPC_C_SETUP);
-				(void) xpc_connect_channel(ch);
+				(void)xpc_connect_channel(ch);
 			} else {
 				spin_lock_irqsave(&ch->lock, irq_flags);
 				xpc_process_connect(ch, &irq_flags);
@@ -1387,20 +1314,17 @@
 			continue;
 		}
 
-
 		/*
 		 * Process any message related IPI flags, this may involve the
 		 * activation of kthreads to deliver any pending messages sent
 		 * from the other partition.
 		 */
 
-		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
+		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
 			xpc_process_msg_IPI(part, ch_number);
-		}
 	}
 }
 
-
 /*
  * XPC's heartbeat code calls this function to inform XPC that a partition is
  * going down.  XPC responds by tearing down the XPartition Communication
@@ -1417,7 +1341,6 @@
 	int ch_number;
 	struct xpc_channel *ch;
 
-
 	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
 		XPC_PARTID(part), reason);
 
@@ -1426,7 +1349,6 @@
 		return;
 	}
 
-
 	/* disconnect channels associated with the partition going down */
 
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1446,7 +1368,6 @@
 	xpc_part_deref(part);
 }
 
-
 /*
  * Teardown the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
@@ -1456,7 +1377,6 @@
 {
 	partid_t partid = XPC_PARTID(part);
 
-
 	/*
 	 * We start off by making this partition inaccessible to local
 	 * processes by marking it as no longer setup. Then we make it
@@ -1473,9 +1393,7 @@
 
 	xpc_vars_part[partid].magic = 0;
 
-
-	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
-
+	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
 
 	/*
 	 * Before proceeding with the teardown we have to wait until all
@@ -1483,7 +1401,6 @@
 	 */
 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
 
-
 	/* now we can begin tearing down the infrastructure */
 
 	part->setup_state = XPC_P_TORNDOWN;
@@ -1504,7 +1421,6 @@
 	part->local_IPI_amo_va = NULL;
 }
 
-
 /*
  * Called by XP at the time of channel connection registration to cause
  * XPC to establish connections to all currently active partitions.
@@ -1516,7 +1432,6 @@
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -1535,7 +1450,6 @@
 	}
 }
 
-
 void
 xpc_connected_callout(struct xpc_channel *ch)
 {
@@ -1546,14 +1460,13 @@
 			"partid=%d, channel=%d\n", ch->partid, ch->number);
 
 		ch->func(xpcConnected, ch->partid, ch->number,
-				(void *) (u64) ch->local_nentries, ch->key);
+			 (void *)(u64)ch->local_nentries, ch->key);
 
 		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
 			"partid=%d, channel=%d\n", ch->partid, ch->number);
 	}
 }
 
-
 /*
  * Called by XP at the time of channel connection unregistration to cause
  * XPC to teardown all current connections for the specified channel.
@@ -1575,7 +1488,6 @@
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
 
-
 	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
 
 	/* initiate the channel disconnect for every active partition */
@@ -1592,7 +1504,7 @@
 				ch->flags |= XPC_C_WDISCONNECT;
 
 				XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
-								&irq_flags);
+						       &irq_flags);
 			}
 
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1605,7 +1517,6 @@
 	xpc_disconnect_wait(ch_number);
 }
 
-
 /*
  * To disconnect a channel, and reflect it back to all who may be waiting.
  *
@@ -1617,16 +1528,15 @@
  */
 void
 xpc_disconnect_channel(const int line, struct xpc_channel *ch,
-			enum xpc_retval reason, unsigned long *irq_flags)
+		       enum xpc_retval reason, unsigned long *irq_flags)
 {
 	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
 
-
 	DBUG_ON(!spin_is_locked(&ch->lock));
 
-	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
+	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
 		return;
-	}
+
 	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
 
 	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1637,14 +1547,13 @@
 	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
 	/* some of these may not have been set */
 	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
-			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
-			XPC_C_CONNECTING | XPC_C_CONNECTED);
+		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
+		       XPC_C_CONNECTING | XPC_C_CONNECTED);
 
 	xpc_IPI_send_closerequest(ch, irq_flags);
 
-	if (channel_was_connected) {
+	if (channel_was_connected)
 		ch->flags |= XPC_C_WASCONNECTED;
-	}
 
 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
 
@@ -1653,20 +1562,18 @@
 		wake_up_all(&ch->idle_wq);
 
 	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
 		/* start a kthread that will do the xpcDisconnecting callout */
 		xpc_create_kthreads(ch, 1, 1);
 	}
 
 	/* wake those waiting to allocate an entry from the local msg queue */
-	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
+	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
 		wake_up(&ch->msg_allocate_wq);
-	}
 
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
-
 void
 xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
@@ -1687,7 +1594,6 @@
 	}
 }
 
-
 /*
  * Wait for a message entry to become available for the specified channel,
  * but don't wait any longer than 1 jiffy.
@@ -1697,9 +1603,8 @@
 {
 	enum xpc_retval ret;
 
-
 	if (ch->flags & XPC_C_DISCONNECTING) {
-		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
 		return ch->reason;
 	}
 
@@ -1709,7 +1614,7 @@
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
 		ret = ch->reason;
-		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
+		DBUG_ON(ch->reason == xpcInterrupted);
 	} else if (ret == 0) {
 		ret = xpcTimeout;
 	} else {
@@ -1719,20 +1624,18 @@
 	return ret;
 }
 
-
 /*
  * Allocate an entry for a message from the message queue associated with the
  * specified channel.
  */
 static enum xpc_retval
 xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
-			struct xpc_msg **address_of_msg)
+		 struct xpc_msg **address_of_msg)
 {
 	struct xpc_msg *msg;
 	enum xpc_retval ret;
 	s64 put;
 
-
 	/* this reference will be dropped in xpc_send_msg() */
 	xpc_msgqueue_ref(ch);
 
@@ -1745,7 +1648,6 @@
 		return xpcNotConnected;
 	}
 
-
 	/*
 	 * Get the next available message entry from the local message queue.
 	 * If none are available, we'll make sure that we grab the latest
@@ -1755,25 +1657,23 @@
 
 	while (1) {
 
-		put = (volatile s64) ch->w_local_GP.put;
-		if (put - (volatile s64) ch->w_remote_GP.get <
-							ch->local_nentries) {
+		put = ch->w_local_GP.put;
+		rmb();	/* guarantee that .put loads before .get */
+		if (put - ch->w_remote_GP.get < ch->local_nentries) {
 
 			/* There are available message entries. We need to try
 			 * to secure one for ourselves. We'll do this by trying
 			 * to increment w_local_GP.put as long as someone else
 			 * doesn't beat us to it. If they do, we'll have to
 			 * try again.
-		 	 */
-			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
-									put) {
+			 */
+			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
 				/* we got the entry referenced by put */
 				break;
 			}
 			continue;	/* try again */
 		}
 
-
 		/*
 		 * There aren't any available msg entries at this time.
 		 *
@@ -1783,9 +1683,8 @@
 		 * that will cause the IPI handler to fetch the latest
 		 * GP values as if an IPI was sent by the other side.
 		 */
-		if (ret == xpcTimeout) {
+		if (ret == xpcTimeout)
 			xpc_IPI_send_local_msgrequest(ch);
-		}
 
 		if (flags & XPC_NOWAIT) {
 			xpc_msgqueue_deref(ch);
@@ -1799,25 +1698,22 @@
 		}
 	}
 
-
 	/* get the message's address and initialize it */
-	msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-				(put % ch->local_nentries) * ch->msg_size);
-
+	msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+				 (put % ch->local_nentries) * ch->msg_size);
 
 	DBUG_ON(msg->flags != 0);
 	msg->number = put;
 
 	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
 		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
-		(void *) msg, msg->number, ch->partid, ch->number);
+		(void *)msg, msg->number, ch->partid, ch->number);
 
 	*address_of_msg = msg;
 
 	return xpcSuccess;
 }
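
The allocation loop above drops the old (volatile s64) casts in favour of an explicit rmb() and keeps the cmpxchg() race: read the cached put index, order that load before the get load, check for space, then try to bump put; whoever wins the cmpxchg() owns that slot. A minimal sketch of the same reservation pattern, with illustrative names rather than the XPC fields (rmb() and cmpxchg() are provided by the arch headers):

#include <linux/types.h>
#include <linux/errno.h>

/* hedged sketch: reserve one ring slot by racing on the put index */
static int demo_reserve_slot(s64 *put_p, s64 *get_p, int nentries, s64 *slot)
{
	s64 put;

	while (1) {
		put = *put_p;
		rmb();			/* order the put load before the get load */
		if (put - *get_p >= nentries)
			return -EAGAIN;	/* ring currently full */

		/* try to claim the slot; retry if another thread beat us */
		if (cmpxchg(put_p, put, put + 1) == put) {
			*slot = put % nentries;
			return 0;
		}
	}
}

The real function additionally waits (with a timeout) for space and re-kicks the IPI handler instead of returning -EAGAIN; the sketch only shows the reservation step.
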
 
-
 /*
  * Allocate an entry for a message from the message queue associated with the
  * specified channel. NOTE that this routine can sleep waiting for a message
@@ -1838,7 +1734,6 @@
 	enum xpc_retval ret = xpcUnknownReason;
 	struct xpc_msg *msg = NULL;
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
@@ -1848,15 +1743,13 @@
 		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
 		xpc_part_deref(part);
 
-		if (msg != NULL) {
+		if (msg != NULL)
 			*payload = &msg->payload;
-		}
 	}
 
 	return ret;
 }
 
-
 /*
  * Now we actually send the messages that are ready to be sent by advancing
  * the local message queue's Put value and then send an IPI to the recipient
@@ -1869,20 +1762,18 @@
 	s64 put = initial_put + 1;
 	int send_IPI = 0;
 
-
 	while (1) {
 
 		while (1) {
-			if (put == (volatile s64) ch->w_local_GP.put) {
+			if (put == ch->w_local_GP.put)
 				break;
-			}
 
-			msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
-			       (put % ch->local_nentries) * ch->msg_size);
+			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
+						 (put % ch->local_nentries) *
+						 ch->msg_size);
 
-			if (!(msg->flags & XPC_M_READY)) {
+			if (!(msg->flags & XPC_M_READY))
 				break;
-			}
 
 			put++;
 		}
@@ -1893,9 +1784,9 @@
 		}
 
 		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
-								initial_put) {
+		    initial_put) {
 			/* someone else beat us to it */
-			DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
+			DBUG_ON(ch->local_GP->put < initial_put);
 			break;
 		}
 
@@ -1914,12 +1805,10 @@
 		initial_put = put;
 	}
 
-	if (send_IPI) {
+	if (send_IPI)
 		xpc_IPI_send_msgrequest(ch);
-	}
 }
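
The publish step above scans forward over every contiguous message already flagged READY and then advances the shared Put value with cmpxchg_rel(), the ia64 release-ordered compare-and-swap this file already uses, so the peer cannot observe the new index before the message data. A simplified sketch (message flags abstracted to a byte array, no outer retry loop, names made up):

#include <linux/types.h>

/* hedged sketch: publish contiguous READY entries, then poke the peer */
static void demo_publish_ready(s64 *shared_put, s64 w_put, const u8 *flags,
			       int nentries, void (*notify_peer)(void))
{
	s64 initial = *shared_put;
	s64 put = initial;

	/* walk forward over every contiguous entry already marked ready */
	while (put < w_put && (flags[put % nentries] & 0x01 /* READY */))
		put++;

	if (put == initial)
		return;		/* nothing new to publish */

	/* release-ordered CAS: peer must see the data before the index */
	if (cmpxchg_rel(shared_put, initial, put) == initial && notify_peer)
		notify_peer();
}

Unlike this sketch, xpc_send_msgs() loops until no further entries have become READY and only the thread that actually moved the index sends the IPI.
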
 
-
 /*
  * Common code that does the actual sending of the message by advancing the
  * local message queue's Put value and sends an IPI to the partition the
@@ -1927,16 +1816,15 @@
  */
 static enum xpc_retval
 xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
-			xpc_notify_func func, void *key)
+	     xpc_notify_func func, void *key)
 {
 	enum xpc_retval ret = xpcSuccess;
 	struct xpc_notify *notify = notify;
 	s64 put, msg_number = msg->number;
 
-
 	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
-	DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
-					msg_number % ch->local_nentries);
+	DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
+		msg_number % ch->local_nentries);
 	DBUG_ON(msg->flags & XPC_M_READY);
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1959,7 +1847,7 @@
 		notify->key = key;
 		notify->type = notify_type;
 
-		// >>> is a mb() needed here?
+		/* >>> is a mb() needed here? */
 
 		if (ch->flags & XPC_C_DISCONNECTING) {
 			/*
@@ -1970,7 +1858,7 @@
 			 * the notify entry.
 			 */
 			if (cmpxchg(&notify->type, notify_type, 0) ==
-								notify_type) {
+			    notify_type) {
 				atomic_dec(&ch->n_to_notify);
 				ret = ch->reason;
 			}
@@ -1992,16 +1880,14 @@
 	/* see if the message is next in line to be sent, if so send it */
 
 	put = ch->local_GP->put;
-	if (put == msg_number) {
+	if (put == msg_number)
 		xpc_send_msgs(ch, put);
-	}
 
 	/* drop the reference grabbed in xpc_allocate_msg() */
 	xpc_msgqueue_deref(ch);
 	return ret;
 }
 
-
 /*
  * Send a message previously allocated using xpc_initiate_allocate() on the
  * specified channel connected to the specified partition.
@@ -2029,8 +1915,7 @@
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
 	enum xpc_retval ret;
 
-
-	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2042,7 +1927,6 @@
 	return ret;
 }
 
-
 /*
  * Send a message previously allocated using xpc_initiate_allocate on the
  * specified channel connected to the specified partition.
@@ -2075,14 +1959,13 @@
  */
 enum xpc_retval
 xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
-				xpc_notify_func func, void *key)
+			 xpc_notify_func func, void *key)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
 	enum xpc_retval ret;
 
-
-	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2091,11 +1974,10 @@
 	DBUG_ON(func == NULL);
 
 	ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
-								func, key);
+			   func, key);
 	return ret;
 }
 
-
 static struct xpc_msg *
 xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 {
@@ -2105,7 +1987,6 @@
 	u64 msg_offset;
 	enum xpc_retval ret;
 
-
 	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
 		/* we were interrupted by a signal */
 		return NULL;
@@ -2117,23 +1998,21 @@
 
 		msg_index = ch->next_msg_to_pull % ch->remote_nentries;
 
-		DBUG_ON(ch->next_msg_to_pull >=
-					(volatile s64) ch->w_remote_GP.put);
-		nmsgs =  (volatile s64) ch->w_remote_GP.put -
-						ch->next_msg_to_pull;
+		DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
+		nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
 		if (msg_index + nmsgs > ch->remote_nentries) {
 			/* ignore the ones that wrap the msg queue for now */
 			nmsgs = ch->remote_nentries - msg_index;
 		}
 
 		msg_offset = msg_index * ch->msg_size;
-		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-								msg_offset);
-		remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
-								msg_offset);
+		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
+		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
+						msg_offset);
 
-		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
-				nmsgs * ch->msg_size)) != xpcSuccess) {
+		ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
+						 nmsgs * ch->msg_size);
+		if (ret != xpcSuccess) {
 
 			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
 				" msg %ld from partition %d, channel=%d, "
@@ -2146,8 +2025,6 @@
 			return NULL;
 		}
 
-		mb();	/* >>> this may not be needed, we're not sure */
-
 		ch->next_msg_to_pull += nmsgs;
 	}
 
@@ -2155,12 +2032,11 @@
 
 	/* return the message we were looking for */
 	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
-	msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
+	msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
 
 	return msg;
 }
 
-
 /*
  * Get a message to be delivered.
  */
@@ -2170,23 +2046,21 @@
 	struct xpc_msg *msg = NULL;
 	s64 get;
 
-
 	do {
-		if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
+		if (ch->flags & XPC_C_DISCONNECTING)
 			break;
-		}
 
-		get = (volatile s64) ch->w_local_GP.get;
-		if (get == (volatile s64) ch->w_remote_GP.put) {
+		get = ch->w_local_GP.get;
+		rmb();	/* guarantee that .get loads before .put */
+		if (get == ch->w_remote_GP.put)
 			break;
-		}
 
 		/* There are messages waiting to be pulled and delivered.
 		 * We need to try to secure one for ourselves. We'll do this
 		 * by trying to increment w_local_GP.get and hope that no one
 		 * else beats us to it. If they do, we'll simply have
 		 * to try again for the next one.
-	 	 */
+		 */
 
 		if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
 			/* we got the entry referenced by get */
@@ -2211,7 +2085,6 @@
 	return msg;
 }
 
-
 /*
  * Deliver a message to its intended recipient.
  */
@@ -2220,8 +2093,8 @@
 {
 	struct xpc_msg *msg;
 
-
-	if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
+	msg = xpc_get_deliverable_msg(ch);
+	if (msg != NULL) {
 
 		/*
 		 * This ref is taken to protect the payload itself from being
@@ -2235,16 +2108,16 @@
 		if (ch->func != NULL) {
 			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
 				"msg_number=%ld, partid=%d, channel=%d\n",
-				(void *) msg, msg->number, ch->partid,
+				(void *)msg, msg->number, ch->partid,
 				ch->number);
 
 			/* deliver the message to its intended recipient */
 			ch->func(xpcMsgReceived, ch->partid, ch->number,
-					&msg->payload, ch->key);
+				 &msg->payload, ch->key);
 
 			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
 				"msg_number=%ld, partid=%d, channel=%d\n",
-				(void *) msg, msg->number, ch->partid,
+				(void *)msg, msg->number, ch->partid,
 				ch->number);
 		}
 
@@ -2252,7 +2125,6 @@
 	}
 }
 
-
 /*
  * Now we actually acknowledge the messages that have been delivered and ack'd
  * by advancing the cached remote message queue's Get value and if requested
@@ -2265,20 +2137,18 @@
 	s64 get = initial_get + 1;
 	int send_IPI = 0;
 
-
 	while (1) {
 
 		while (1) {
-			if (get == (volatile s64) ch->w_local_GP.get) {
+			if (get == ch->w_local_GP.get)
 				break;
-			}
 
-			msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
-			       (get % ch->remote_nentries) * ch->msg_size);
+			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
+						 (get % ch->remote_nentries) *
+						 ch->msg_size);
 
-			if (!(msg->flags & XPC_M_DONE)) {
+			if (!(msg->flags & XPC_M_DONE))
 				break;
-			}
 
 			msg_flags |= msg->flags;
 			get++;
@@ -2290,10 +2160,9 @@
 		}
 
 		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
-								initial_get) {
+		    initial_get) {
 			/* someone else beat us to it */
-			DBUG_ON((volatile s64) ch->local_GP->get <=
-								initial_get);
+			DBUG_ON(ch->local_GP->get <= initial_get);
 			break;
 		}
 
@@ -2312,12 +2181,10 @@
 		initial_get = get;
 	}
 
-	if (send_IPI) {
+	if (send_IPI)
 		xpc_IPI_send_msgrequest(ch);
-	}
 }
 
-
 /*
  * Acknowledge receipt of a delivered message.
  *
@@ -2343,17 +2210,16 @@
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
 	s64 get, msg_number = msg->number;
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
 
 	ch = &part->channels[ch_number];
 
 	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
-		(void *) msg, msg_number, ch->partid, ch->number);
+		(void *)msg, msg_number, ch->partid, ch->number);
 
-	DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
-					msg_number % ch->remote_nentries);
+	DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
+		msg_number % ch->remote_nentries);
 	DBUG_ON(msg->flags & XPC_M_DONE);
 
 	msg->flags |= XPC_M_DONE;
@@ -2369,11 +2235,9 @@
 	 * been delivered.
 	 */
 	get = ch->local_GP->get;
-	if (get == msg_number) {
+	if (get == msg_number)
 		xpc_acknowledge_msgs(ch, get, msg->flags);
-	}
 
 	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
 	xpc_msgqueue_deref(ch);
 }
-
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
similarity index 81%
rename from arch/ia64/sn/kernel/xpc_main.c
rename to drivers/misc/sgi-xp/xpc_main.c
index 9e0b164..f673ba9 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2007 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) support - standard version.
  *
@@ -44,23 +43,20 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
 #include <linux/kdebug.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/uaccess.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
 
@@ -81,10 +77,8 @@
 struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;
 
-
 static int xpc_kdebug_ignore;
 
-
 /* systune related variables for /proc/sys directories */
 
 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -96,61 +90,56 @@
 static int xpc_hb_check_max_interval = 120;
 
 int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_min_timelimit;	/* = 0 */
 static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
-		.ctl_name 	= CTL_UNNUMBERED,
-		.procname	= "hb_interval",
-		.data		= &xpc_hb_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xpc_hb_min_interval,
-		.extra2		= &xpc_hb_max_interval
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb_interval",
+	 .data = &xpc_hb_interval,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_hb_min_interval,
+	 .extra2 = &xpc_hb_max_interval},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "hb_check_interval",
-		.data		= &xpc_hb_check_interval,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xpc_hb_check_min_interval,
-		.extra2		= &xpc_hb_check_max_interval
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb_check_interval",
+	 .data = &xpc_hb_check_interval,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_hb_check_min_interval,
+	 .extra2 = &xpc_hb_check_max_interval},
 	{}
 };
 static ctl_table xpc_sys_xpc_dir[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "hb",
-		.mode		= 0555,
-		.child		= xpc_sys_xpc_hb_dir
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb",
+	 .mode = 0555,
+	 .child = xpc_sys_xpc_hb_dir},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "disengage_request_timelimit",
-		.data		= &xpc_disengage_request_timelimit,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xpc_disengage_request_min_timelimit,
-		.extra2		= &xpc_disengage_request_max_timelimit
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "disengage_request_timelimit",
+	 .data = &xpc_disengage_request_timelimit,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_disengage_request_min_timelimit,
+	 .extra2 = &xpc_disengage_request_max_timelimit},
 	{}
 };
 static ctl_table xpc_sys_dir[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "xpc",
-		.mode		= 0555,
-		.child		= xpc_sys_xpc_dir
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "xpc",
+	 .mode = 0555,
+	 .child = xpc_sys_xpc_dir},
 	{}
 };
 static struct ctl_table_header *xpc_sysctl;
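
The nested ctl_table arrays above expose hb_interval, hb_check_interval and disengage_request_timelimit as bounded integer tunables under /proc/sys/xpc/. The registration call itself is not visible in this hunk, so the following is an assumption about the surrounding init/exit paths (the guarded unregister does appear later in xpc_do_exit()):

#include <linux/sysctl.h>

/* presumably called from the module init path */
static void demo_register_xpc_sysctl(void)
{
	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
}

/* mirrors the teardown shown later in xpc_do_exit() */
static void demo_unregister_xpc_sysctl(void)
{
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
}
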
@@ -172,13 +161,10 @@
 /* notification that the xpc_discovery thread has exited */
 static DECLARE_COMPLETION(xpc_discovery_exited);
 
-
 static struct timer_list xpc_hb_timer;
 
-
 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
 
-
 static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
 static struct notifier_block xpc_reboot_notifier = {
 	.notifier_call = xpc_system_reboot,
@@ -189,25 +175,22 @@
 	.notifier_call = xpc_system_die,
 };
 
-
 /*
  * Timer function to enforce the timelimit on the partition disengage request.
  */
 static void
 xpc_timeout_partition_disengage_request(unsigned long data)
 {
-	struct xpc_partition *part = (struct xpc_partition *) data;
-
+	struct xpc_partition *part = (struct xpc_partition *)data;
 
 	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
-	(void) xpc_partition_disengaged(part);
+	(void)xpc_partition_disengaged(part);
 
 	DBUG_ON(part->disengage_request_timeout != 0);
 	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
 }
 
-
 /*
  * Notify the heartbeat check thread that an IRQ has been received.
  */
@@ -219,7 +202,6 @@
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Timer to produce the heartbeat.  The timer structure's function is
  * already set when this is initially called.  A tunable is used to
@@ -230,15 +212,13 @@
 {
 	xpc_vars->heartbeat++;
 
-	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_act_IRQ_wq);
-	}
 
 	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
 	add_timer(&xpc_hb_timer);
 }
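
The beater above bumps xpc_vars->heartbeat, wakes the checker thread once the check interval has elapsed, and re-arms itself xpc_hb_interval seconds out. A hedged sketch of that self-rearming timer pattern, using the old init_timer()-style API to match this era of the tree (demo names, fixed five-second period):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_tick(unsigned long data)
{
	/* ... periodic work goes here ... */

	demo_timer.expires = jiffies + 5 * HZ;	/* re-arm five seconds out */
	add_timer(&demo_timer);
}

static void demo_start(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_tick;
	demo_timer.data = 0;
	demo_timer.expires = jiffies + 5 * HZ;
	add_timer(&demo_timer);
}
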
 
-
 /*
  * This thread is responsible for nearly all of the partition
  * activation/deactivation.
@@ -248,27 +228,23 @@
 {
 	int last_IRQ_count = 0;
 	int new_IRQ_count;
-	int force_IRQ=0;
-
+	int force_IRQ = 0;
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	daemonize(XPC_HB_CHECK_THREAD_NAME);
-
 	set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
 	xpc_hb_beater(0);
 
-	while (!(volatile int) xpc_exiting) {
+	while (!xpc_exiting) {
 
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
-			(int) (xpc_hb_check_timeout - jiffies),
+			(int)(xpc_hb_check_timeout - jiffies),
 			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
 
-
 		/* checking of remote heartbeats is skewed by IRQ handling */
 		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 			dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +258,6 @@
 			force_IRQ = 1;
 		}
 
-
 		/* check for outstanding IRQs */
 		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
 		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +269,30 @@
 			last_IRQ_count += xpc_identify_act_IRQ_sender();
 			if (last_IRQ_count < new_IRQ_count) {
 				/* retry once to help avoid missing AMO */
-				(void) xpc_identify_act_IRQ_sender();
+				(void)xpc_identify_act_IRQ_sender();
 			}
 			last_IRQ_count = new_IRQ_count;
 
 			xpc_hb_check_timeout = jiffies +
-					   (xpc_hb_check_interval * HZ);
+			    (xpc_hb_check_interval * HZ);
 		}
 
 		/* wait for IRQ or timeout */
-		(void) wait_event_interruptible(xpc_act_IRQ_wq,
-			    (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-					time_after_eq(jiffies, xpc_hb_check_timeout) ||
-						(volatile int) xpc_exiting));
+		(void)wait_event_interruptible(xpc_act_IRQ_wq,
+					       (last_IRQ_count <
+						atomic_read(&xpc_act_IRQ_rcvd)
+						|| time_after_eq(jiffies,
+							xpc_hb_check_timeout) ||
+						xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
-
 	/* mark this thread as having exited */
 	complete(&xpc_hb_checker_exited);
 	return 0;
 }
 
-
 /*
  * This thread will attempt to discover other partitions to activate
  * based on info provided by SAL. This new thread is short lived and
@@ -326,8 +301,6 @@
 static int
 xpc_initiate_discovery(void *ignore)
 {
-	daemonize(XPC_DISCOVERY_THREAD_NAME);
-
 	xpc_discovery();
 
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -337,7 +310,6 @@
 	return 0;
 }
 
-
 /*
  * Establish first contact with the remote partition. This involves pulling
  * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +320,6 @@
 {
 	enum xpc_retval ret;
 
-
 	while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
 		if (ret != xpcRetry) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,17 +330,15 @@
 			"partition %d\n", XPC_PARTID(part));
 
 		/* wait a 1/4 of a second or so */
-		(void) msleep_interruptible(250);
+		(void)msleep_interruptible(250);
 
-		if (part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_DEACTIVATING)
 			return part->reason;
-		}
 	}
 
 	return xpc_mark_partition_active(part);
 }
 
-
 /*
  * The first kthread assigned to a newly activated partition is the one
  * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +355,11 @@
 xpc_channel_mgr(struct xpc_partition *part)
 {
 	while (part->act_state != XPC_P_DEACTIVATING ||
-			atomic_read(&part->nchannels_active) > 0 ||
-					!xpc_partition_disengaged(part)) {
+	       atomic_read(&part->nchannels_active) > 0 ||
+	       !xpc_partition_disengaged(part)) {
 
 		xpc_process_channel_activity(part);
 
-
 		/*
 		 * Wait until we've been requested to activate kthreads or
 		 * all of the channel's message queues have been torn down or
@@ -406,21 +374,16 @@
 		 * wake him up.
 		 */
 		atomic_dec(&part->channel_mgr_requests);
-		(void) wait_event_interruptible(part->channel_mgr_wq,
+		(void)wait_event_interruptible(part->channel_mgr_wq,
 				(atomic_read(&part->channel_mgr_requests) > 0 ||
-				(volatile u64) part->local_IPI_amo != 0 ||
-				((volatile u8) part->act_state ==
-							XPC_P_DEACTIVATING &&
-				atomic_read(&part->nchannels_active) == 0 &&
-				xpc_partition_disengaged(part))));
+				 part->local_IPI_amo != 0 ||
+				 (part->act_state == XPC_P_DEACTIVATING &&
+				 atomic_read(&part->nchannels_active) == 0 &&
+				 xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
-
-		// >>> Does it need to wakeup periodically as well? In case we
-		// >>> miscalculated the #of kthreads to wakeup or create?
 	}
 }
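
The manager loop above sleeps on channel_mgr_wq until it is explicitly poked (channel_mgr_requests goes positive), an IPI AMO is pending, or the partition is being torn down, and resets the request counter to 1 afterwards so a wakeup is never lost. A hedged sketch of that poke/wait contract with made-up names (asm/atomic.h matches this era; have_work() stands in for the IPI/deactivation checks):

#include <linux/wait.h>
#include <asm/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static atomic_t demo_requests = ATOMIC_INIT(1);

/* producer side: record a request and wake the manager */
static void demo_poke(void)
{
	atomic_inc(&demo_requests);
	wake_up(&demo_wq);
}

/* manager side: consume outstanding requests, then sleep until poked
 * again or there is other pending work */
static void demo_wait_for_work(int (*have_work)(void))
{
	atomic_dec(&demo_requests);
	(void)wait_event_interruptible(demo_wq,
				       atomic_read(&demo_requests) > 0 ||
				       have_work());
	atomic_set(&demo_requests, 1);
}
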
 
-
 /*
  * When XPC HB determines that a partition has come up, it will create a new
  * kthread and that kthread will call this function to attempt to set up the
@@ -443,9 +406,8 @@
 
 	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
 
-	if (xpc_setup_infrastructure(part) != xpcSuccess) {
+	if (xpc_setup_infrastructure(part) != xpcSuccess)
 		return;
-	}
 
 	/*
 	 * The kthread that XPC HB called us with will become the
@@ -454,27 +416,22 @@
 	 * has been dismantled.
 	 */
 
-	(void) xpc_part_ref(part);	/* this will always succeed */
+	(void)xpc_part_ref(part);	/* this will always succeed */
 
-	if (xpc_make_first_contact(part) == xpcSuccess) {
+	if (xpc_make_first_contact(part) == xpcSuccess)
 		xpc_channel_mgr(part);
-	}
 
 	xpc_part_deref(part);
 
 	xpc_teardown_infrastructure(part);
 }
 
-
 static int
 xpc_activating(void *__partid)
 {
-	partid_t partid = (u64) __partid;
+	partid_t partid = (u64)__partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-	int ret;
-
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
@@ -496,21 +453,6 @@
 
 	dev_dbg(xpc_part, "bringing partition %d up\n", partid);
 
-	daemonize("xpc%02d", partid);
-
-	/*
-	 * This thread needs to run at a realtime priority to prevent a
-	 * significant performance degradation.
-	 */
-	ret = sched_setscheduler(current, SCHED_FIFO, &param);
-	if (ret != 0) {
-		dev_warn(xpc_part, "unable to set pid %d to a realtime "
-			"priority, ret=%d\n", current->pid, ret);
-	}
-
-	/* allow this thread and its children to run on any CPU */
-	set_cpus_allowed(current, CPU_MASK_ALL);
-
 	/*
 	 * Register the remote partition's AMOs with SAL so it can handle
 	 * and cleanup errors within that address range should the remote
@@ -522,9 +464,9 @@
 	 * reloads and system reboots.
 	 */
 	if (sn_register_xp_addr_region(part->remote_amos_page_pa,
-							PAGE_SIZE, 1) < 0) {
+				       PAGE_SIZE, 1) < 0) {
 		dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
-			"xp_addr region\n", partid);
+			 "xp_addr region\n", partid);
 
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
@@ -537,12 +479,11 @@
 	xpc_allow_hb(partid, xpc_vars);
 	xpc_IPI_send_activated(part);
 
-
 	/*
 	 * xpc_partition_up() holds this thread and marks this partition as
 	 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
 	 */
-	(void) xpc_partition_up(part);
+	(void)xpc_partition_up(part);
 
 	xpc_disallow_hb(partid, xpc_vars);
 	xpc_mark_partition_inactive(part);
@@ -555,14 +496,12 @@
 	return 0;
 }
 
-
 void
 xpc_activate_partition(struct xpc_partition *part)
 {
 	partid_t partid = XPC_PARTID(part);
 	unsigned long irq_flags;
-	pid_t pid;
-
+	struct task_struct *kthread;
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
@@ -573,9 +512,9 @@
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
-	pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
-
-	if (unlikely(pid <= 0)) {
+	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
+			      partid);
+	if (IS_ERR(kthread)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
@@ -583,12 +522,11 @@
 	}
 }
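
This hunk swaps the old kernel_thread()/daemonize() pair for kthread_run(), which returns a struct task_struct (or an ERR_PTR) for an already-named kernel thread, so the failure check becomes IS_ERR() rather than pid <= 0. A minimal sketch of the same conversion with illustrative names:

#include <linux/kthread.h>
#include <linux/err.h>

static int demo_thread_fn(void *arg)
{
	/* ... do the work; a thread that is ever kthread_stop()ed would
	 *     also poll kthread_should_stop() here ... */
	return 0;
}

static int demo_spawn(int id)
{
	struct task_struct *kthread;

	kthread = kthread_run(demo_thread_fn, NULL, "demo%02d", id);
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);	/* creation failed, no thread exists */

	return 0;
}
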
 
-
 /*
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
  * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not.  >>> If it has, then wake up the
+ * whether a partition has sent an IPI or not.  If it has, then wake up the
  * associated kthread to handle it.
  *
  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -603,10 +541,9 @@
 irqreturn_t
 xpc_notify_IRQ_handler(int irq, void *dev_id)
 {
-	partid_t partid = (partid_t) (u64) dev_id;
+	partid_t partid = (partid_t) (u64)dev_id;
 	struct xpc_partition *part = &xpc_partitions[partid];
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
 	if (xpc_part_ref(part)) {
@@ -617,7 +554,6 @@
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
  * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +566,12 @@
 		xpc_check_for_channel_activity(part);
 
 		part->dropped_IPI_timer.expires = jiffies +
-							XPC_P_DROPPED_IPI_WAIT;
+		    XPC_P_DROPPED_IPI_WAIT;
 		add_timer(&part->dropped_IPI_timer);
 		xpc_part_deref(part);
 	}
 }
 
-
 void
 xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 {
@@ -644,7 +579,6 @@
 	int assigned = atomic_read(&ch->kthreads_assigned);
 	int wakeup;
 
-
 	DBUG_ON(needed <= 0);
 
 	if (idle > 0) {
@@ -658,16 +592,13 @@
 		wake_up_nr(&ch->idle_wq, wakeup);
 	}
 
-	if (needed <= 0) {
+	if (needed <= 0)
 		return;
-	}
 
 	if (needed + assigned > ch->kthreads_assigned_limit) {
 		needed = ch->kthreads_assigned_limit - assigned;
-		// >>>should never be less than 0
-		if (needed <= 0) {
+		if (needed <= 0)
 			return;
-		}
 	}
 
 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -676,7 +607,6 @@
 	xpc_create_kthreads(ch, needed, 0);
 }
 
-
 /*
  * This function is where XPC's kthreads wait for messages to deliver.
  */
@@ -686,15 +616,13 @@
 	do {
 		/* deliver messages to their intended recipients */
 
-		while ((volatile s64) ch->w_local_GP.get <
-				(volatile s64) ch->w_remote_GP.put &&
-					!((volatile u32) ch->flags &
-						XPC_C_DISCONNECTING)) {
+		while (ch->w_local_GP.get < ch->w_remote_GP.put &&
+		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_msg(ch);
 		}
 
 		if (atomic_inc_return(&ch->kthreads_idle) >
-						ch->kthreads_idle_limit) {
+		    ch->kthreads_idle_limit) {
 			/* too many idle kthreads on this channel */
 			atomic_dec(&ch->kthreads_idle);
 			break;
@@ -703,20 +631,17 @@
 		dev_dbg(xpc_chan, "idle kthread calling "
 			"wait_event_interruptible_exclusive()\n");
 
-		(void) wait_event_interruptible_exclusive(ch->idle_wq,
-				((volatile s64) ch->w_local_GP.get <
-					(volatile s64) ch->w_remote_GP.put ||
-				((volatile u32) ch->flags &
-						XPC_C_DISCONNECTING)));
+		(void)wait_event_interruptible_exclusive(ch->idle_wq,
+				(ch->w_local_GP.get < ch->w_remote_GP.put ||
+				 (ch->flags & XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
 
-	} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
+	} while (!(ch->flags & XPC_C_DISCONNECTING));
 }
 
-
 static int
-xpc_daemonize_kthread(void *args)
+xpc_kthread_start(void *args)
 {
 	partid_t partid = XPC_UNPACK_ARG1(args);
 	u16 ch_number = XPC_UNPACK_ARG2(args);
@@ -725,9 +650,6 @@
 	int n_needed;
 	unsigned long irq_flags;
 
-
-	daemonize("xpc%02dc%d", partid, ch_number);
-
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
 
@@ -756,10 +678,9 @@
 			 * need one less than total #of messages to deliver.
 			 */
 			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
-			if (n_needed > 0 &&
-					!(ch->flags & XPC_C_DISCONNECTING)) {
+			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 				xpc_activate_kthreads(ch, n_needed);
-			}
+
 		} else {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
@@ -771,7 +692,7 @@
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 
@@ -798,7 +719,6 @@
 	return 0;
 }
 
-
 /*
  * For each partition that XPC has established communications with, there is
  * a minimum of one kernel thread assigned to perform any operation that
@@ -813,13 +733,12 @@
  */
 void
 xpc_create_kthreads(struct xpc_channel *ch, int needed,
-			int ignore_disconnecting)
+		    int ignore_disconnecting)
 {
 	unsigned long irq_flags;
-	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
-
+	struct task_struct *kthread;
 
 	while (needed-- > 0) {
 
@@ -832,7 +751,7 @@
 			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
 				/* kthreads assigned had gone to zero */
 				BUG_ON(!(ch->flags &
-					XPC_C_DISCONNECTINGCALLOUT_MADE));
+					 XPC_C_DISCONNECTINGCALLOUT_MADE));
 				break;
 			}
 
@@ -843,11 +762,12 @@
 			if (atomic_inc_return(&part->nchannels_engaged) == 1)
 				xpc_mark_partition_engaged(part);
 		}
-		(void) xpc_part_ref(part);
+		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
 
-		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
-		if (pid < 0) {
+		kthread = kthread_run(xpc_kthread_start, (void *)args,
+				      "xpc%02dc%d", ch->partid, ch->number);
+		if (IS_ERR(kthread)) {
 			/* the fork failed */
 
 			/*
@@ -857,7 +777,7 @@
 			 * to this channel are blocked in the channel's
 			 * registerer, because the only thing that will unblock
 			 * them is the xpcDisconnecting callout that this
-			 * failed kernel_thread would have made.
+			 * failed kthread_run() would have made.
 			 */
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -869,7 +789,7 @@
 			xpc_part_deref(part);
 
 			if (atomic_read(&ch->kthreads_assigned) <
-						ch->kthreads_idle_limit) {
+			    ch->kthreads_idle_limit) {
 				/*
 				 * Flag this as an error only if we have an
 				 * insufficient #of kthreads for the channel
@@ -877,17 +797,14 @@
 				 */
 				spin_lock_irqsave(&ch->lock, irq_flags);
 				XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
-								&irq_flags);
+						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 			}
 			break;
 		}
-
-		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
 
-
 void
 xpc_disconnect_wait(int ch_number)
 {
@@ -897,14 +814,12 @@
 	struct xpc_channel *ch;
 	int wakeup_channel_mgr;
 
-
 	/* now wait for all callouts to the caller's function to cease */
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!xpc_part_ref(part)) {
+		if (!xpc_part_ref(part))
 			continue;
-		}
 
 		ch = &part->channels[ch_number];
 
@@ -923,7 +838,8 @@
 			if (part->act_state != XPC_P_DEACTIVATING) {
 				spin_lock(&part->IPI_lock);
 				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-					ch->number, ch->delayed_IPI_flags);
+						  ch->number,
+						  ch->delayed_IPI_flags);
 				spin_unlock(&part->IPI_lock);
 				wakeup_channel_mgr = 1;
 			}
@@ -933,15 +849,13 @@
 		ch->flags &= ~XPC_C_WDISCONNECT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-		if (wakeup_channel_mgr) {
+		if (wakeup_channel_mgr)
 			xpc_wakeup_channel_mgr(part);
-		}
 
 		xpc_part_deref(part);
 	}
 }
 
-
 static void
 xpc_do_exit(enum xpc_retval reason)
 {
@@ -950,7 +864,6 @@
 	struct xpc_partition *part;
 	unsigned long printmsg_time, disengage_request_timeout = 0;
 
-
 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
 	DBUG_ON(xpc_exiting == 1);
 
@@ -971,10 +884,8 @@
 	/* wait for the heartbeat checker thread to exit */
 	wait_for_completion(&xpc_hb_checker_exited);
 
-
 	/* sleep for a 1/3 of a second or so */
-	(void) msleep_interruptible(300);
-
+	(void)msleep_interruptible(300);
 
 	/* wait for all partitions to become inactive */
 
@@ -988,7 +899,7 @@
 			part = &xpc_partitions[partid];
 
 			if (xpc_partition_disengaged(part) &&
-					part->act_state == XPC_P_INACTIVE) {
+			    part->act_state == XPC_P_INACTIVE) {
 				continue;
 			}
 
@@ -997,47 +908,46 @@
 			XPC_DEACTIVATE_PARTITION(part, reason);
 
 			if (part->disengage_request_timeout >
-						disengage_request_timeout) {
+			    disengage_request_timeout) {
 				disengage_request_timeout =
-						part->disengage_request_timeout;
+				    part->disengage_request_timeout;
 			}
 		}
 
 		if (xpc_partition_engaged(-1UL)) {
 			if (time_after(jiffies, printmsg_time)) {
 				dev_info(xpc_part, "waiting for remote "
-					"partitions to disengage, timeout in "
-					"%ld seconds\n",
-					(disengage_request_timeout - jiffies)
-									/ HZ);
+					 "partitions to disengage, timeout in "
+					 "%ld seconds\n",
+					 (disengage_request_timeout - jiffies)
+					 / HZ);
 				printmsg_time = jiffies +
-					(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+				    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
 				printed_waiting_msg = 1;
 			}
 
 		} else if (active_part_count > 0) {
 			if (printed_waiting_msg) {
 				dev_info(xpc_part, "waiting for local partition"
-					" to disengage\n");
+					 " to disengage\n");
 				printed_waiting_msg = 0;
 			}
 
 		} else {
 			if (!xpc_disengage_request_timedout) {
 				dev_info(xpc_part, "all partitions have "
-					"disengaged\n");
+					 "disengaged\n");
 			}
 			break;
 		}
 
 		/* sleep for a 1/3 of a second or so */
-		(void) msleep_interruptible(300);
+		(void)msleep_interruptible(300);
 
 	} while (1);
 
 	DBUG_ON(xpc_partition_engaged(-1UL));
 
-
 	/* indicate to others that our reserved page is uninitialized */
 	xpc_rsvd_page->vars_pa = 0;
 
@@ -1047,27 +957,24 @@
 
 	if (reason == xpcUnloading) {
 		/* take ourselves off of the reboot_notifier_list */
-		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
 		/* take ourselves off of the die_notifier list */
-		(void) unregister_die_notifier(&xpc_die_notifier);
+		(void)unregister_die_notifier(&xpc_die_notifier);
 	}
 
 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();
 
-
 	/* clear the interface to XPC's functions */
 	xpc_clear_interface();
 
-	if (xpc_sysctl) {
+	if (xpc_sysctl)
 		unregister_sysctl_table(xpc_sysctl);
-	}
 
 	kfree(xpc_remote_copy_buffer_base);
 }
 
-
 /*
  * This function is called when the system is being rebooted.
  */
@@ -1076,7 +983,6 @@
 {
 	enum xpc_retval reason;
 
-
 	switch (event) {
 	case SYS_RESTART:
 		reason = xpcSystemReboot;
@@ -1095,7 +1001,6 @@
 	return NOTIFY_DONE;
 }
 
-
 /*
  * Notify other partitions to disengage from all references to our memory.
  */
@@ -1107,17 +1012,16 @@
 	unsigned long engaged;
 	long time, printmsg_time, disengage_request_timeout;
 
-
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
 
-	xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
+	xpc_vars->heartbeating_to_mask = 0;	/* indicate we're deactivated */
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
 		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-							remote_vars_version)) {
+		    remote_vars_version)) {
 
 			/* just in case it was left set by an earlier XPC */
 			xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1029,7 @@
 		}
 
 		if (xpc_partition_engaged(1UL << partid) ||
-					part->act_state != XPC_P_INACTIVE) {
+		    part->act_state != XPC_P_INACTIVE) {
 			xpc_request_partition_disengage(part);
 			xpc_mark_partition_disengaged(part);
 			xpc_IPI_send_disengage(part);
@@ -1134,9 +1038,9 @@
 
 	time = rtc_time();
 	printmsg_time = time +
-		(XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+	    (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
 	disengage_request_timeout = time +
-		(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+	    (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
 
 	/* wait for all other partitions to disengage from us */
 
@@ -1152,8 +1056,8 @@
 			for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 				if (engaged & (1UL << partid)) {
 					dev_info(xpc_part, "disengage from "
-						"remote partition %d timed "
-						"out\n", partid);
+						 "remote partition %d timed "
+						 "out\n", partid);
 				}
 			}
 			break;
@@ -1161,17 +1065,16 @@
 
 		if (time >= printmsg_time) {
 			dev_info(xpc_part, "waiting for remote partitions to "
-				"disengage, timeout in %ld seconds\n",
-				(disengage_request_timeout - time) /
-						sn_rtc_cycles_per_second);
+				 "disengage, timeout in %ld seconds\n",
+				 (disengage_request_timeout - time) /
+				 sn_rtc_cycles_per_second);
 			printmsg_time = time +
-					(XPC_DISENGAGE_PRINTMSG_INTERVAL *
-						sn_rtc_cycles_per_second);
+			    (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+			     sn_rtc_cycles_per_second);
 		}
 	}
 }
 
-
 /*
  * This function is called when the system is being restarted or halted due
  * to some sort of system failure. If this is the case we need to notify the
@@ -1191,9 +1094,9 @@
 
 	case DIE_KDEBUG_ENTER:
 		/* Should lack of heartbeat be ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
@@ -1203,9 +1106,9 @@
 
 	case DIE_KDEBUG_LEAVE:
 		/* Is lack of heartbeat being ignored by other partitions? */
-		if (!xpc_kdebug_ignore) {
+		if (!xpc_kdebug_ignore)
 			break;
-		}
+
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
@@ -1217,26 +1120,23 @@
 	return NOTIFY_DONE;
 }
 
-
 int __init
 xpc_init(void)
 {
 	int ret;
 	partid_t partid;
 	struct xpc_partition *part;
-	pid_t pid;
+	struct task_struct *kthread;
 	size_t buf_size;
 
-
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
-
 
 	buf_size = max(XPC_RP_VARS_SIZE,
-				XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
 	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
-				     GFP_KERNEL, &xpc_remote_copy_buffer_base);
+							       GFP_KERNEL,
+						  &xpc_remote_copy_buffer_base);
 	if (xpc_remote_copy_buffer == NULL)
 		return -ENOMEM;
 
@@ -1256,7 +1156,7 @@
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
+		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
 
 		part->act_IRQ_rcvd = 0;
 		spin_lock_init(&part->act_lock);
@@ -1265,8 +1165,8 @@
 
 		init_timer(&part->disengage_request_timer);
 		part->disengage_request_timer.function =
-				xpc_timeout_partition_disengage_request;
-		part->disengage_request_timer.data = (unsigned long) part;
+		    xpc_timeout_partition_disengage_request;
+		part->disengage_request_timer.data = (unsigned long)part;
 
 		part->setup_state = XPC_P_UNSET;
 		init_waitqueue_head(&part->teardown_wq);
@@ -1292,16 +1192,15 @@
 	 * but rather immediately process the interrupt.
 	 */
 	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
-							"xpc hb", NULL);
+			  "xpc hb", NULL);
 	if (ret != 0) {
 		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
 			"errno=%d\n", -ret);
 
 		xpc_restrict_IPI_ops();
 
-		if (xpc_sysctl) {
+		if (xpc_sysctl)
 			unregister_sysctl_table(xpc_sysctl);
-		}
 
 		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
@@ -1319,26 +1218,22 @@
 		free_irq(SGI_XPC_ACTIVATE, NULL);
 		xpc_restrict_IPI_ops();
 
-		if (xpc_sysctl) {
+		if (xpc_sysctl)
 			unregister_sysctl_table(xpc_sysctl);
-		}
 
 		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
-
 	/* add ourselves to the reboot_notifier_list */
 	ret = register_reboot_notifier(&xpc_reboot_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register reboot notifier\n");
-	}
 
 	/* add ourselves to the die_notifier list */
 	ret = register_die_notifier(&xpc_die_notifier);
-	if (ret != 0) {
+	if (ret != 0)
 		dev_warn(xpc_part, "can't register die notifier\n");
-	}
 
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
@@ -1347,39 +1242,38 @@
 	 * The real work-horse behind xpc.  This processes incoming
 	 * interrupts and monitors remote heartbeats.
 	 */
-	pid = kernel_thread(xpc_hb_checker, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking hb check thread\n");
 
 		/* indicate to others that our reserved page is uninitialized */
 		xpc_rsvd_page->vars_pa = 0;
 
 		/* take ourselves off of the reboot_notifier_list */
-		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
 		/* take ourselves off of the die_notifier list */
-		(void) unregister_die_notifier(&xpc_die_notifier);
+		(void)unregister_die_notifier(&xpc_die_notifier);
 
 		del_timer_sync(&xpc_hb_timer);
 		free_irq(SGI_XPC_ACTIVATE, NULL);
 		xpc_restrict_IPI_ops();
 
-		if (xpc_sysctl) {
+		if (xpc_sysctl)
 			unregister_sysctl_table(xpc_sysctl);
-		}
 
 		kfree(xpc_remote_copy_buffer_base);
 		return -EBUSY;
 	}
 
-
 	/*
 	 * Startup a thread that will attempt to discover other partitions to
 	 * activate based on info provided by SAL. This new thread is short
 	 * lived and will exit once discovery is complete.
 	 */
-	pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
-	if (pid < 0) {
+	kthread = kthread_run(xpc_initiate_discovery, NULL,
+			      XPC_DISCOVERY_THREAD_NAME);
+	if (IS_ERR(kthread)) {
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
@@ -1389,7 +1283,6 @@
 		return -EBUSY;
 	}
 
-
 	/* set the interface to point at XPC's functions */
 	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
 			  xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1291,16 @@
 
 	return 0;
 }
-module_init(xpc_init);
 
+module_init(xpc_init);
 
 void __exit
 xpc_exit(void)
 {
 	xpc_do_exit(xpcUnloading);
 }
-module_exit(xpc_exit);
 
+module_exit(xpc_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1308,16 @@
 
 module_param(xpc_hb_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
-		"heartbeat increments.");
+		 "heartbeat increments.");
 
 module_param(xpc_hb_check_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
-		"heartbeat checks.");
+		 "heartbeat checks.");
 
 module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
-		"for disengage request to complete.");
+		 "for disengage request to complete.");
 
 module_param(xpc_kdebug_ignore, int, 0);
 MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
-		"other partitions when dropping into kdebug.");
-
+		 "other partitions when dropping into kdebug.");
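
The xpc_main.c hunks above convert the driver from kernel_thread(), which returned a pid_t and signalled failure with a negative value, to kthread_run(), which returns a struct task_struct pointer and signals failure with ERR_PTR().  A minimal stand-alone sketch of that pattern, not taken from the patch (the worker body and thread name are illustrative):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

static int example_worker(void *arg)
{
	/* loop until someone calls kthread_stop() on this thread */
	while (!kthread_should_stop())
		msleep_interruptible(100);
	return 0;
}

static int example_start_worker(void)
{
	struct task_struct *kthread;

	/* failure is reported as an ERR_PTR(), never as a negative pid */
	kthread = kthread_run(example_worker, NULL, "example_worker");
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);

	return 0;
}

In the patch the same IS_ERR() test replaces the old "if (pid < 0)" checks in xpc_create_kthreads() and xpc_init(); XPC's own threads exit on their own (the discovery thread, for instance, is short lived), so no kthread_stop() pairing is needed there.
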
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
similarity index 79%
rename from arch/ia64/sn/kernel/xpc_partition.c
rename to drivers/misc/sgi-xp/xpc_partition.c
index 9e97c26..27e200e 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) partition support.
  *
@@ -16,7 +15,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/sysctl.h>
 #include <linux/cache.h>
@@ -28,13 +26,11 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/nodepda.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/xpc.h>
-
+#include "xpc.h"
 
 /* XPC is exiting flag */
 int xpc_exiting;
 
-
 /* SH_IPI_ACCESS shub register value on startup */
 static u64 xpc_sh1_IPI_access;
 static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@
 static u64 xpc_sh2_IPI_access2;
 static u64 xpc_sh2_IPI_access3;
 
-
 /* original protection values for each node */
 u64 xpc_prot_vec[MAX_NUMNODES];
 
-
 /* this partition's reserved page pointers */
 struct xpc_rsvd_page *xpc_rsvd_page;
 static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@
 static int xp_nasid_mask_bytes;	/* actual size in bytes of nasid mask */
 static int xp_nasid_mask_words;	/* actual size in words of nasid mask */
 
-
 /*
  * For performance reasons, each entry of xpc_partitions[] is cacheline
  * aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@
  */
 struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
 
-
 /*
  * Generic buffer used to store a local copy of portions of a remote
  * partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@
 char *xpc_remote_copy_buffer;
 void *xpc_remote_copy_buffer_base;
 
-
 /*
  * Guarantee that the kmalloc'd memory is cacheline aligned.
  */
@@ -84,22 +75,21 @@
 {
 	/* see if kmalloc will give us cachline aligned memory by default */
 	*base = kmalloc(size, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+
+	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
 		return *base;
-	}
+
 	kfree(*base);
 
 	/* nope, we'll have to do it ourselves */
 	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
+	if (*base == NULL)
 		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
 
+	return (void *)L1_CACHE_ALIGN((u64)*base);
+}
 
 /*
  * Given a nasid, get the physical address of the  partition's reserved page
@@ -117,25 +107,24 @@
 	u64 buf_len = 0;
 	void *buf_base = NULL;
 
-
 	while (1) {
 
 		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
-								&len);
+						       &len);
 
 		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
 			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
 			status, cookie, rp_pa, len);
 
-		if (status != SALRET_MORE_PASSES) {
+		if (status != SALRET_MORE_PASSES)
 			break;
-		}
 
 		if (L1_CACHE_ALIGN(len) > buf_len) {
 			kfree(buf_base);
 			buf_len = L1_CACHE_ALIGN(len);
-			buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
-							GFP_KERNEL, &buf_base);
+			buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
+								 GFP_KERNEL,
+								 &buf_base);
 			if (buf_base == NULL) {
 				dev_err(xpc_part, "unable to kmalloc "
 					"len=0x%016lx\n", buf_len);
@@ -145,7 +134,7 @@
 		}
 
 		bte_res = xp_bte_copy(rp_pa, buf, buf_len,
-					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+				      (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bte_res != BTE_SUCCESS) {
 			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
 			status = SALRET_ERROR;
@@ -155,14 +144,13 @@
 
 	kfree(buf_base);
 
-	if (status != SALRET_OK) {
+	if (status != SALRET_OK)
 		rp_pa = 0;
-	}
+
 	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
 	return rp_pa;
 }
 
-
 /*
  * Fill the partition reserved page with the information needed by
  * other partitions to discover we are alive and establish initial
@@ -176,7 +164,6 @@
 	u64 rp_pa, nasid_array = 0;
 	int i, ret;
 
-
 	/* get the local reserved page's address */
 
 	preempt_disable();
@@ -186,7 +173,7 @@
 		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
 		return NULL;
 	}
-	rp = (struct xpc_rsvd_page *) __va(rp_pa);
+	rp = (struct xpc_rsvd_page *)__va(rp_pa);
 
 	if (rp->partid != sn_partition_id) {
 		dev_err(xpc_part, "the reserved page's partid of %d should be "
@@ -222,8 +209,9 @@
 	 * on subsequent loads of XPC. This AMO page is never freed, and its
 	 * memory protections are never restricted.
 	 */
-	if ((amos_page = xpc_vars->amos_page) == NULL) {
-		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
+	amos_page = xpc_vars->amos_page;
+	if (amos_page == NULL) {
+		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of AMOs\n");
 			return NULL;
@@ -234,30 +222,31 @@
 		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
 		 */
 		if (!enable_shub_wars_1_1()) {
-			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
-					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
-					&nasid_array);
+			ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
+						   PAGE_SIZE,
+						   SN_MEMPROT_ACCESS_CLASS_1,
+						   &nasid_array);
 			if (ret != 0) {
 				dev_err(xpc_part, "can't change memory "
 					"protections\n");
 				uncached_free_page(__IA64_UNCACHED_OFFSET |
-						   TO_PHYS((u64) amos_page));
+						   TO_PHYS((u64)amos_page));
 				return NULL;
 			}
 		}
-	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
+	} else if (!IS_AMO_ADDRESS((u64)amos_page)) {
 		/*
 		 * EFI's XPBOOT can also set amos_page in the reserved page,
 		 * but it happens to leave it as an uncached physical address
 		 * and we need it to be an uncached virtual, so we'll have to
 		 * convert it.
 		 */
-		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
+		if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
 			dev_err(xpc_part, "previously used amos_page address "
-				"is bad = 0x%p\n", (void *) amos_page);
+				"is bad = 0x%p\n", (void *)amos_page);
 			return NULL;
 		}
-		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
+		amos_page = (AMO_t *)TO_AMO((u64)amos_page);
 	}
 
 	/* clear xpc_vars */
@@ -267,22 +256,20 @@
 	xpc_vars->act_nasid = cpuid_to_nasid(0);
 	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
 	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
-	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
-	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */
-
+	xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
+	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */
 
 	/* clear xpc_vars_part */
-	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
-							XP_MAX_PARTITIONS);
+	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
+	       XP_MAX_PARTITIONS);
 
 	/* initialize the activate IRQ related AMO variables */
-	for (i = 0; i < xp_nasid_mask_words; i++) {
-		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
-	}
+	for (i = 0; i < xp_nasid_mask_words; i++)
+		(void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
 
 	/* initialize the engaged remote partitions related AMO variables */
-	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
-	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
+	(void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
+	(void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
 
 	/* timestamp of when reserved page was setup by XPC */
 	rp->stamp = CURRENT_TIME;
@@ -296,7 +283,6 @@
 	return rp;
 }
 
-
 /*
  * Change protections to allow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -307,39 +293,38 @@
 	int node;
 	int nasid;
 
-
-	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
 	if (is_shub2()) {
 		xpc_sh2_IPI_access0 =
-			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
 		xpc_sh2_IPI_access1 =
-			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
 		xpc_sh2_IPI_access2 =
-			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
 		xpc_sh2_IPI_access3 =
-			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-								-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-								-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-								-1UL);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-								-1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      -1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      -1UL);
 		}
 
 	} else {
 		xpc_sh1_IPI_access =
-			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
+		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-								-1UL);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      -1UL);
 
 			/*
 			 * Since the BIST collides with memory operations on
@@ -347,21 +332,23 @@
 			 */
 			if (enable_shub_wars_1_1()) {
 				/* open up everything */
-				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
-						GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-								-1UL);
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-								-1UL);
+				xpc_prot_vec[node] = (u64)HUB_L((u64 *)
+								GLOBAL_MMR_ADDR
+								(nasid,
+						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+						   SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+				      -1UL);
+				HUB_S((u64 *)
+				      GLOBAL_MMR_ADDR(nasid,
+						   SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+				      -1UL);
 			}
 		}
 	}
 }
 
-
 /*
  * Restrict protections to disallow IPI operations (and AMO operations on
  * Shub 1.1 systems).
@@ -372,43 +359,41 @@
 	int node;
 	int nasid;
 
-
-	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
+	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
 
 	if (is_shub2()) {
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-							xpc_sh2_IPI_access0);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-							xpc_sh2_IPI_access1);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-							xpc_sh2_IPI_access2);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-							xpc_sh2_IPI_access3);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
+			      xpc_sh2_IPI_access0);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
+			      xpc_sh2_IPI_access1);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
+			      xpc_sh2_IPI_access2);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
+			      xpc_sh2_IPI_access3);
 		}
 
 	} else {
 
 		for_each_online_node(node) {
 			nasid = cnodeid_to_nasid(node);
-			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-							xpc_sh1_IPI_access);
+			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
+			      xpc_sh1_IPI_access);
 
 			if (enable_shub_wars_1_1()) {
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-							xpc_prot_vec[node]);
-				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
-						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-							xpc_prot_vec[node]);
+				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+						   SH1_MD_DQLP_MMR_DIR_PRIVEC0),
+				      xpc_prot_vec[node]);
+				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
+						   SH1_MD_DQRP_MMR_DIR_PRIVEC0),
+				      xpc_prot_vec[node]);
 			}
 		}
 	}
 }
 
-
 /*
  * At periodic intervals, scan through all active partitions and ensure
  * their heartbeat is still active.  If not, the partition is deactivated.
@@ -421,34 +406,31 @@
 	partid_t partid;
 	bte_result_t bres;
 
-
-	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 
-		if (xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
-		if (partid == sn_partition_id) {
+		if (partid == sn_partition_id)
 			continue;
-		}
 
 		part = &xpc_partitions[partid];
 
 		if (part->act_state == XPC_P_INACTIVE ||
-				part->act_state == XPC_P_DEACTIVATING) {
+		    part->act_state == XPC_P_DEACTIVATING) {
 			continue;
 		}
 
 		/* pull the remote_hb cache line */
 		bres = xp_bte_copy(part->remote_vars_pa,
-					(u64) remote_vars,
-					XPC_RP_VARS_SIZE,
-					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
+				   (u64)remote_vars,
+				   XPC_RP_VARS_SIZE,
+				   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 		if (bres != BTE_SUCCESS) {
 			XPC_DEACTIVATE_PARTITION(part,
-						xpc_map_bte_errors(bres));
+						 xpc_map_bte_errors(bres));
 			continue;
 		}
 
@@ -459,8 +441,8 @@
 			remote_vars->heartbeating_to_mask);
 
 		if (((remote_vars->heartbeat == part->last_heartbeat) &&
-			(remote_vars->heartbeat_offline == 0)) ||
-			     !xpc_hb_allowed(sn_partition_id, remote_vars)) {
+		     (remote_vars->heartbeat_offline == 0)) ||
+		    !xpc_hb_allowed(sn_partition_id, remote_vars)) {
 
 			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
 			continue;
@@ -470,7 +452,6 @@
 	}
 }
 
-
 /*
  * Get a copy of a portion of the remote partition's rsvd page.
  *
@@ -480,59 +461,48 @@
  */
 static enum xpc_retval
 xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
-		struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+		  struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
 {
 	int bres, i;
 
-
 	/* get the reserved page's physical address */
 
 	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
-	if (*remote_rp_pa == 0) {
+	if (*remote_rp_pa == 0)
 		return xpcNoRsvdPageAddr;
-	}
-
 
 	/* pull over the reserved page header and part_nasids mask */
-	bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp,
-				XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
-				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
-	if (bres != BTE_SUCCESS) {
+	bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
+			   XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
+			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	if (bres != BTE_SUCCESS)
 		return xpc_map_bte_errors(bres);
-	}
-
 
 	if (discovered_nasids != NULL) {
 		u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
 
-
-		for (i = 0; i < xp_nasid_mask_words; i++) {
+		for (i = 0; i < xp_nasid_mask_words; i++)
 			discovered_nasids[i] |= remote_part_nasids[i];
-		}
 	}
 
-
 	/* check that the partid is for another partition */
 
 	if (remote_rp->partid < 1 ||
-				remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+	    remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
 		return xpcInvalidPartid;
 	}
 
-	if (remote_rp->partid == sn_partition_id) {
+	if (remote_rp->partid == sn_partition_id)
 		return xpcLocalPartid;
-	}
-
 
 	if (XPC_VERSION_MAJOR(remote_rp->version) !=
-					XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
 		return xpcBadVersion;
 	}
 
 	return xpcSuccess;
 }
 
-
 /*
  * Get a copy of the remote partition's XPC variables from the reserved page.
  *
@@ -544,34 +514,30 @@
 {
 	int bres;
 
-
-	if (remote_vars_pa == 0) {
+	if (remote_vars_pa == 0)
 		return xpcVarsNotSet;
-	}
 
 	/* pull over the cross partition variables */
-	bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE,
-				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
-	if (bres != BTE_SUCCESS) {
+	bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
+			   (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	if (bres != BTE_SUCCESS)
 		return xpc_map_bte_errors(bres);
-	}
 
 	if (XPC_VERSION_MAJOR(remote_vars->version) !=
-					XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
 		return xpcBadVersion;
 	}
 
 	return xpcSuccess;
 }
 
-
 /*
  * Update the remote partition's info.
  */
 static void
 xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
-		struct timespec *remote_rp_stamp, u64 remote_rp_pa,
-		u64 remote_vars_pa, struct xpc_vars *remote_vars)
+			  struct timespec *remote_rp_stamp, u64 remote_rp_pa,
+			  u64 remote_vars_pa, struct xpc_vars *remote_vars)
 {
 	part->remote_rp_version = remote_rp_version;
 	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
@@ -613,7 +579,6 @@
 		part->remote_vars_version);
 }
 
-
 /*
  * Prior code has determined the nasid which generated an IPI.  Inspect
  * that nasid to determine if its partition needs to be activated or
@@ -643,54 +608,51 @@
 	struct xpc_partition *part;
 	enum xpc_retval ret;
 
-
 	/* pull over the reserved page structure */
 
-	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
+	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
 	if (ret != xpcSuccess) {
 		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
-			"which sent interrupt, reason=%d\n", nasid, ret);
+			 "which sent interrupt, reason=%d\n", nasid, ret);
 		return;
 	}
 
 	remote_vars_pa = remote_rp->vars_pa;
 	remote_rp_version = remote_rp->version;
-	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
+	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
 		remote_rp_stamp = remote_rp->stamp;
-	}
+
 	partid = remote_rp->partid;
 	part = &xpc_partitions[partid];
 
-
 	/* pull over the cross partition variables */
 
-	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
+	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
 
 	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
 	if (ret != xpcSuccess) {
 
 		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
-			"which sent interrupt, reason=%d\n", nasid, ret);
+			 "which sent interrupt, reason=%d\n", nasid, ret);
 
 		XPC_DEACTIVATE_PARTITION(part, ret);
 		return;
 	}
 
-
 	part->act_IRQ_rcvd++;
 
 	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
-		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
+		"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
 		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
 
 	if (xpc_partition_disengaged(part) &&
-					part->act_state == XPC_P_INACTIVE) {
+	    part->act_state == XPC_P_INACTIVE) {
 
 		xpc_update_partition_info(part, remote_rp_version,
-					&remote_rp_stamp, remote_rp_pa,
-					remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 
 		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
 			if (xpc_partition_disengage_requested(1UL << partid)) {
@@ -714,16 +676,15 @@
 
 	if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
 		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-							remote_vars_version));
+						       remote_vars_version));
 
 		if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
 			DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-								version));
+							       version));
 			/* see if the other side rebooted */
 			if (part->remote_amos_page_pa ==
-				remote_vars->amos_page_pa &&
-					xpc_hb_allowed(sn_partition_id,
-								remote_vars)) {
+			    remote_vars->amos_page_pa &&
+			    xpc_hb_allowed(sn_partition_id, remote_vars)) {
 				/* doesn't look that way, so ignore the IPI */
 				return;
 			}
@@ -735,8 +696,8 @@
 		 */
 
 		xpc_update_partition_info(part, remote_rp_version,
-						&remote_rp_stamp, remote_rp_pa,
-						remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 		part->reactivate_nasid = nasid;
 		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
 		return;
@@ -756,15 +717,15 @@
 		xpc_clear_partition_disengage_request(1UL << partid);
 
 		xpc_update_partition_info(part, remote_rp_version,
-						&remote_rp_stamp, remote_rp_pa,
-						remote_vars_pa, remote_vars);
+					  &remote_rp_stamp, remote_rp_pa,
+					  remote_vars_pa, remote_vars);
 		reactivate = 1;
 
 	} else {
 		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
 
 		stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
-							&remote_rp_stamp);
+						&remote_rp_stamp);
 		if (stamp_diff != 0) {
 			DBUG_ON(stamp_diff >= 0);
 
@@ -775,17 +736,18 @@
 
 			DBUG_ON(xpc_partition_engaged(1UL << partid));
 			DBUG_ON(xpc_partition_disengage_requested(1UL <<
-								partid));
+								  partid));
 
 			xpc_update_partition_info(part, remote_rp_version,
-						&remote_rp_stamp, remote_rp_pa,
-						remote_vars_pa, remote_vars);
+						  &remote_rp_stamp,
+						  remote_rp_pa, remote_vars_pa,
+						  remote_vars);
 			reactivate = 1;
 		}
 	}
 
 	if (part->disengage_request_timeout > 0 &&
-					!xpc_partition_disengaged(part)) {
+	    !xpc_partition_disengaged(part)) {
 		/* still waiting on other side to disengage from us */
 		return;
 	}
@@ -795,12 +757,11 @@
 		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
 
 	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
-			xpc_partition_disengage_requested(1UL << partid)) {
+		   xpc_partition_disengage_requested(1UL << partid)) {
 		XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
 	}
 }
 
-
 /*
  * Loop through the activation AMO variables and process any bits
  * which are set.  Each bit indicates a nasid sending a partition
@@ -813,20 +774,17 @@
 {
 	int word, bit;
 	u64 nasid_mask;
-	u64 nasid;			/* remote nasid */
+	u64 nasid;		/* remote nasid */
 	int n_IRQs_detected = 0;
 	AMO_t *act_amos;
 
-
 	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
 
-
 	/* scan through act AMO variable looking for non-zero entries */
 	for (word = 0; word < xp_nasid_mask_words; word++) {
 
-		if (xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
 		nasid_mask = xpc_IPI_receive(&act_amos[word]);
 		if (nasid_mask == 0) {
@@ -837,7 +795,6 @@
 		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
 			nasid_mask);
 
-
 		/*
 		 * If this nasid has been added to the machine since
 		 * our partition was reset, this will retain the
@@ -846,7 +803,6 @@
 		 */
 		xpc_mach_nasids[word] |= nasid_mask;
 
-
 		/* locate the nasid(s) which sent interrupts */
 
 		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +818,6 @@
 	return n_IRQs_detected;
 }
 
-
 /*
  * See if the other side has responded to a partition disengage request
  * from us.
@@ -873,11 +828,11 @@
 	partid_t partid = XPC_PARTID(part);
 	int disengaged;
 
-
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
-			if (time_before(jiffies, part->disengage_request_timeout)) {
+			if (time_before(jiffies,
+			    part->disengage_request_timeout)) {
 				/* timelimit hasn't been reached yet */
 				return 0;
 			}
@@ -888,7 +843,7 @@
 			 */
 
 			dev_info(xpc_part, "disengage from remote partition %d "
-				"timed out\n", partid);
+				 "timed out\n", partid);
 			xpc_disengage_request_timedout = 1;
 			xpc_clear_partition_engaged(1UL << partid);
 			disengaged = 1;
@@ -898,23 +853,20 @@
 		/* cancel the timer function, provided it's not us */
 		if (!in_interrupt()) {
 			del_singleshot_timer_sync(&part->
-						      disengage_request_timer);
+						  disengage_request_timer);
 		}
 
 		DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
-					part->act_state != XPC_P_INACTIVE);
-		if (part->act_state != XPC_P_INACTIVE) {
+			part->act_state != XPC_P_INACTIVE);
+		if (part->act_state != XPC_P_INACTIVE)
 			xpc_wakeup_channel_mgr(part);
-		}
 
-		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
+		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
 			xpc_cancel_partition_disengage_request(part);
-		}
 	}
 	return disengaged;
 }
 
-
 /*
  * Mark specified partition as active.
  */
@@ -924,7 +876,6 @@
 	unsigned long irq_flags;
 	enum xpc_retval ret;
 
-
 	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,17 +891,15 @@
 	return ret;
 }
 
-
 /*
  * Notify XPC that the partition is down.
  */
 void
 xpc_deactivate_partition(const int line, struct xpc_partition *part,
-				enum xpc_retval reason)
+			 enum xpc_retval reason)
 {
 	unsigned long irq_flags;
 
-
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
 	if (part->act_state == XPC_P_INACTIVE) {
@@ -964,7 +913,7 @@
 	}
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
-					reason == xpcReactivating) {
+		    reason == xpcReactivating) {
 			XPC_SET_REASON(part, reason, line);
 		}
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,9 +931,9 @@
 
 		/* set a timelimit on the disengage request */
 		part->disengage_request_timeout = jiffies +
-					(xpc_disengage_request_timelimit * HZ);
+		    (xpc_disengage_request_timelimit * HZ);
 		part->disengage_request_timer.expires =
-					part->disengage_request_timeout;
+		    part->disengage_request_timeout;
 		add_timer(&part->disengage_request_timer);
 	}
 
@@ -994,7 +943,6 @@
 	xpc_partition_going_down(part, reason);
 }
 
-
 /*
  * Mark specified partition as inactive.
  */
@@ -1003,7 +951,6 @@
 {
 	unsigned long irq_flags;
 
-
 	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
 		XPC_PARTID(part));
 
@@ -1013,7 +960,6 @@
 	part->remote_rp_pa = 0;
 }
 
-
 /*
  * SAL has provided a partition and machine mask.  The partition mask
  * contains a bit for each even nasid in our partition.  The machine
@@ -1041,24 +987,22 @@
 	u64 *discovered_nasids;
 	enum xpc_retval ret;
 
-
 	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
-						xp_nasid_mask_bytes,
-						GFP_KERNEL, &remote_rp_base);
-	if (remote_rp == NULL) {
+						  xp_nasid_mask_bytes,
+						  GFP_KERNEL, &remote_rp_base);
+	if (remote_rp == NULL)
 		return;
-	}
-	remote_vars = (struct xpc_vars *) remote_rp;
 
+	remote_vars = (struct xpc_vars *)remote_rp;
 
 	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
-							GFP_KERNEL);
+				    GFP_KERNEL);
 	if (discovered_nasids == NULL) {
 		kfree(remote_rp_base);
 		return;
 	}
 
-	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
+	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
 
 	/*
 	 * The term 'region' in this context refers to the minimum number of
@@ -1081,23 +1025,19 @@
 
 	for (region = 0; region < max_regions; region++) {
 
-		if ((volatile int) xpc_exiting) {
+		if (xpc_exiting)
 			break;
-		}
 
 		dev_dbg(xpc_part, "searching region %d\n", region);
 
 		for (nasid = (region * region_size * 2);
-		     nasid < ((region + 1) * region_size * 2);
-		     nasid += 2) {
+		     nasid < ((region + 1) * region_size * 2); nasid += 2) {
 
-			if ((volatile int) xpc_exiting) {
+			if (xpc_exiting)
 				break;
-			}
 
 			dev_dbg(xpc_part, "checking nasid %d\n", nasid);
 
-
 			if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
 				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
 					"part of the local partition; skipping "
@@ -1119,19 +1059,18 @@
 				continue;
 			}
 
-
 			/* pull over the reserved page structure */
 
 			ret = xpc_get_remote_rp(nasid, discovered_nasids,
-					      remote_rp, &remote_rp_pa);
+						remote_rp, &remote_rp_pa);
 			if (ret != xpcSuccess) {
 				dev_dbg(xpc_part, "unable to get reserved page "
 					"from nasid %d, reason=%d\n", nasid,
 					ret);
 
-				if (ret == xpcLocalPartid) {
+				if (ret == xpcLocalPartid)
 					break;
-				}
+
 				continue;
 			}
 
@@ -1140,7 +1079,6 @@
 			partid = remote_rp->partid;
 			part = &xpc_partitions[partid];
 
-
 			/* pull over the cross partition variables */
 
 			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,15 +1109,15 @@
 			 * get the same page for remote_act_amos_pa after
 			 * module reloads and system reboots.
 			 */
-			if (sn_register_xp_addr_region(
-					    remote_vars->amos_page_pa,
-							PAGE_SIZE, 1) < 0) {
-				dev_dbg(xpc_part, "partition %d failed to "
+			if (sn_register_xp_addr_region
+			    (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
+				dev_dbg(xpc_part,
+					"partition %d failed to "
 					"register xp_addr region 0x%016lx\n",
 					partid, remote_vars->amos_page_pa);
 
 				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
-						__LINE__);
+					       __LINE__);
 				break;
 			}
 
@@ -1195,9 +1133,9 @@
 				remote_vars->act_phys_cpuid);
 
 			if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
-								version)) {
+							   version)) {
 				part->remote_amos_page_pa =
-						remote_vars->amos_page_pa;
+				    remote_vars->amos_page_pa;
 				xpc_mark_partition_disengaged(part);
 				xpc_cancel_partition_disengage_request(part);
 			}
@@ -1209,7 +1147,6 @@
 	kfree(remote_rp_base);
 }
 
-
 /*
  * Given a partid, get the nasids owned by that partition from the
  * remote partition's reserved page.
@@ -1221,19 +1158,17 @@
 	u64 part_nasid_pa;
 	int bte_res;
 
-
 	part = &xpc_partitions[partid];
-	if (part->remote_rp_pa == 0) {
+	if (part->remote_rp_pa == 0)
 		return xpcPartitionDown;
-	}
 
 	memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
 
-	part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
+	part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-	bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask,
-			xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
+			      xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
+			      NULL);
 
 	return xpc_map_bte_errors(bte_res);
 }
-
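
xpc_kmalloc_cacheline_aligned() in the hunk above first trusts kmalloc()'s natural alignment and only over-allocates by one cacheline when that is not enough, which is why every caller must keep the separate *base pointer for the eventual kfree().  The same idea in isolation, with illustrative names rather than the driver's:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cache.h>

/*
 * Return an L1-cacheline-aligned buffer of @size bytes; *base receives
 * the raw allocation that must later be handed to kfree().
 */
static void *example_kmalloc_aligned(size_t size, gfp_t flags, void **base)
{
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;	/* kmalloc() already gave us alignment */

	kfree(*base);

	/* over-allocate so the start can be rounded up to a cacheline */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

This is why the driver pairs xpc_remote_copy_buffer with xpc_remote_copy_buffer_base and frees only the _base pointer on its error paths.
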
diff --git a/arch/ia64/sn/kernel/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
similarity index 89%
rename from arch/ia64/sn/kernel/xpnet.c
rename to drivers/misc/sgi-xp/xpnet.c
index a5df672..a9543c6 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -3,10 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-
 /*
  * Cross Partition Network Interface (XPNET) support
  *
@@ -21,8 +20,8 @@
  *
  */
 
-
 #include <linux/module.h>
+#include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
@@ -36,10 +35,8 @@
 #include <asm/sn/bte.h>
 #include <asm/sn/io.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/types.h>
 #include <asm/atomic.h>
-#include <asm/sn/xp.h>
-
+#include "xp.h"
 
 /*
  * The message payload transferred by XPC.
@@ -79,7 +76,6 @@
 #define XPNET_MSG_ALIGNED_SIZE	(L1_CACHE_ALIGN(XPNET_MSG_SIZE))
 #define XPNET_MSG_NENTRIES	(PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
 
-
 #define XPNET_MAX_KTHREADS	(XPNET_MSG_NENTRIES + 1)
 #define XPNET_MAX_IDLE_KTHREADS	(XPNET_MSG_NENTRIES + 1)
 
@@ -91,9 +87,9 @@
 #define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
 #define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)
 
-#define	XPNET_VERSION _XPNET_VERSION(1,0)		/* version 1.0 */
-#define	XPNET_VERSION_EMBED _XPNET_VERSION(1,1)		/* version 1.1 */
-#define XPNET_MAGIC	0x88786984 /* "XNET" */
+#define	XPNET_VERSION _XPNET_VERSION(1, 0)	/* version 1.0 */
+#define	XPNET_VERSION_EMBED _XPNET_VERSION(1, 1)	/* version 1.1 */
+#define XPNET_MAGIC	0x88786984	/* "XNET" */
 
 #define XPNET_VALID_MSG(_m)						     \
    ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
@@ -101,7 +97,6 @@
 
 #define XPNET_DEVICE_NAME		"xp0"
 
-
 /*
  * When messages are queued with xpc_send_notify, a kmalloc'd buffer
  * of the following type is passed as a notification cookie.  When the
@@ -145,7 +140,6 @@
 /* 32KB has been determined to be the ideal */
 #define XPNET_DEF_MTU (0x8000UL)
 
-
 /*
  * The partition id is encapsulated in the MAC address.  The following
  * define locates the octet the partid is in.
@@ -153,7 +147,6 @@
 #define XPNET_PARTID_OCTET	1
 #define XPNET_LICENSE_OCTET	2
 
-
 /*
  * Define the XPNET debug device structure that is to be used with dev_dbg(),
  * dev_err(), dev_warn(), and dev_info().
@@ -163,7 +156,7 @@
 };
 
 struct device xpnet_dbg_subname = {
-	.bus_id = {0},			/* set to "" */
+	.bus_id = {0},		/* set to "" */
 	.driver = &xpnet_dbg_name
 };
 
@@ -178,14 +171,13 @@
 	struct sk_buff *skb;
 	bte_result_t bret;
 	struct xpnet_dev_private *priv =
-		(struct xpnet_dev_private *) xpnet_device->priv;
-
+	    (struct xpnet_dev_private *)xpnet_device->priv;
 
 	if (!XPNET_VALID_MSG(msg)) {
 		/*
 		 * Packet with a different XPC version.  Ignore.
 		 */
-		xpc_received(partid, channel, (void *) msg);
+		xpc_received(partid, channel, (void *)msg);
 
 		priv->stats.rx_errors++;
 
@@ -194,14 +186,13 @@
 	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
 		msg->leadin_ignore, msg->tailout_ignore);
 
-
 	/* reserve an extra cache line */
 	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
 	if (!skb) {
 		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
 			msg->size + L1_CACHE_BYTES);
 
-		xpc_received(partid, channel, (void *) msg);
+		xpc_received(partid, channel, (void *)msg);
 
 		priv->stats.rx_errors++;
 
@@ -227,12 +218,13 @@
 	 * Move the data over from the other side.
 	 */
 	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
-						(msg->embedded_bytes != 0)) {
+	    (msg->embedded_bytes != 0)) {
 		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
 			"%lu)\n", skb->data, &msg->data,
-			(size_t) msg->embedded_bytes);
+			(size_t)msg->embedded_bytes);
 
-		skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes);
+		skb_copy_to_linear_data(skb, &msg->data,
+					(size_t)msg->embedded_bytes);
 	} else {
 		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
 			"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -244,16 +236,18 @@
 				msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 
 		if (bret != BTE_SUCCESS) {
-			// >>> Need better way of cleaning skb.  Currently skb
-			// >>> appears in_use and we can't just call
-			// >>> dev_kfree_skb.
+			/*
+			 * >>> Need better way of cleaning skb.  Currently skb
+			 * >>> appears in_use and we can't just call
+			 * >>> dev_kfree_skb.
+			 */
 			dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
 				"error=0x%x\n", (void *)msg->buf_pa,
 				(void *)__pa((u64)skb->data &
-							~(L1_CACHE_BYTES - 1)),
+					     ~(L1_CACHE_BYTES - 1)),
 				msg->size, bret);
 
-			xpc_received(partid, channel, (void *) msg);
+			xpc_received(partid, channel, (void *)msg);
 
 			priv->stats.rx_errors++;
 
@@ -262,7 +256,7 @@
 	}
 
 	dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
-		"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
 		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
 		skb->len);
 
@@ -275,16 +269,14 @@
 		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
 		skb_end_pointer(skb), skb->len);
 
-
 	xpnet_device->last_rx = jiffies;
 	priv->stats.rx_packets++;
 	priv->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	netif_rx_ni(skb);
-	xpc_received(partid, channel, (void *) msg);
+	xpc_received(partid, channel, (void *)msg);
 }
 
-
 /*
  * This is the handler which XPC calls during any sort of change in
  * state or message reception on a connection.
@@ -295,20 +287,19 @@
 {
 	long bp;
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 	DBUG_ON(channel != XPC_NET_CHANNEL);
 
-	switch(reason) {
+	switch (reason) {
 	case xpcMsgReceived:	/* message received */
 		DBUG_ON(data == NULL);
 
-		xpnet_receive(partid, channel, (struct xpnet_message *) data);
+		xpnet_receive(partid, channel, (struct xpnet_message *)data);
 		break;
 
 	case xpcConnected:	/* connection completed to a partition */
 		spin_lock_bh(&xpnet_broadcast_lock);
-		xpnet_broadcast_partitions |= 1UL << (partid -1 );
+		xpnet_broadcast_partitions |= 1UL << (partid - 1);
 		bp = xpnet_broadcast_partitions;
 		spin_unlock_bh(&xpnet_broadcast_lock);
 
@@ -321,13 +312,12 @@
 
 	default:
 		spin_lock_bh(&xpnet_broadcast_lock);
-		xpnet_broadcast_partitions &= ~(1UL << (partid -1 ));
+		xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
 		bp = xpnet_broadcast_partitions;
 		spin_unlock_bh(&xpnet_broadcast_lock);
 
-		if (bp == 0) {
+		if (bp == 0)
 			netif_carrier_off(xpnet_device);
-		}
 
 		dev_dbg(xpnet, "%s disconnected from partition %d; "
 			"xpnet_broadcast_partitions=0x%lx\n",
@@ -337,13 +327,11 @@
 	}
 }
 
-
 static int
 xpnet_dev_open(struct net_device *dev)
 {
 	enum xpc_retval ret;
 
-
 	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
 		"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
 		XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
@@ -364,7 +352,6 @@
 	return 0;
 }
 
-
 static int
 xpnet_dev_stop(struct net_device *dev)
 {
@@ -375,7 +362,6 @@
 	return 0;
 }
 
-
 static int
 xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -392,7 +378,6 @@
 	return 0;
 }
 
-
 /*
  * Required for the net_device structure.
  */
@@ -402,7 +387,6 @@
 	return 0;
 }
 
-
 /*
  * Return statistics to the caller.
  */
@@ -411,13 +395,11 @@
 {
 	struct xpnet_dev_private *priv;
 
-
-	priv = (struct xpnet_dev_private *) dev->priv;
+	priv = (struct xpnet_dev_private *)dev->priv;
 
 	return &priv->stats;
 }
 
-
 /*
  * Notification that the other end has received the message and
  * DMA'd the skb information.  At this point, they are done with
@@ -426,11 +408,9 @@
  */
 static void
 xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
-			void *__qm)
+		     void *__qm)
 {
-	struct xpnet_pending_msg *queued_msg =
-		(struct xpnet_pending_msg *) __qm;
-
+	struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
 
 	DBUG_ON(queued_msg == NULL);
 
@@ -439,14 +419,13 @@
 
 	if (atomic_dec_return(&queued_msg->use_count) == 0) {
 		dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
-			(void *) queued_msg->skb->head);
+			(void *)queued_msg->skb->head);
 
 		dev_kfree_skb_any(queued_msg->skb);
 		kfree(queued_msg);
 	}
 }
 
-
 /*
  * Network layer has formatted a packet (skb) and is ready to place it
  * "on the wire".  Prepare and send an xpnet_message to all partitions
@@ -469,16 +448,13 @@
 	struct xpnet_dev_private *priv;
 	u16 embedded_bytes;
 
-
-	priv = (struct xpnet_dev_private *) dev->priv;
-
+	priv = (struct xpnet_dev_private *)dev->priv;
 
 	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
-		"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
+		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
 		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
 		skb->len);
 
-
 	/*
 	 * The xpnet_pending_msg tracks how many outstanding
 	 * xpc_send_notifies are relying on this skb.  When none
@@ -487,16 +463,15 @@
 	queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
 	if (queued_msg == NULL) {
 		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
-			"packet\n", sizeof(struct xpnet_pending_msg));
+			 "packet\n", sizeof(struct xpnet_pending_msg));
 
 		priv->stats.tx_errors++;
 
 		return -ENOMEM;
 	}
 
-
 	/* get the beginning of the first cacheline and end of last */
-	start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
+	start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
 	end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
 
 	/* calculate how many bytes to embed in the XPC message */
@@ -506,7 +481,6 @@
 		embedded_bytes = skb->len;
 	}
 
-
 	/*
 	 * Since the send occurs asynchronously, we set the count to one
 	 * and begin sending.  Any sends that happen to complete before
@@ -517,14 +491,13 @@
 	atomic_set(&queued_msg->use_count, 1);
 	queued_msg->skb = skb;
 
-
 	second_mac_octet = skb->data[XPNET_PARTID_OCTET];
 	if (second_mac_octet == 0xff) {
 		/* we are being asked to broadcast to all partitions */
 		dp = xpnet_broadcast_partitions;
 	} else if (second_mac_octet != 0) {
 		dp = xpnet_broadcast_partitions &
-					(1UL << (second_mac_octet - 1));
+		    (1UL << (second_mac_octet - 1));
 	} else {
 		/* 0 is an invalid partid.  Ignore */
 		dp = 0;
@@ -543,7 +516,6 @@
 	for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
 	     dest_partid++) {
 
-
 		if (!(dp & (1UL << (dest_partid - 1)))) {
 			/* not destined for this partition */
 			continue;
@@ -552,20 +524,18 @@
 		/* remove this partition from the destinations mask */
 		dp &= ~(1UL << (dest_partid - 1));
 
-
 		/* found a partition to send to */
 
 		ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
 				   XPC_NOWAIT, (void **)&msg);
-		if (unlikely(ret != xpcSuccess)) {
+		if (unlikely(ret != xpcSuccess))
 			continue;
-		}
 
 		msg->embedded_bytes = embedded_bytes;
 		if (unlikely(embedded_bytes != 0)) {
 			msg->version = XPNET_VERSION_EMBED;
 			dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
-				&msg->data, skb->data, (size_t) embedded_bytes);
+				&msg->data, skb->data, (size_t)embedded_bytes);
 			skb_copy_from_linear_data(skb, &msg->data,
 						  (size_t)embedded_bytes);
 		} else {
@@ -573,7 +543,7 @@
 		}
 		msg->magic = XPNET_MAGIC;
 		msg->size = end_addr - start_addr;
-		msg->leadin_ignore = (u64) skb->data - start_addr;
+		msg->leadin_ignore = (u64)skb->data - start_addr;
 		msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
 		msg->buf_pa = __pa(start_addr);
 
@@ -583,7 +553,6 @@
 			dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
 			msg->leadin_ignore, msg->tailout_ignore);
 
-
 		atomic_inc(&queued_msg->use_count);
 
 		ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
@@ -592,14 +561,12 @@
 			atomic_dec(&queued_msg->use_count);
 			continue;
 		}
-
 	}
 
 	if (atomic_dec_return(&queued_msg->use_count) == 0) {
 		dev_dbg(xpnet, "no partitions to receive packet destined for "
 			"%d\n", dest_partid);
 
-
 		dev_kfree_skb(skb);
 		kfree(queued_msg);
 	}
@@ -610,23 +577,20 @@
 	return 0;
 }
 
-
 /*
  * Deal with transmit timeouts coming from the network layer.
  */
 static void
-xpnet_dev_tx_timeout (struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev)
 {
 	struct xpnet_dev_private *priv;
 
-
-	priv = (struct xpnet_dev_private *) dev->priv;
+	priv = (struct xpnet_dev_private *)dev->priv;
 
 	priv->stats.tx_errors++;
 	return;
 }
 
-
 static int __init
 xpnet_init(void)
 {
@@ -634,10 +598,8 @@
 	u32 license_num;
 	int result = -ENOMEM;
 
-
-	if (!ia64_platform_is("sn2")) {
+	if (!ia64_platform_is("sn2"))
 		return -ENODEV;
-	}
 
 	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
 
@@ -647,9 +609,8 @@
 	 */
 	xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
 				    XPNET_DEVICE_NAME, ether_setup);
-	if (xpnet_device == NULL) {
+	if (xpnet_device == NULL)
 		return -ENOMEM;
-	}
 
 	netif_carrier_off(xpnet_device);
 
@@ -672,7 +633,7 @@
 	license_num = sn_partition_serial_number_val();
 	for (i = 3; i >= 0; i--) {
 		xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
-							license_num & 0xff;
+		    license_num & 0xff;
 		license_num = license_num >> 8;
 	}
 
@@ -690,29 +651,27 @@
 	xpnet_device->features = NETIF_F_NO_CSUM;
 
 	result = register_netdev(xpnet_device);
-	if (result != 0) {
+	if (result != 0)
 		free_netdev(xpnet_device);
-	}
 
 	return result;
 }
-module_init(xpnet_init);
 
+module_init(xpnet_init);
 
 static void __exit
 xpnet_exit(void)
 {
 	dev_info(xpnet, "unregistering network device %s\n",
-		xpnet_device[0].name);
+		 xpnet_device[0].name);
 
 	unregister_netdev(xpnet_device);
 
 	free_netdev(xpnet_device);
 }
-module_exit(xpnet_exit);
 
+module_exit(xpnet_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
 MODULE_LICENSE("GPL");
-
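
xpnet_dev_hard_start_xmit() above keeps one skb alive across several asynchronous xpc_send_notify() calls by refcounting a small notification cookie: the count starts at one, every queued send takes a reference, and xpnet_send_completed() drops one, so the skb is freed only after the last send completes.  A simplified stand-alone sketch of that scheme (the cookie type, the queue_send() stub and the 64-partition limit are illustrative, not the driver's):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct tx_cookie {
	struct sk_buff *skb;
	atomic_t use_count;
};

/* every completed (or never-queued) send drops one reference */
static void tx_done(struct tx_cookie *cookie)
{
	if (atomic_dec_return(&cookie->use_count) == 0) {
		dev_kfree_skb_any(cookie->skb);
		kfree(cookie);
	}
}

/*
 * Stand-in for the asynchronous transport (xpc_send_notify() in xpnet);
 * its completion handler is expected to call tx_done() exactly once.
 */
static int queue_send(int partid, struct tx_cookie *cookie)
{
	return -ENOSYS;		/* sketch only */
}

static void tx_to_partitions(struct sk_buff *skb, unsigned long dest_mask)
{
	struct tx_cookie *cookie;
	int partid;

	cookie = kmalloc(sizeof(*cookie), GFP_ATOMIC);
	if (cookie == NULL) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Hold an initial reference so early completions cannot free the
	 * skb while sends are still being queued.
	 */
	atomic_set(&cookie->use_count, 1);
	cookie->skb = skb;

	for (partid = 1; dest_mask != 0 && partid < 64; partid++) {
		if (!(dest_mask & (1UL << (partid - 1))))
			continue;
		dest_mask &= ~(1UL << (partid - 1));

		atomic_inc(&cookie->use_count);
		if (queue_send(partid, cookie) != 0)
			atomic_dec(&cookie->use_count);	/* never queued */
	}

	/* drop the initial reference; frees the skb if nothing was queued */
	tx_done(cookie);
}

GFP_ATOMIC mirrors the driver: the transmit hook runs in a context that must not sleep.
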
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f9b7bdd..8ddb918 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -416,13 +416,13 @@
 	 * Reserve some resources for CardBus.  We reserve
 	 * a fixed amount of bus space for CardBus bridges.
 	 */
-	b_res[0].start = pci_cardbus_io_size;
-	b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
-	b_res[0].flags |= IORESOURCE_IO;
+	b_res[0].start = 0;
+	b_res[0].end = pci_cardbus_io_size - 1;
+	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
 
-	b_res[1].start = pci_cardbus_io_size;
-	b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
-	b_res[1].flags |= IORESOURCE_IO;
+	b_res[1].start = 0;
+	b_res[1].end = pci_cardbus_io_size - 1;
+	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
 
 	/*
 	 * Check whether prefetchable memory is supported
@@ -441,17 +441,17 @@
 	 * twice the size.
 	 */
 	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
-		b_res[2].start = pci_cardbus_mem_size;
-		b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
-		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+		b_res[2].start = 0;
+		b_res[2].end = pci_cardbus_mem_size - 1;
+		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
 
-		b_res[3].start = pci_cardbus_mem_size;
-		b_res[3].end = b_res[3].start + pci_cardbus_mem_size - 1;
-		b_res[3].flags |= IORESOURCE_MEM;
+		b_res[3].start = 0;
+		b_res[3].end = pci_cardbus_mem_size - 1;
+		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
 	} else {
-		b_res[3].start = pci_cardbus_mem_size * 2;
-		b_res[3].end = b_res[3].start + pci_cardbus_mem_size * 2 - 1;
-		b_res[3].flags |= IORESOURCE_MEM;
+		b_res[3].start = 0;
+		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
+		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
 	}
 }
 
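The hunk above switches the CardBus window reservation from pre-placed [size, 2*size-1] ranges to a size-only convention: start = 0, end = size - 1, with IORESOURCE_SIZEALIGN so the allocator later derives the window's alignment from its size when it actually assigns the bridge windows. A minimal user-space sketch of that encoding follows; the flag values and the 256-byte pci_cardbus_io_size default are illustrative assumptions, not taken from this patch.

/*
 * Sketch of the size-only CardBus window encoding: only the window size is
 * recorded (start 0, end size-1) and IORESOURCE_SIZEALIGN tells the
 * allocator to align the window to its size.  Flag values and the default
 * size are illustrative.
 */
#include <stdio.h>

struct resource { unsigned long start, end, flags; };

#define IORESOURCE_IO		0x00000100	/* illustrative values */
#define IORESOURCE_SIZEALIGN	0x00040000

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	unsigned long pci_cardbus_io_size = 256;	/* assumed default */
	struct resource b_res0 = {
		.start = 0,				/* not placed yet */
		.end   = pci_cardbus_io_size - 1,	/* encodes only the size */
		.flags = IORESOURCE_IO | IORESOURCE_SIZEALIGN,
	};

	/* the allocator sizes and aligns the window from resource_size() */
	printf("CardBus I/O window: size %lu, alignment %lu\n",
	       resource_size(&b_res0), resource_size(&b_res0));
	return 0;
}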
diff --git a/fs/Kconfig b/fs/Kconfig
index 028ae38..8b18a87 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -689,6 +689,7 @@
 
 config UDF_FS
 	tristate "UDF file system support"
+	select CRC_ITU_T
 	help
 	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
 	  you intend to mount DVD discs or CDRW's written in packet mode, or
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index d248e60..ca1c912 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -10,6 +10,7 @@
 				midcomms.o \
 				netlink.o \
 				lowcomms.o \
+				plock.o \
 				rcom.o \
 				recover.o \
 				recoverd.o \
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c3ad1df..eac23bd 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -114,7 +114,7 @@
 };
 
 static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
-			   unsigned int *info_field, int check_zero,
+			   int *info_field, int check_zero,
 			   const char *buf, size_t len)
 {
 	unsigned int x;
@@ -284,6 +284,7 @@
 	struct list_head list; /* space->members */
 	int nodeid;
 	int weight;
+	int new;
 };
 
 static struct configfs_group_operations clusters_ops = {
@@ -565,6 +566,7 @@
 	config_item_init_type_name(&nd->item, name, &node_type);
 	nd->nodeid = -1;
 	nd->weight = 1;  /* default weight of 1 if none is set */
+	nd->new = 1;     /* set to 0 once it's been read by dlm_nodeid_list() */
 
 	mutex_lock(&sp->members_lock);
 	list_add(&nd->list, &sp->members);
@@ -805,12 +807,13 @@
 }
 
 /* caller must free mem */
-int dlm_nodeid_list(char *lsname, int **ids_out)
+int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
+		    int **new_out, int *new_count_out)
 {
 	struct space *sp;
 	struct node *nd;
-	int i = 0, rv = 0;
-	int *ids;
+	int i = 0, rv = 0, ids_count = 0, new_count = 0;
+	int *ids, *new;
 
 	sp = get_space(lsname);
 	if (!sp)
@@ -818,23 +821,50 @@
 
 	mutex_lock(&sp->members_lock);
 	if (!sp->members_count) {
-		rv = 0;
+		rv = -EINVAL;
+		printk(KERN_ERR "dlm: zero members_count\n");
 		goto out;
 	}
 
-	ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
+	ids_count = sp->members_count;
+
+	ids = kcalloc(ids_count, sizeof(int), GFP_KERNEL);
 	if (!ids) {
 		rv = -ENOMEM;
 		goto out;
 	}
 
-	rv = sp->members_count;
-	list_for_each_entry(nd, &sp->members, list)
+	list_for_each_entry(nd, &sp->members, list) {
 		ids[i++] = nd->nodeid;
+		if (nd->new)
+			new_count++;
+	}
 
-	if (rv != i)
-		printk("bad nodeid count %d %d\n", rv, i);
+	if (ids_count != i)
+		printk(KERN_ERR "dlm: bad nodeid count %d %d\n", ids_count, i);
 
+	if (!new_count)
+		goto out_ids;
+
+	new = kcalloc(new_count, sizeof(int), GFP_KERNEL);
+	if (!new) {
+		kfree(ids);
+		rv = -ENOMEM;
+		goto out;
+	}
+
+	i = 0;
+	list_for_each_entry(nd, &sp->members, list) {
+		if (nd->new) {
+			new[i++] = nd->nodeid;
+			nd->new = 0;
+		}
+	}
+	*new_count_out = new_count;
+	*new_out = new;
+
+ out_ids:
+	*ids_count_out = ids_count;
 	*ids_out = ids;
  out:
 	mutex_unlock(&sp->members_lock);
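For clarity, the reworked dlm_nodeid_list() above now returns two arrays: one with every member nodeid and a second with only the nodeids whose one-shot "new" flag was still set (the flag is cleared as it is read). A stand-alone sketch of that two-pass gather over a plain array follows; the struct and function names are illustrative, not the kernel's list_head-based version.

/*
 * Sketch of the two-array gather dlm_nodeid_list() now does: one array with
 * every member nodeid, one with only the "new" members, clearing the
 * one-shot flag as it is read.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int nodeid; int new; };

static int nodeid_list(struct node *members, int members_count,
		       int **ids_out, int *ids_count_out,
		       int **new_out, int *new_count_out)
{
	int *ids, *new = NULL;
	int i, j = 0, new_count = 0;

	ids = calloc(members_count, sizeof(int));
	if (!ids)
		return -1;

	for (i = 0; i < members_count; i++) {
		ids[i] = members[i].nodeid;
		if (members[i].new)
			new_count++;
	}

	if (new_count) {
		new = calloc(new_count, sizeof(int));
		if (!new) {
			free(ids);
			return -1;
		}
		for (i = 0; i < members_count; i++) {
			if (members[i].new) {
				new[j++] = members[i].nodeid;
				members[i].new = 0;	/* read-once semantics */
			}
		}
	}

	/* unlike the kernel code, report new/new_count even when zero */
	*ids_out = ids;
	*ids_count_out = members_count;
	*new_out = new;
	*new_count_out = new_count;
	return 0;
}

int main(void)
{
	struct node members[] = { {1, 0}, {2, 1}, {3, 0}, {4, 1} };
	int *ids = NULL, *new = NULL, ids_count = 0, new_count = 0, i;

	if (nodeid_list(members, 4, &ids, &ids_count, &new, &new_count))
		return 1;
	printf("%d members, %d new\n", ids_count, new_count);
	for (i = 0; i < new_count; i++)
		printf("new nodeid %d\n", new[i]);
	free(ids);
	free(new);
	return 0;
}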
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index a3170fe..4f1d6fc 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -35,7 +35,8 @@
 int dlm_config_init(void);
 void dlm_config_exit(void);
 int dlm_node_weight(char *lsname, int nodeid);
-int dlm_nodeid_list(char *lsname, int **ids_out);
+int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
+		    int **new_out, int *new_count_out);
 int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
 int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
 int dlm_our_nodeid(void);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 7a8824f4..5a7ac33 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -42,8 +42,6 @@
 #include <linux/dlm.h>
 #include "config.h"
 
-#define DLM_LOCKSPACE_LEN	64
-
 /* Size of the temp buffer midcomms allocates on the stack.
    We try to make this large enough so most messages fit.
    FIXME: should sctp make this unnecessary? */
@@ -132,8 +130,10 @@
 
 struct dlm_recover {
 	struct list_head	list;
-	int			*nodeids;
+	int			*nodeids;   /* nodeids of all members */
 	int			node_count;
+	int			*new;       /* nodeids of new members */
+	int			new_count;
 	uint64_t		seq;
 };
 
@@ -579,6 +579,8 @@
 int dlm_netlink_init(void);
 void dlm_netlink_exit(void);
 void dlm_timeout_warn(struct dlm_lkb *lkb);
+int dlm_plock_init(void);
+void dlm_plock_exit(void);
 
 #ifdef CONFIG_DLM_DEBUG
 int dlm_register_debugfs(void);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 8f250ac..2d3d102 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -165,7 +165,7 @@
 	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
 }
 
-void dlm_print_rsb(struct dlm_rsb *r)
+static void dlm_print_rsb(struct dlm_rsb *r)
 {
 	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
 	       r->res_nodeid, r->res_flags, r->res_first_lkid,
@@ -1956,8 +1956,7 @@
 			list_del_init(&lkb->lkb_rsb_lookup);
 			r->res_first_lkid = lkb->lkb_id;
 			_request_lock(r, lkb);
-		} else
-			r->res_nodeid = -1;
+		}
 		break;
 
 	default:
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 05d9c82..88e93c8 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -13,7 +13,6 @@
 #ifndef __LOCK_DOT_H__
 #define __LOCK_DOT_H__
 
-void dlm_print_rsb(struct dlm_rsb *r);
 void dlm_dump_rsb(struct dlm_rsb *r);
 void dlm_print_lkb(struct dlm_lkb *lkb);
 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 58487fb..b80e0aa 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -46,10 +46,16 @@
 	if (error)
 		goto out_user;
 
+	error = dlm_plock_init();
+	if (error)
+		goto out_netlink;
+
 	printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
 
 	return 0;
 
+ out_netlink:
+	dlm_netlink_exit();
  out_user:
 	dlm_user_exit();
  out_debug:
@@ -66,6 +72,7 @@
 
 static void __exit exit_dlm(void)
 {
+	dlm_plock_exit();
 	dlm_netlink_exit();
 	dlm_user_exit();
 	dlm_config_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index fa17f5a..26133f0 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -210,6 +210,23 @@
 		}
 	}
 
+	/* Add an entry to ls_nodes_gone for members that were removed and
+	   then added again, so that previous state for these nodes will be
+	   cleared during recovery. */
+
+	for (i = 0; i < rv->new_count; i++) {
+		if (!dlm_is_member(ls, rv->new[i]))
+			continue;
+		log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
+
+		memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
+		if (!memb)
+			return -ENOMEM;
+		memb->nodeid = rv->new[i];
+		list_add_tail(&memb->list, &ls->ls_nodes_gone);
+		neg++;
+	}
+
 	/* add new members to ls_nodes */
 
 	for (i = 0; i < rv->node_count; i++) {
@@ -314,15 +331,16 @@
 int dlm_ls_start(struct dlm_ls *ls)
 {
 	struct dlm_recover *rv = NULL, *rv_old;
-	int *ids = NULL;
-	int error, count;
+	int *ids = NULL, *new = NULL;
+	int error, ids_count = 0, new_count = 0;
 
 	rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
 	if (!rv)
 		return -ENOMEM;
 
-	error = count = dlm_nodeid_list(ls->ls_name, &ids);
-	if (error <= 0)
+	error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
+				&new, &new_count);
+	if (error < 0)
 		goto fail;
 
 	spin_lock(&ls->ls_recover_lock);
@@ -337,14 +355,19 @@
 	}
 
 	rv->nodeids = ids;
-	rv->node_count = count;
+	rv->node_count = ids_count;
+	rv->new = new;
+	rv->new_count = new_count;
 	rv->seq = ++ls->ls_recover_seq;
 	rv_old = ls->ls_recover_args;
 	ls->ls_recover_args = rv;
 	spin_unlock(&ls->ls_recover_lock);
 
 	if (rv_old) {
+		log_error(ls, "unused recovery %llx %d",
+			  (unsigned long long)rv_old->seq, rv_old->node_count);
 		kfree(rv_old->nodeids);
+		kfree(rv_old->new);
 		kfree(rv_old);
 	}
 
@@ -354,6 +377,7 @@
  fail:
 	kfree(rv);
 	kfree(ids);
+	kfree(new);
 	return error;
 }
 
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/dlm/plock.c
similarity index 67%
rename from fs/gfs2/locking/dlm/plock.c
rename to fs/dlm/plock.c
index 2ebd374..d6d6e37 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -1,17 +1,19 @@
 /*
- * Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
  * of the GNU General Public License version 2.
  */
 
+#include <linux/fs.h>
 #include <linux/miscdevice.h>
-#include <linux/lock_dlm_plock.h>
 #include <linux/poll.h>
+#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
 
-#include "lock_dlm.h"
-
+#include "dlm_internal.h"
+#include "lockspace.h"
 
 static spinlock_t ops_lock;
 static struct list_head send_list;
@@ -22,7 +24,7 @@
 struct plock_op {
 	struct list_head list;
 	int done;
-	struct gdlm_plock_info info;
+	struct dlm_plock_info info;
 };
 
 struct plock_xop {
@@ -34,22 +36,22 @@
 };
 
 
-static inline void set_version(struct gdlm_plock_info *info)
+static inline void set_version(struct dlm_plock_info *info)
 {
-	info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
-	info->version[1] = GDLM_PLOCK_VERSION_MINOR;
-	info->version[2] = GDLM_PLOCK_VERSION_PATCH;
+	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
+	info->version[1] = DLM_PLOCK_VERSION_MINOR;
+	info->version[2] = DLM_PLOCK_VERSION_PATCH;
 }
 
-static int check_version(struct gdlm_plock_info *info)
+static int check_version(struct dlm_plock_info *info)
 {
-	if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
-	    (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
-		log_error("plock device version mismatch: "
+	if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
+	    (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
+		log_print("plock device version mismatch: "
 			  "kernel (%u.%u.%u), user (%u.%u.%u)",
-			  GDLM_PLOCK_VERSION_MAJOR,
-			  GDLM_PLOCK_VERSION_MINOR,
-			  GDLM_PLOCK_VERSION_PATCH,
+			  DLM_PLOCK_VERSION_MAJOR,
+			  DLM_PLOCK_VERSION_MINOR,
+			  DLM_PLOCK_VERSION_PATCH,
 			  info->version[0],
 			  info->version[1],
 			  info->version[2]);
@@ -68,25 +70,31 @@
 	wake_up(&send_wq);
 }
 
-int gdlm_plock(void *lockspace, struct lm_lockname *name,
-	       struct file *file, int cmd, struct file_lock *fl)
+int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		   int cmd, struct file_lock *fl)
 {
-	struct gdlm_ls *ls = lockspace;
+	struct dlm_ls *ls;
 	struct plock_op *op;
 	struct plock_xop *xop;
 	int rv;
 
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
+
 	xop = kzalloc(sizeof(*xop), GFP_KERNEL);
-	if (!xop)
-		return -ENOMEM;
+	if (!xop) {
+		rv = -ENOMEM;
+		goto out;
+	}
 
 	op = &xop->xop;
-	op->info.optype		= GDLM_PLOCK_OP_LOCK;
+	op->info.optype		= DLM_PLOCK_OP_LOCK;
 	op->info.pid		= fl->fl_pid;
 	op->info.ex		= (fl->fl_type == F_WRLCK);
 	op->info.wait		= IS_SETLKW(cmd);
-	op->info.fsid		= ls->id;
-	op->info.number		= name->ln_number;
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
 	op->info.start		= fl->fl_start;
 	op->info.end		= fl->fl_end;
 	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
@@ -107,12 +115,15 @@
 
 	if (xop->callback == NULL)
 		wait_event(recv_wq, (op->done != 0));
-	else
-		return -EINPROGRESS;
+	else {
+		rv = -EINPROGRESS;
+		goto out;
+	}
 
 	spin_lock(&ops_lock);
 	if (!list_empty(&op->list)) {
-		printk(KERN_INFO "plock op on list\n");
+		log_error(ls, "dlm_posix_lock: op on list %llx",
+			  (unsigned long long)number);
 		list_del(&op->list);
 	}
 	spin_unlock(&ops_lock);
@@ -121,17 +132,19 @@
 
 	if (!rv) {
 		if (posix_lock_file_wait(file, fl) < 0)
-			log_error("gdlm_plock: vfs lock error %x,%llx",
-				  name->ln_type,
-				  (unsigned long long)name->ln_number);
+			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
+				  (unsigned long long)number);
 	}
 
 	kfree(xop);
+out:
+	dlm_put_lockspace(ls);
 	return rv;
 }
+EXPORT_SYMBOL_GPL(dlm_posix_lock);
 
 /* Returns failure iff a successful lock operation should be canceled */
-static int gdlm_plock_callback(struct plock_op *op)
+static int dlm_plock_callback(struct plock_op *op)
 {
 	struct file *file;
 	struct file_lock *fl;
@@ -142,7 +155,8 @@
 
 	spin_lock(&ops_lock);
 	if (!list_empty(&op->list)) {
-		printk(KERN_INFO "plock op on list\n");
+		log_print("dlm_plock_callback: op on list %llx",
+			  (unsigned long long)op->info.number);
 		list_del(&op->list);
 	}
 	spin_unlock(&ops_lock);
@@ -165,19 +179,19 @@
 		 * This can only happen in the case of kmalloc() failure.
 		 * The filesystem's own lock is the authoritative lock,
 		 * so a failure to get the lock locally is not a disaster.
-		 * As long as GFS cannot reliably cancel locks (especially
+		 * As long as the fs cannot reliably cancel locks (especially
 		 * in a low-memory situation), we're better off ignoring
 		 * this failure than trying to recover.
 		 */
-		log_error("gdlm_plock: vfs lock error file %p fl %p",
-				file, fl);
+		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
+			  (unsigned long long)op->info.number, file, fl);
 	}
 
 	rv = notify(flc, NULL, 0);
 	if (rv) {
 		/* XXX: We need to cancel the fs lock here: */
-		printk("gfs2 lock granted after lock request failed;"
-						" dangling lock!\n");
+		log_print("dlm_plock_callback: lock granted after lock request "
+			  "failed; dangling lock!\n");
 		goto out;
 	}
 
@@ -186,25 +200,31 @@
 	return rv;
 }
 
-int gdlm_punlock(void *lockspace, struct lm_lockname *name,
-		 struct file *file, struct file_lock *fl)
+int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		     struct file_lock *fl)
 {
-	struct gdlm_ls *ls = lockspace;
+	struct dlm_ls *ls;
 	struct plock_op *op;
 	int rv;
 
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
+
 	op = kzalloc(sizeof(*op), GFP_KERNEL);
-	if (!op)
-		return -ENOMEM;
+	if (!op) {
+		rv = -ENOMEM;
+		goto out;
+	}
 
 	if (posix_lock_file_wait(file, fl) < 0)
-		log_error("gdlm_punlock: vfs unlock error %x,%llx",
-			  name->ln_type, (unsigned long long)name->ln_number);
+		log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
+			  (unsigned long long)number);
 
-	op->info.optype		= GDLM_PLOCK_OP_UNLOCK;
+	op->info.optype		= DLM_PLOCK_OP_UNLOCK;
 	op->info.pid		= fl->fl_pid;
-	op->info.fsid		= ls->id;
-	op->info.number		= name->ln_number;
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
 	op->info.start		= fl->fl_start;
 	op->info.end		= fl->fl_end;
 	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -217,7 +237,8 @@
 
 	spin_lock(&ops_lock);
 	if (!list_empty(&op->list)) {
-		printk(KERN_INFO "punlock op on list\n");
+		log_error(ls, "dlm_posix_unlock: op on list %llx",
+			  (unsigned long long)number);
 		list_del(&op->list);
 	}
 	spin_unlock(&ops_lock);
@@ -228,25 +249,34 @@
 		rv = 0;
 
 	kfree(op);
+out:
+	dlm_put_lockspace(ls);
 	return rv;
 }
+EXPORT_SYMBOL_GPL(dlm_posix_unlock);
 
-int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
-		   struct file *file, struct file_lock *fl)
+int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		  struct file_lock *fl)
 {
-	struct gdlm_ls *ls = lockspace;
+	struct dlm_ls *ls;
 	struct plock_op *op;
 	int rv;
 
-	op = kzalloc(sizeof(*op), GFP_KERNEL);
-	if (!op)
-		return -ENOMEM;
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
 
-	op->info.optype		= GDLM_PLOCK_OP_GET;
+	op = kzalloc(sizeof(*op), GFP_KERNEL);
+	if (!op) {
+		rv = -ENOMEM;
+		goto out;
+	}
+
+	op->info.optype		= DLM_PLOCK_OP_GET;
 	op->info.pid		= fl->fl_pid;
 	op->info.ex		= (fl->fl_type == F_WRLCK);
-	op->info.fsid		= ls->id;
-	op->info.number		= name->ln_number;
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
 	op->info.start		= fl->fl_start;
 	op->info.end		= fl->fl_end;
 	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -259,7 +289,8 @@
 
 	spin_lock(&ops_lock);
 	if (!list_empty(&op->list)) {
-		printk(KERN_INFO "plock_get op on list\n");
+		log_error(ls, "dlm_posix_get: op on list %llx",
+			  (unsigned long long)number);
 		list_del(&op->list);
 	}
 	spin_unlock(&ops_lock);
@@ -281,14 +312,17 @@
 	}
 
 	kfree(op);
+out:
+	dlm_put_lockspace(ls);
 	return rv;
 }
+EXPORT_SYMBOL_GPL(dlm_posix_get);
 
 /* a read copies out one plock request from the send list */
 static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 			loff_t *ppos)
 {
-	struct gdlm_plock_info info;
+	struct dlm_plock_info info;
 	struct plock_op *op = NULL;
 
 	if (count < sizeof(info))
@@ -315,7 +349,7 @@
 static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 			 loff_t *ppos)
 {
-	struct gdlm_plock_info info;
+	struct dlm_plock_info info;
 	struct plock_op *op;
 	int found = 0;
 
@@ -345,12 +379,12 @@
 		struct plock_xop *xop;
 		xop = (struct plock_xop *)op;
 		if (xop->callback)
-			count = gdlm_plock_callback(op);
+			count = dlm_plock_callback(op);
 		else
 			wake_up(&recv_wq);
 	} else
-		printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
-			(unsigned long long)info.number);
+		log_print("dev_write no op %x %llx", info.fsid,
+			  (unsigned long long)info.number);
 	return count;
 }
 
@@ -377,11 +411,11 @@
 
 static struct miscdevice plock_dev_misc = {
 	.minor = MISC_DYNAMIC_MINOR,
-	.name = GDLM_PLOCK_MISC_NAME,
+	.name = DLM_PLOCK_MISC_NAME,
 	.fops = &dev_fops
 };
 
-int gdlm_plock_init(void)
+int dlm_plock_init(void)
 {
 	int rv;
 
@@ -393,14 +427,13 @@
 
 	rv = misc_register(&plock_dev_misc);
 	if (rv)
-		printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
-		       rv);
+		log_print("dlm_plock_init: misc_register failed %d", rv);
 	return rv;
 }
 
-void gdlm_plock_exit(void)
+void dlm_plock_exit(void)
 {
 	if (misc_deregister(&plock_dev_misc) < 0)
-		printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
+		log_print("dlm_plock_exit: misc_deregister failed");
 }
 
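One detail worth spelling out: check_version() above accepts a request from user space only when the major version matches exactly and the kernel's minor version is at least the minor reported by user space. A stand-alone restatement of that rule, with purely illustrative version numbers rather than the real DLM_PLOCK_VERSION_* values:

/*
 * Restates the check_version() rule: major must match exactly, and the
 * kernel minor must be >= the user-space minor.  Version numbers are
 * illustrative only.
 */
#include <stdio.h>

#define KERNEL_MAJOR 1
#define KERNEL_MINOR 1

static int version_ok(unsigned int major, unsigned int minor)
{
	return (major == KERNEL_MAJOR) && (minor <= KERNEL_MINOR);
}

int main(void)
{
	printf("1.0 ok? %d\n", version_ok(1, 0));	/* yes: older user minor */
	printf("1.2 ok? %d\n", version_ok(1, 2));	/* no: user minor too new */
	printf("2.0 ok? %d\n", version_ok(2, 0));	/* no: major mismatch */
	return 0;
}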
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 997f953..fd677c8 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -257,6 +257,7 @@
 	if (rv) {
 		ls_recover(ls, rv);
 		kfree(rv->nodeids);
+		kfree(rv->new);
 		kfree(rv);
 	}
 }
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
index 89b93b6b..2609bb6 100644
--- a/fs/gfs2/locking/dlm/Makefile
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
-lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o
+lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
 
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 58fcf8c..a243cf6 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -25,6 +25,7 @@
 #include <net/sock.h>
 
 #include <linux/dlm.h>
+#include <linux/dlm_plock.h>
 #include <linux/lm_interface.h>
 
 /*
@@ -173,17 +174,6 @@
 int gdlm_hold_lvb(void *, char **);
 void gdlm_unhold_lvb(void *, char *);
 
-/* plock.c */
-
-int gdlm_plock_init(void);
-void gdlm_plock_exit(void);
-int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
-		struct file_lock *);
-int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
-		struct file_lock *);
-int gdlm_punlock(void *, struct lm_lockname *, struct file *,
-		struct file_lock *);
-
 /* mount.c */
 
 extern const struct lm_lockops gdlm_ops;
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 36a2258..b9a03a7 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -28,13 +28,6 @@
 		return error;
 	}
 
-	error = gdlm_plock_init();
-	if (error) {
-		gdlm_sysfs_exit();
-		gfs2_unregister_lockproto(&gdlm_ops);
-		return error;
-	}
-
 	printk(KERN_INFO
 	       "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
 	return 0;
@@ -42,7 +35,6 @@
 
 static void __exit exit_lock_dlm(void)
 {
-	gdlm_plock_exit();
 	gdlm_sysfs_exit();
 	gfs2_unregister_lockproto(&gdlm_ops);
 }
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index f2efff4..470bdf6 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -236,6 +236,27 @@
 	gdlm_kobject_release(ls);
 }
 
+static int gdlm_plock(void *lockspace, struct lm_lockname *name,
+	       struct file *file, int cmd, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
+}
+
+static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
+		 struct file *file, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
+static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
+		   struct file *file, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
 const struct lm_lockops gdlm_ops = {
 	.lm_proto_name = "lock_dlm",
 	.lm_mount = gdlm_mount,
diff --git a/fs/read_write.c b/fs/read_write.c
index 49a9871..f0d1240 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -33,7 +33,7 @@
 
 loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
 {
-	long long retval;
+	loff_t retval;
 	struct inode *inode = file->f_mapping->host;
 
 	mutex_lock(&inode->i_mutex);
@@ -60,7 +60,7 @@
 
 loff_t remote_llseek(struct file *file, loff_t offset, int origin)
 {
-	long long retval;
+	loff_t retval;
 
 	lock_kernel();
 	switch (origin) {
@@ -91,7 +91,7 @@
 
 loff_t default_llseek(struct file *file, loff_t offset, int origin)
 {
-	long long retval;
+	loff_t retval;
 
 	lock_kernel();
 	switch (origin) {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index bf2bcfd..cdfd996 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -247,7 +247,7 @@
 loff_t seq_lseek(struct file *file, loff_t offset, int origin)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
-	long long retval = -EINVAL;
+	loff_t retval = -EINVAL;
 
 	mutex_lock(&m->lock);
 	m->version = file->f_version;
diff --git a/fs/udf/Makefile b/fs/udf/Makefile
index be845e7..0d4503f 100644
--- a/fs/udf/Makefile
+++ b/fs/udf/Makefile
@@ -6,4 +6,4 @@
 
 udf-objs     := balloc.o dir.o file.o ialloc.o inode.o lowlevel.o namei.o \
 		partition.o super.o truncate.o symlink.o fsync.o \
-		crc.o directory.o misc.o udftime.o unicode.o
+		directory.o misc.o udftime.o unicode.o
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index f855dcb..1b809bd4 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -149,8 +149,7 @@
 		return false;
 
 	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
-	lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu(
-					lvid->freeSpaceTable[partition]) + cnt);
+	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
 	return true;
 }
 
@@ -589,10 +588,8 @@
 					sptr = oepos.bh->b_data + epos.offset;
 					aed = (struct allocExtDesc *)
 						oepos.bh->b_data;
-					aed->lengthAllocDescs =
-						cpu_to_le32(le32_to_cpu(
-							aed->lengthAllocDescs) +
-								adsize);
+					le32_add_cpu(&aed->lengthAllocDescs,
+							adsize);
 				} else {
 					sptr = iinfo->i_ext.i_data +
 								epos.offset;
@@ -645,9 +642,7 @@
 				mark_inode_dirty(table);
 			} else {
 				aed = (struct allocExtDesc *)epos.bh->b_data;
-				aed->lengthAllocDescs =
-					cpu_to_le32(le32_to_cpu(
-					    aed->lengthAllocDescs) + adsize);
+				le32_add_cpu(&aed->lengthAllocDescs, adsize);
 				udf_update_tag(epos.bh->b_data, epos.offset);
 				mark_buffer_dirty(epos.bh);
 			}
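The conversions in this file (and the matching ones in ialloc.c and inode.c below) replace open-coded cpu_to_le32(le32_to_cpu(x) + n) sequences with le32_add_cpu(). A stand-alone sketch of what that helper does, with the byte swapping written out so it behaves the same on big- and little-endian hosts; the kernel helper itself is a short inline, so this is only an assumed equivalent for illustration.

/*
 * Sketch of le32_add_cpu(): add a CPU-native value to a little-endian
 * on-disk 32-bit counter.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t le32;	/* stands in for the kernel's __le32 */

static uint32_t le32_to_cpu(le32 v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static le32 cpu_to_le32(uint32_t v)
{
	le32 out;
	uint8_t *p = (uint8_t *)&out;

	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
	return out;
}

static void le32_add_cpu(le32 *var, int32_t val)
{
	/* negative val (e.g. -adsize above) relies on the usual wraparound */
	*var = cpu_to_le32(le32_to_cpu(*var) + (uint32_t)val);
}

int main(void)
{
	le32 lengthAllocDescs = cpu_to_le32(24);

	le32_add_cpu(&lengthAllocDescs, 8);	/* what the hunks above now do */
	printf("lengthAllocDescs = %u\n", le32_to_cpu(lengthAllocDescs));
	return 0;
}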
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
deleted file mode 100644
index b166129..0000000
--- a/fs/udf/crc.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * crc.c
- *
- * PURPOSE
- *	Routines to generate, calculate, and test a 16-bit CRC.
- *
- * DESCRIPTION
- *	The CRC code was devised by Don P. Mitchell of AT&T Bell Laboratories
- *	and Ned W. Rhodes of Software Systems Group. It has been published in
- *	"Design and Validation of Computer Protocols", Prentice Hall,
- *	Englewood Cliffs, NJ, 1991, Chapter 3, ISBN 0-13-539925-4.
- *
- *	Copyright is held by AT&T.
- *
- *	AT&T gives permission for the free use of the CRC source code.
- *
- * COPYRIGHT
- *	This file is distributed under the terms of the GNU General Public
- *	License (GPL). Copies of the GPL can be obtained from:
- *		ftp://prep.ai.mit.edu/pub/gnu/GPL
- *	Each contributing author retains all rights to their own work.
- */
-
-#include "udfdecl.h"
-
-static uint16_t crc_table[256] = {
-	0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50a5U, 0x60c6U, 0x70e7U,
-	0x8108U, 0x9129U, 0xa14aU, 0xb16bU, 0xc18cU, 0xd1adU, 0xe1ceU, 0xf1efU,
-	0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52b5U, 0x4294U, 0x72f7U, 0x62d6U,
-	0x9339U, 0x8318U, 0xb37bU, 0xa35aU, 0xd3bdU, 0xc39cU, 0xf3ffU, 0xe3deU,
-	0x2462U, 0x3443U, 0x0420U, 0x1401U, 0x64e6U, 0x74c7U, 0x44a4U, 0x5485U,
-	0xa56aU, 0xb54bU, 0x8528U, 0x9509U, 0xe5eeU, 0xf5cfU, 0xc5acU, 0xd58dU,
-	0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76d7U, 0x66f6U, 0x5695U, 0x46b4U,
-	0xb75bU, 0xa77aU, 0x9719U, 0x8738U, 0xf7dfU, 0xe7feU, 0xd79dU, 0xc7bcU,
-	0x48c4U, 0x58e5U, 0x6886U, 0x78a7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U,
-	0xc9ccU, 0xd9edU, 0xe98eU, 0xf9afU, 0x8948U, 0x9969U, 0xa90aU, 0xb92bU,
-	0x5af5U, 0x4ad4U, 0x7ab7U, 0x6a96U, 0x1a71U, 0x0a50U, 0x3a33U, 0x2a12U,
-	0xdbfdU, 0xcbdcU, 0xfbbfU, 0xeb9eU, 0x9b79U, 0x8b58U, 0xbb3bU, 0xab1aU,
-	0x6ca6U, 0x7c87U, 0x4ce4U, 0x5cc5U, 0x2c22U, 0x3c03U, 0x0c60U, 0x1c41U,
-	0xedaeU, 0xfd8fU, 0xcdecU, 0xddcdU, 0xad2aU, 0xbd0bU, 0x8d68U, 0x9d49U,
-	0x7e97U, 0x6eb6U, 0x5ed5U, 0x4ef4U, 0x3e13U, 0x2e32U, 0x1e51U, 0x0e70U,
-	0xff9fU, 0xefbeU, 0xdfddU, 0xcffcU, 0xbf1bU, 0xaf3aU, 0x9f59U, 0x8f78U,
-	0x9188U, 0x81a9U, 0xb1caU, 0xa1ebU, 0xd10cU, 0xc12dU, 0xf14eU, 0xe16fU,
-	0x1080U, 0x00a1U, 0x30c2U, 0x20e3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U,
-	0x83b9U, 0x9398U, 0xa3fbU, 0xb3daU, 0xc33dU, 0xd31cU, 0xe37fU, 0xf35eU,
-	0x02b1U, 0x1290U, 0x22f3U, 0x32d2U, 0x4235U, 0x5214U, 0x6277U, 0x7256U,
-	0xb5eaU, 0xa5cbU, 0x95a8U, 0x8589U, 0xf56eU, 0xe54fU, 0xd52cU, 0xc50dU,
-	0x34e2U, 0x24c3U, 0x14a0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U,
-	0xa7dbU, 0xb7faU, 0x8799U, 0x97b8U, 0xe75fU, 0xf77eU, 0xc71dU, 0xd73cU,
-	0x26d3U, 0x36f2U, 0x0691U, 0x16b0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U,
-	0xd94cU, 0xc96dU, 0xf90eU, 0xe92fU, 0x99c8U, 0x89e9U, 0xb98aU, 0xa9abU,
-	0x5844U, 0x4865U, 0x7806U, 0x6827U, 0x18c0U, 0x08e1U, 0x3882U, 0x28a3U,
-	0xcb7dU, 0xdb5cU, 0xeb3fU, 0xfb1eU, 0x8bf9U, 0x9bd8U, 0xabbbU, 0xbb9aU,
-	0x4a75U, 0x5a54U, 0x6a37U, 0x7a16U, 0x0af1U, 0x1ad0U, 0x2ab3U, 0x3a92U,
-	0xfd2eU, 0xed0fU, 0xdd6cU, 0xcd4dU, 0xbdaaU, 0xad8bU, 0x9de8U, 0x8dc9U,
-	0x7c26U, 0x6c07U, 0x5c64U, 0x4c45U, 0x3ca2U, 0x2c83U, 0x1ce0U, 0x0cc1U,
-	0xef1fU, 0xff3eU, 0xcf5dU, 0xdf7cU, 0xaf9bU, 0xbfbaU, 0x8fd9U, 0x9ff8U,
-	0x6e17U, 0x7e36U, 0x4e55U, 0x5e74U, 0x2e93U, 0x3eb2U, 0x0ed1U, 0x1ef0U
-};
-
-/*
- * udf_crc
- *
- * PURPOSE
- *	Calculate a 16-bit CRC checksum using ITU-T V.41 polynomial.
- *
- * DESCRIPTION
- *	The OSTA-UDF(tm) 1.50 standard states that using CRCs is mandatory.
- *	The polynomial used is:	x^16 + x^12 + x^15 + 1
- *
- * PRE-CONDITIONS
- *	data		Pointer to the data block.
- *	size		Size of the data block.
- *
- * POST-CONDITIONS
- *	<return>	CRC of the data block.
- *
- * HISTORY
- *	July 21, 1997 - Andrew E. Mileski
- *	Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-uint16_t udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
-{
-	while (size--)
-		crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
-
-	return crc;
-}
-
-/****************************************************************************/
-#if defined(TEST)
-
-/*
- * PURPOSE
- *	Test udf_crc()
- *
- * HISTORY
- *	July 21, 1997 - Andrew E. Mileski
- *	Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-
-unsigned char bytes[] = { 0x70U, 0x6AU, 0x77U };
-
-int main(void)
-{
-	unsigned short x;
-
-	x = udf_crc(bytes, sizeof bytes);
-	printf("udf_crc: calculated = %4.4x, correct = %4.4x\n", x, 0x3299U);
-
-	return 0;
-}
-
-#endif /* defined(TEST) */
-
-/****************************************************************************/
-#if defined(GENERATE)
-
-/*
- * PURPOSE
- *	Generate a table for fast 16-bit CRC calculations (any polynomial).
- *
- * DESCRIPTION
- *	The ITU-T V.41 polynomial is 010041.
- *
- * HISTORY
- *	July 21, 1997 - Andrew E. Mileski
- *	Adapted from OSTA-UDF(tm) 1.50 standard.
- */
-
-#include <stdio.h>
-
-int main(int argc, char **argv)
-{
-	unsigned long crc, poly;
-	int n, i;
-
-	/* Get the polynomial */
-	sscanf(argv[1], "%lo", &poly);
-	if (poly & 0xffff0000U) {
-		fprintf(stderr, "polynomial is too large\en");
-		exit(1);
-	}
-
-	printf("/* CRC 0%o */\n", poly);
-
-	/* Create a table */
-	printf("static unsigned short crc_table[256] = {\n");
-	for (n = 0; n < 256; n++) {
-		if (n % 8 == 0)
-			printf("\t");
-		crc = n << 8;
-		for (i = 0; i < 8; i++) {
-			if (crc & 0x8000U)
-				crc = (crc << 1) ^ poly;
-			else
-				crc <<= 1;
-			crc &= 0xFFFFU;
-		}
-		if (n == 255)
-			printf("0x%04xU ", crc);
-		else
-			printf("0x%04xU, ", crc);
-		if (n % 8 == 7)
-			printf("\n");
-	}
-	printf("};\n");
-
-	return 0;
-}
-
-#endif /* defined(GENERATE) */
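With crc.c gone, every udf_crc(buf, len, crc) call becomes crc_itu_t(crc, buf, len), the generic helper enabled by the new "select CRC_ITU_T" in the fs/Kconfig hunk earlier. Both compute the same CRC-16 over polynomial 0x1021 (x^16 + x^12 + x^5 + 1, the octal 010041 mentioned in the deleted generator). A bitwise stand-alone equivalent, checked against the test vector from the deleted TEST block:

/*
 * Bitwise equivalent of the CRC the UDF code now gets from crc_itu_t():
 * CRC-16, polynomial 0x1021, initial value supplied by the caller.  Named
 * differently to make clear it is a sketch, not the library function.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t crc_itu_t_bitwise(uint16_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)data[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* test vector from the deleted TEST block above: expected 0x3299 */
	uint8_t bytes[] = { 0x70, 0x6a, 0x77 };

	printf("crc = 0x%04x (expect 0x3299)\n",
	       crc_itu_t_bitwise(0, bytes, sizeof(bytes)));
	return 0;
}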
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 8d8643ada..62dc270 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -39,13 +39,13 @@
 static int do_udf_readdir(struct inode *dir, struct file *filp,
 			  filldir_t filldir, void *dirent)
 {
-	struct udf_fileident_bh fibh;
+	struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
 	struct fileIdentDesc *fi = NULL;
 	struct fileIdentDesc cfi;
 	int block, iblock;
 	loff_t nf_pos = (filp->f_pos - 1) << 2;
 	int flen;
-	char fname[UDF_NAME_LEN];
+	char *fname = NULL;
 	char *nameptr;
 	uint16_t liu;
 	uint8_t lfi;
@@ -54,23 +54,32 @@
 	kernel_lb_addr eloc;
 	uint32_t elen;
 	sector_t offset;
-	int i, num;
+	int i, num, ret = 0;
 	unsigned int dt_type;
 	struct extent_position epos = { NULL, 0, {0, 0} };
 	struct udf_inode_info *iinfo;
 
 	if (nf_pos >= size)
-		return 0;
+		goto out;
+
+	fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+	if (!fname) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	if (nf_pos == 0)
 		nf_pos = udf_ext0_offset(dir);
 
 	fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
 	iinfo = UDF_I(dir);
-	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
-		fibh.sbh = fibh.ebh = NULL;
-	} else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
-			      &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
+	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+		if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+		    &epos, &eloc, &elen, &offset)
+		    != (EXT_RECORDED_ALLOCATED >> 30)) {
+			ret = -ENOENT;
+			goto out;
+		}
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
 		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -83,8 +92,8 @@
 		}
 
 		if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
-			brelse(epos.bh);
-			return -EIO;
+			ret = -EIO;
+			goto out;
 		}
 
 		if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
@@ -105,9 +114,6 @@
 					brelse(bha[i]);
 			}
 		}
-	} else {
-		brelse(epos.bh);
-		return -ENOENT;
 	}
 
 	while (nf_pos < size) {
@@ -115,13 +121,8 @@
 
 		fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
 					&elen, &offset);
-		if (!fi) {
-			if (fibh.sbh != fibh.ebh)
-				brelse(fibh.ebh);
-			brelse(fibh.sbh);
-			brelse(epos.bh);
-			return 0;
-		}
+		if (!fi)
+			goto out;
 
 		liu = le16_to_cpu(cfi.lengthOfImpUse);
 		lfi = cfi.lengthFileIdent;
@@ -167,53 +168,23 @@
 			dt_type = DT_UNKNOWN;
 		}
 
-		if (flen) {
-			if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0) {
-				if (fibh.sbh != fibh.ebh)
-					brelse(fibh.ebh);
-				brelse(fibh.sbh);
-				brelse(epos.bh);
-	 			return 0;
-			}
-		}
+		if (flen && filldir(dirent, fname, flen, filp->f_pos,
+				    iblock, dt_type) < 0)
+			goto out;
 	} /* end while */
 
 	filp->f_pos = (nf_pos >> 2) + 1;
 
+out:
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
 	brelse(epos.bh);
+	kfree(fname);
 
-	return 0;
+	return ret;
 }
 
-/*
- * udf_readdir
- *
- * PURPOSE
- *	Read a directory entry.
- *
- * DESCRIPTION
- *	Optional - sys_getdents() will return -ENOTDIR if this routine is not
- *	available.
- *
- *	Refer to sys_getdents() in fs/readdir.c
- *	sys_getdents() -> .
- *
- * PRE-CONDITIONS
- *	filp			Pointer to directory file.
- *	buf			Pointer to directory entry buffer.
- *	filldir			Pointer to filldir function.
- *
- * POST-CONDITIONS
- *	<return>		>=0 on success.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
-
 static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
 	struct inode *dir = filp->f_path.dentry->d_inode;
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index 5638771..a0974df 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -70,19 +70,6 @@
 	uint8_t		microseconds;
 } __attribute__ ((packed)) timestamp;
 
-typedef struct {
-	uint16_t	typeAndTimezone;
-	int16_t		year;
-	uint8_t		month;
-	uint8_t		day;
-	uint8_t		hour;
-	uint8_t		minute;
-	uint8_t		second;
-	uint8_t		centiseconds;
-	uint8_t		hundredsOfMicroseconds;
-	uint8_t		microseconds;
-} __attribute__ ((packed)) kernel_timestamp;
-
 /* Type and Time Zone (ECMA 167r3 1/7.3.1) */
 #define TIMESTAMP_TYPE_MASK		0xF000
 #define TIMESTAMP_TYPE_CUT		0x0000
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 97c71ae..0ed6e14 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -27,7 +27,6 @@
 
 #include "udfdecl.h"
 #include <linux/fs.h>
-#include <linux/udf_fs.h>
 #include <asm/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/string.h> /* memset */
@@ -144,40 +143,6 @@
 	return retval;
 }
 
-/*
- * udf_ioctl
- *
- * PURPOSE
- *	Issue an ioctl.
- *
- * DESCRIPTION
- *	Optional - sys_ioctl() will return -ENOTTY if this routine is not
- *	available, and the ioctl cannot be handled without filesystem help.
- *
- *	sys_ioctl() handles these ioctls that apply only to regular files:
- *		FIBMAP [requires udf_block_map()], FIGETBSZ, FIONREAD
- *	These ioctls are also handled by sys_ioctl():
- *		FIOCLEX, FIONCLEX, FIONBIO, FIOASYNC
- *	All other ioctls are passed to the filesystem.
- *
- *	Refer to sys_ioctl() in fs/ioctl.c
- *	sys_ioctl() -> .
- *
- * PRE-CONDITIONS
- *	inode			Pointer to inode that ioctl was issued on.
- *	filp			Pointer to file that ioctl was issued on.
- *	cmd			The ioctl command.
- *	arg			The ioctl argument [can be interpreted as a
- *				user-space pointer if desired].
- *
- * POST-CONDITIONS
- *	<return>		Success (>=0) or an error code (<=0) that
- *				sys_ioctl() will return.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
 int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 	      unsigned long arg)
 {
@@ -225,18 +190,6 @@
 	return result;
 }
 
-/*
- * udf_release_file
- *
- * PURPOSE
- *  Called when all references to the file are closed
- *
- * DESCRIPTION
- *  Discard prealloced blocks
- *
- * HISTORY
- *
- */
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
 	if (filp->f_mode & FMODE_WRITE) {
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 84360315..eb9cfa2 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -21,7 +21,6 @@
 #include "udfdecl.h"
 #include <linux/fs.h>
 #include <linux/quotaops.h>
-#include <linux/udf_fs.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
@@ -47,11 +46,9 @@
 		struct logicalVolIntegrityDescImpUse *lvidiu =
 							udf_sb_lvidiu(sbi);
 		if (S_ISDIR(inode->i_mode))
-			lvidiu->numDirs =
-				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
+			le32_add_cpu(&lvidiu->numDirs, -1);
 		else
-			lvidiu->numFiles =
-				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
+			le32_add_cpu(&lvidiu->numFiles, -1);
 
 		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
@@ -105,11 +102,9 @@
 		lvhd = (struct logicalVolHeaderDesc *)
 				(lvid->logicalVolContentsUse);
 		if (S_ISDIR(mode))
-			lvidiu->numDirs =
-				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
+			le32_add_cpu(&lvidiu->numDirs, 1);
 		else
-			lvidiu->numFiles =
-				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
+			le32_add_cpu(&lvidiu->numFiles, 1);
 		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
 		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
 			uniqueID += 16;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 24cfa55..6e74b11 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -37,6 +37,7 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/slab.h>
+#include <linux/crc-itu-t.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -66,22 +67,7 @@
 			       struct extent_position *);
 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
-/*
- * udf_delete_inode
- *
- * PURPOSE
- *	Clean-up before the specified inode is destroyed.
- *
- * DESCRIPTION
- *	This routine is called when the kernel destroys an inode structure
- *	ie. when iput() finds i_count == 0.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- *
- *  Called at the last iput() if i_nlink is zero.
- */
+
 void udf_delete_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
@@ -323,9 +309,6 @@
 
 	lock_kernel();
 
-	if (block < 0)
-		goto abort_negative;
-
 	iinfo = UDF_I(inode);
 	if (block == iinfo->i_next_alloc_block + 1) {
 		iinfo->i_next_alloc_block++;
@@ -347,10 +330,6 @@
 abort:
 	unlock_kernel();
 	return err;
-
-abort_negative:
-	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
-	goto abort;
 }
 
 static struct buffer_head *udf_getblk(struct inode *inode, long block,
@@ -1116,42 +1095,36 @@
 	fe = (struct fileEntry *)bh->b_data;
 
 	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
-		struct buffer_head *ibh = NULL, *nbh = NULL;
-		struct indirectEntry *ie;
+		struct buffer_head *ibh;
 
 		ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
 					&ident);
-		if (ident == TAG_IDENT_IE) {
-			if (ibh) {
-				kernel_lb_addr loc;
-				ie = (struct indirectEntry *)ibh->b_data;
+		if (ident == TAG_IDENT_IE && ibh) {
+			struct buffer_head *nbh = NULL;
+			kernel_lb_addr loc;
+			struct indirectEntry *ie;
 
-				loc = lelb_to_cpu(ie->indirectICB.extLocation);
+			ie = (struct indirectEntry *)ibh->b_data;
+			loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-				if (ie->indirectICB.extLength &&
-				    (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
-							    &ident))) {
-					if (ident == TAG_IDENT_FE ||
-					    ident == TAG_IDENT_EFE) {
-						memcpy(&iinfo->i_location,
-						       &loc,
-						       sizeof(kernel_lb_addr));
-						brelse(bh);
-						brelse(ibh);
-						brelse(nbh);
-						__udf_read_inode(inode);
-						return;
-					} else {
-						brelse(nbh);
-						brelse(ibh);
-					}
-				} else {
+			if (ie->indirectICB.extLength &&
+				(nbh = udf_read_ptagged(inode->i_sb, loc, 0,
+							&ident))) {
+				if (ident == TAG_IDENT_FE ||
+					ident == TAG_IDENT_EFE) {
+					memcpy(&iinfo->i_location,
+						&loc,
+						sizeof(kernel_lb_addr));
+					brelse(bh);
 					brelse(ibh);
+					brelse(nbh);
+					__udf_read_inode(inode);
+					return;
 				}
+				brelse(nbh);
 			}
-		} else {
-			brelse(ibh);
 		}
+		brelse(ibh);
 	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
 		       le16_to_cpu(fe->icbTag.strategyType));
@@ -1168,8 +1141,6 @@
 {
 	struct fileEntry *fe;
 	struct extendedFileEntry *efe;
-	time_t convtime;
-	long convtime_usec;
 	int offset;
 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
 	struct udf_inode_info *iinfo = UDF_I(inode);
@@ -1257,29 +1228,15 @@
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
 			(inode->i_sb->s_blocksize_bits - 9);
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(fe->accessTime))) {
-			inode->i_atime.tv_sec = convtime;
-			inode->i_atime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
 			inode->i_atime = sbi->s_record_time;
-		}
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(fe->modificationTime))) {
-			inode->i_mtime.tv_sec = convtime;
-			inode->i_mtime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_mtime,
+					    fe->modificationTime))
 			inode->i_mtime = sbi->s_record_time;
-		}
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(fe->attrTime))) {
-			inode->i_ctime.tv_sec = convtime;
-			inode->i_ctime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
 			inode->i_ctime = sbi->s_record_time;
-		}
 
 		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
 		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1289,37 +1246,18 @@
 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
 		    (inode->i_sb->s_blocksize_bits - 9);
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(efe->accessTime))) {
-			inode->i_atime.tv_sec = convtime;
-			inode->i_atime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
 			inode->i_atime = sbi->s_record_time;
-		}
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(efe->modificationTime))) {
-			inode->i_mtime.tv_sec = convtime;
-			inode->i_mtime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_mtime,
+					    efe->modificationTime))
 			inode->i_mtime = sbi->s_record_time;
-		}
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(efe->createTime))) {
-			iinfo->i_crtime.tv_sec = convtime;
-			iinfo->i_crtime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
 			iinfo->i_crtime = sbi->s_record_time;
-		}
 
-		if (udf_stamp_to_time(&convtime, &convtime_usec,
-				      lets_to_cpu(efe->attrTime))) {
-			inode->i_ctime.tv_sec = convtime;
-			inode->i_ctime.tv_nsec = convtime_usec * 1000;
-		} else {
+		if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
 			inode->i_ctime = sbi->s_record_time;
-		}
 
 		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
 		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1338,6 +1276,7 @@
 	case ICBTAG_FILE_TYPE_REALTIME:
 	case ICBTAG_FILE_TYPE_REGULAR:
 	case ICBTAG_FILE_TYPE_UNDEF:
+	case ICBTAG_FILE_TYPE_VAT20:
 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 			inode->i_data.a_ops = &udf_adinicb_aops;
 		else
@@ -1363,6 +1302,15 @@
 		inode->i_op = &page_symlink_inode_operations;
 		inode->i_mode = S_IFLNK | S_IRWXUGO;
 		break;
+	case ICBTAG_FILE_TYPE_MAIN:
+		udf_debug("METADATA FILE-----\n");
+		break;
+	case ICBTAG_FILE_TYPE_MIRROR:
+		udf_debug("METADATA MIRROR FILE-----\n");
+		break;
+	case ICBTAG_FILE_TYPE_BITMAP:
+		udf_debug("METADATA BITMAP FILE-----\n");
+		break;
 	default:
 		printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
 				"file type=%d\n", inode->i_ino,
@@ -1416,21 +1364,6 @@
 	return mode;
 }
 
-/*
- * udf_write_inode
- *
- * PURPOSE
- *	Write out the specified inode.
- *
- * DESCRIPTION
- *	This routine is called whenever an inode is synced.
- *	Currently this routine is just a placeholder.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
-
 int udf_write_inode(struct inode *inode, int sync)
 {
 	int ret;
@@ -1455,7 +1388,6 @@
 	uint32_t udfperms;
 	uint16_t icbflags;
 	uint16_t crclen;
-	kernel_timestamp cpu_time;
 	int err = 0;
 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
@@ -1488,9 +1420,9 @@
 						iinfo->i_location.
 							logicalBlockNum);
 		use->descTag.descCRCLength = cpu_to_le16(crclen);
-		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use +
-							   sizeof(tag), crclen,
-							   0));
+		use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
+							   sizeof(tag),
+							   crclen));
 		use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
 
 		mark_buffer_dirty(bh);
@@ -1558,12 +1490,9 @@
 			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
 			(blocksize_bits - 9));
 
-		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
-			fe->accessTime = cpu_to_lets(cpu_time);
-		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
-			fe->modificationTime = cpu_to_lets(cpu_time);
-		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
-			fe->attrTime = cpu_to_lets(cpu_time);
+		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
+		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
+		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
 		memset(&(fe->impIdent), 0, sizeof(regid));
 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1598,14 +1527,10 @@
 		     iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
 			iinfo->i_crtime = inode->i_ctime;
 
-		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
-			efe->accessTime = cpu_to_lets(cpu_time);
-		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
-			efe->modificationTime = cpu_to_lets(cpu_time);
-		if (udf_time_to_stamp(&cpu_time, iinfo->i_crtime))
-			efe->createTime = cpu_to_lets(cpu_time);
-		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
-			efe->attrTime = cpu_to_lets(cpu_time);
+		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
+		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
+		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
+		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
 
 		memset(&(efe->impIdent), 0, sizeof(regid));
 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
@@ -1660,8 +1585,8 @@
 	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
 								sizeof(tag);
 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
-	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag),
-						  crclen, 0));
+	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(tag),
+						  crclen));
 	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
 
 	/* write the data blocks */
@@ -1778,9 +1703,7 @@
 
 			if (epos->bh) {
 				aed = (struct allocExtDesc *)epos->bh->b_data;
-				aed->lengthAllocDescs =
-					cpu_to_le32(le32_to_cpu(
-					aed->lengthAllocDescs) + adsize);
+				le32_add_cpu(&aed->lengthAllocDescs, adsize);
 			} else {
 				iinfo->i_lenAlloc += adsize;
 				mark_inode_dirty(inode);
@@ -1830,9 +1753,7 @@
 		mark_inode_dirty(inode);
 	} else {
 		aed = (struct allocExtDesc *)epos->bh->b_data;
-		aed->lengthAllocDescs =
-			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) +
-				    adsize);
+		le32_add_cpu(&aed->lengthAllocDescs, adsize);
 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
 				UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 			udf_update_tag(epos->bh->b_data,
@@ -2046,9 +1967,7 @@
 			mark_inode_dirty(inode);
 		} else {
 			aed = (struct allocExtDesc *)oepos.bh->b_data;
-			aed->lengthAllocDescs =
-				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
-					    (2 * adsize));
+			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(oepos.bh->b_data,
@@ -2065,9 +1984,7 @@
 			mark_inode_dirty(inode);
 		} else {
 			aed = (struct allocExtDesc *)oepos.bh->b_data;
-			aed->lengthAllocDescs =
-				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
-					    adsize);
+			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
 				udf_update_tag(oepos.bh->b_data,
@@ -2095,11 +2012,6 @@
 	int8_t etype;
 	struct udf_inode_info *iinfo;
 
-	if (block < 0) {
-		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
-		return -1;
-	}
-
 	iinfo = UDF_I(inode);
 	pos->offset = 0;
 	pos->block = iinfo->i_location;
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 579bae7..703843f 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -23,7 +23,6 @@
 #include <linux/cdrom.h>
 #include <asm/uaccess.h>
 
-#include <linux/udf_fs.h>
 #include "udf_sb.h"
 
 unsigned int udf_get_last_session(struct super_block *sb)
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a1d6da0..84bf0fd 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -23,8 +23,8 @@
 
 #include <linux/fs.h>
 #include <linux/string.h>
-#include <linux/udf_fs.h>
 #include <linux/buffer_head.h>
+#include <linux/crc-itu-t.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -136,8 +136,8 @@
 		/* rewrite CRC + checksum of eahd */
 		crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
 		eahd->descTag.descCRCLength = cpu_to_le16(crclen);
-		eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd +
-						sizeof(tag), crclen, 0));
+		eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
+						sizeof(tag), crclen));
 		eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
 		iinfo->i_lenEAttr += size;
 		return (struct genericFormat *)&ea[offset];
@@ -204,16 +204,15 @@
 {
 	tag *tag_p;
 	struct buffer_head *bh = NULL;
-	struct udf_sb_info *sbi = UDF_SB(sb);
 
 	/* Read the block */
 	if (block == 0xFFFFFFFF)
 		return NULL;
 
-	bh = udf_tread(sb, block + sbi->s_session);
+	bh = udf_tread(sb, block);
 	if (!bh) {
 		udf_debug("block=%d, location=%d: read failed\n",
-			  block + sbi->s_session, location);
+			  block, location);
 		return NULL;
 	}
 
@@ -223,8 +222,7 @@
 
 	if (location != le32_to_cpu(tag_p->tagLocation)) {
 		udf_debug("location mismatch block %u, tag %u != %u\n",
-			  block + sbi->s_session,
-			  le32_to_cpu(tag_p->tagLocation), location);
+			  block, le32_to_cpu(tag_p->tagLocation), location);
 		goto error_out;
 	}
 
@@ -244,13 +242,13 @@
 
 	/* Verify the descriptor CRC */
 	if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
-	    le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
-					le16_to_cpu(tag_p->descCRCLength), 0))
+	    le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
+					bh->b_data + sizeof(tag),
+					le16_to_cpu(tag_p->descCRCLength)))
 		return bh;
 
-	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
-		  block + sbi->s_session, le16_to_cpu(tag_p->descCRC),
-		  le16_to_cpu(tag_p->descCRCLength));
+	udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
+	    le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
 
 error_out:
 	brelse(bh);
@@ -270,7 +268,7 @@
 	length -= sizeof(tag);
 
 	tptr->descCRCLength = cpu_to_le16(length);
-	tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0));
+	tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(tag), length));
 	tptr->tagChecksum = udf_tag_checksum(tptr);
 }
 
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 112a5fb..ba5537d 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -31,6 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
+#include <linux/crc-itu-t.h>
 
 static inline int udf_match(int len1, const char *name1, int len2,
 			    const char *name2)
@@ -97,25 +98,23 @@
 		memset(fibh->ebh->b_data, 0x00, padlen + offset);
 	}
 
-	crc = udf_crc((uint8_t *)cfi + sizeof(tag),
-		      sizeof(struct fileIdentDesc) - sizeof(tag), 0);
+	crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(tag),
+		      sizeof(struct fileIdentDesc) - sizeof(tag));
 
 	if (fibh->sbh == fibh->ebh) {
-		crc = udf_crc((uint8_t *)sfi->impUse,
+		crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
 			      crclen + sizeof(tag) -
-			      sizeof(struct fileIdentDesc), crc);
+			      sizeof(struct fileIdentDesc));
 	} else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
-		crc = udf_crc(fibh->ebh->b_data +
+		crc = crc_itu_t(crc, fibh->ebh->b_data +
 					sizeof(struct fileIdentDesc) +
 					fibh->soffset,
 			      crclen + sizeof(tag) -
-					sizeof(struct fileIdentDesc),
-			      crc);
+					sizeof(struct fileIdentDesc));
 	} else {
-		crc = udf_crc((uint8_t *)sfi->impUse,
-			      -fibh->soffset - sizeof(struct fileIdentDesc),
-			      crc);
-		crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
+		crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
+			      -fibh->soffset - sizeof(struct fileIdentDesc));
+		crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset);
 	}
 
 	cfi->descTag.descCRC = cpu_to_le16(crc);
@@ -149,7 +148,7 @@
 	struct fileIdentDesc *fi = NULL;
 	loff_t f_pos;
 	int block, flen;
-	char fname[UDF_NAME_LEN];
+	char *fname = NULL;
 	char *nameptr;
 	uint8_t lfi;
 	uint16_t liu;
@@ -163,12 +162,12 @@
 	size = udf_ext0_offset(dir) + dir->i_size;
 	f_pos = udf_ext0_offset(dir);
 
+	fibh->sbh = fibh->ebh = NULL;
 	fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
-	if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-		fibh->sbh = fibh->ebh = NULL;
-	else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
-			      &epos, &eloc, &elen, &offset) ==
-					(EXT_RECORDED_ALLOCATED >> 30)) {
+	if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+		if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+		    &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
+			goto out_err;
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
 		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -179,25 +178,19 @@
 			offset = 0;
 
 		fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
-		if (!fibh->sbh) {
-			brelse(epos.bh);
-			return NULL;
-		}
-	} else {
-		brelse(epos.bh);
-		return NULL;
+		if (!fibh->sbh)
+			goto out_err;
 	}
 
+	fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+	if (!fname)
+		goto out_err;
+
 	while (f_pos < size) {
 		fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
 					&elen, &offset);
-		if (!fi) {
-			if (fibh->sbh != fibh->ebh)
-				brelse(fibh->ebh);
-			brelse(fibh->sbh);
-			brelse(epos.bh);
-			return NULL;
-		}
+		if (!fi)
+			goto out_err;
 
 		liu = le16_to_cpu(cfi->lengthOfImpUse);
 		lfi = cfi->lengthFileIdent;
@@ -237,53 +230,22 @@
 
 		flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
 		if (flen && udf_match(flen, fname, dentry->d_name.len,
-				      dentry->d_name.name)) {
-			brelse(epos.bh);
-			return fi;
-		}
+				      dentry->d_name.name))
+			goto out_ok;
 	}
 
+out_err:
+	fi = NULL;
 	if (fibh->sbh != fibh->ebh)
 		brelse(fibh->ebh);
 	brelse(fibh->sbh);
+out_ok:
 	brelse(epos.bh);
+	kfree(fname);
 
-	return NULL;
+	return fi;
 }
 
-/*
- * udf_lookup
- *
- * PURPOSE
- *	Look-up the inode for a given name.
- *
- * DESCRIPTION
- *	Required - lookup_dentry() will return -ENOTDIR if this routine is not
- *	available for a directory. The filesystem is useless if this routine is
- *	not available for at least the filesystem's root directory.
- *
- *	This routine is passed an incomplete dentry - it must be completed by
- *	calling d_add(dentry, inode). If the name does not exist, then the
- *	specified inode must be set to null. An error should only be returned
- *	when the lookup fails for a reason other than the name not existing.
- *	Note that the directory inode semaphore is held during the call.
- *
- *	Refer to lookup_dentry() in fs/namei.c
- *	lookup_dentry() -> lookup() -> real_lookup() -> .
- *
- * PRE-CONDITIONS
- *	dir			Pointer to inode of parent directory.
- *	dentry			Pointer to dentry to complete.
- *	nd			Pointer to lookup nameidata
- *
- * POST-CONDITIONS
- *	<return>		Zero on success.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
-
 static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 				 struct nameidata *nd)
 {
@@ -336,11 +298,9 @@
 {
 	struct super_block *sb = dir->i_sb;
 	struct fileIdentDesc *fi = NULL;
-	char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
+	char *name = NULL;
 	int namelen;
 	loff_t f_pos;
-	int flen;
-	char *nameptr;
 	loff_t size = udf_ext0_offset(dir) + dir->i_size;
 	int nfidlen;
 	uint8_t lfi;
@@ -352,16 +312,23 @@
 	struct extent_position epos = {};
 	struct udf_inode_info *dinfo;
 
+	fibh->sbh = fibh->ebh = NULL;
+	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+	if (!name) {
+		*err = -ENOMEM;
+		goto out_err;
+	}
+
 	if (dentry) {
 		if (!dentry->d_name.len) {
 			*err = -EINVAL;
-			return NULL;
+			goto out_err;
 		}
 		namelen = udf_put_filename(sb, dentry->d_name.name, name,
 						 dentry->d_name.len);
 		if (!namelen) {
 			*err = -ENAMETOOLONG;
-			return NULL;
+			goto out_err;
 		}
 	} else {
 		namelen = 0;
@@ -373,11 +340,14 @@
 
 	fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
 	dinfo = UDF_I(dir);
-	if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
-		fibh->sbh = fibh->ebh = NULL;
-	else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
-			      &epos, &eloc, &elen, &offset) ==
-					(EXT_RECORDED_ALLOCATED >> 30)) {
+	if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+		if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+		    &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
+			block = udf_get_lb_pblock(dir->i_sb,
+					dinfo->i_location, 0);
+			fibh->soffset = fibh->eoffset = sb->s_blocksize;
+			goto add;
+		}
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
 		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -389,17 +359,11 @@
 
 		fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
 		if (!fibh->sbh) {
-			brelse(epos.bh);
 			*err = -EIO;
-			return NULL;
+			goto out_err;
 		}
 
 		block = dinfo->i_location.logicalBlockNum;
-	} else {
-		block = udf_get_lb_pblock(dir->i_sb, dinfo->i_location, 0);
-		fibh->sbh = fibh->ebh = NULL;
-		fibh->soffset = fibh->eoffset = sb->s_blocksize;
-		goto add;
 	}
 
 	while (f_pos < size) {
@@ -407,41 +371,16 @@
 					&elen, &offset);
 
 		if (!fi) {
-			if (fibh->sbh != fibh->ebh)
-				brelse(fibh->ebh);
-			brelse(fibh->sbh);
-			brelse(epos.bh);
 			*err = -EIO;
-			return NULL;
+			goto out_err;
 		}
 
 		liu = le16_to_cpu(cfi->lengthOfImpUse);
 		lfi = cfi->lengthFileIdent;
 
-		if (fibh->sbh == fibh->ebh)
-			nameptr = fi->fileIdent + liu;
-		else {
-			int poffset;	/* Unpaded ending offset */
-
-			poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
-					liu + lfi;
-
-			if (poffset >= lfi)
-				nameptr = (char *)(fibh->ebh->b_data +
-						   poffset - lfi);
-			else {
-				nameptr = fname;
-				memcpy(nameptr, fi->fileIdent + liu,
-					lfi - poffset);
-				memcpy(nameptr + lfi - poffset,
-					fibh->ebh->b_data, poffset);
-			}
-		}
-
 		if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
 			if (((sizeof(struct fileIdentDesc) +
 					liu + lfi + 3) & ~3) == nfidlen) {
-				brelse(epos.bh);
 				cfi->descTag.tagSerialNum = cpu_to_le16(1);
 				cfi->fileVersionNum = cpu_to_le16(1);
 				cfi->fileCharacteristics = 0;
@@ -449,27 +388,13 @@
 				cfi->lengthOfImpUse = cpu_to_le16(0);
 				if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
 						  name))
-					return fi;
+					goto out_ok;
 				else {
 					*err = -EIO;
-					return NULL;
+					goto out_err;
 				}
 			}
 		}
-
-		if (!lfi || !dentry)
-			continue;
-
-		flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
-		if (flen && udf_match(flen, fname, dentry->d_name.len,
-				      dentry->d_name.name)) {
-			if (fibh->sbh != fibh->ebh)
-				brelse(fibh->ebh);
-			brelse(fibh->sbh);
-			brelse(epos.bh);
-			*err = -EEXIST;
-			return NULL;
-		}
 	}
 
 add:
@@ -496,7 +421,7 @@
 		fibh->sbh = fibh->ebh =
 				udf_expand_dir_adinicb(dir, &block, err);
 		if (!fibh->sbh)
-			return NULL;
+			goto out_err;
 		epos.block = dinfo->i_location;
 		epos.offset = udf_file_entry_alloc_offset(dir);
 		/* Load extent udf_expand_dir_adinicb() has created */
@@ -537,11 +462,8 @@
 						dir->i_sb->s_blocksize_bits);
 		fibh->ebh = udf_bread(dir,
 				f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
-		if (!fibh->ebh) {
-			brelse(epos.bh);
-			brelse(fibh->sbh);
-			return NULL;
-		}
+		if (!fibh->ebh)
+			goto out_err;
 
 		if (!fibh->soffset) {
 			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
@@ -572,20 +494,25 @@
 	cfi->lengthFileIdent = namelen;
 	cfi->lengthOfImpUse = cpu_to_le16(0);
 	if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
-		brelse(epos.bh);
 		dir->i_size += nfidlen;
 		if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 			dinfo->i_lenAlloc += nfidlen;
 		mark_inode_dirty(dir);
-		return fi;
+		goto out_ok;
 	} else {
-		brelse(epos.bh);
-		if (fibh->sbh != fibh->ebh)
-			brelse(fibh->ebh);
-		brelse(fibh->sbh);
 		*err = -EIO;
-		return NULL;
+		goto out_err;
 	}
+
+out_err:
+	fi = NULL;
+	if (fibh->sbh != fibh->ebh)
+		brelse(fibh->ebh);
+	brelse(fibh->sbh);
+out_ok:
+	brelse(epos.bh);
+	kfree(name);
+	return fi;
 }
 
 static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
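Note: the udf_find_entry()/udf_add_entry() rewrite above replaces the duplicated brelse()/kfree() calls before every return with a single out_err/out_ok exit path and moves the on-stack name buffers to kmalloc(). A minimal userspace sketch of that single-exit cleanup style (the do_lookup() helper and buffer size are invented for illustration, not the kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every failure path jumps to out_err, which releases whatever has been
 * allocated so far; the success path skips straight to out_ok.  free(NULL)
 * is a no-op, so unconditional cleanup is safe. */
static char *do_lookup(const char *wanted)
{
	char *scratch = NULL;	/* stand-in for the kmalloc'ed fname buffer */
	char *result = NULL;

	scratch = malloc(256);
	if (!scratch)
		goto out_err;

	if (!wanted || !*wanted)
		goto out_err;	/* nothing to look up */

	result = strdup(wanted);
	if (!result)
		goto out_err;
	goto out_ok;

out_err:
	free(result);
	result = NULL;
out_ok:
	free(scratch);		/* released on both paths */
	return result;
}

int main(void)
{
	char *r = do_lookup("hello");

	if (r) {
		printf("found: %s\n", r);
		free(r);
	}
	return 0;
}

The payoff is the same as in the patch: adding a new allocation only requires touching the two labels, not every early return.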
@@ -940,7 +867,7 @@
 	char *ea;
 	int err;
 	int block;
-	char name[UDF_NAME_LEN];
+	char *name = NULL;
 	int namelen;
 	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
@@ -950,6 +877,12 @@
 	if (!inode)
 		goto out;
 
+	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+	if (!name) {
+		err = -ENOMEM;
+		goto out_no_entry;
+	}
+
 	iinfo = UDF_I(inode);
 	inode->i_mode = S_IFLNK | S_IRWXUGO;
 	inode->i_data.a_ops = &udf_symlink_aops;
@@ -1089,6 +1022,7 @@
 	err = 0;
 
 out:
+	kfree(name);
 	unlock_kernel();
 	return err;
 
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index fc53334..63610f0 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -24,7 +24,6 @@
 
 #include <linux/fs.h>
 #include <linux/string.h>
-#include <linux/udf_fs.h>
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 
@@ -55,11 +54,10 @@
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct udf_part_map *map;
 	struct udf_virtual_data *vdata;
-	struct udf_inode_info *iinfo;
+	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
 
 	map = &sbi->s_partmaps[partition];
 	vdata = &map->s_type_specific.s_virtual;
-	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
 
 	if (block > vdata->s_num_entries) {
 		udf_debug("Trying to access block beyond end of VAT "
@@ -67,6 +65,12 @@
 		return 0xFFFFFFFF;
 	}
 
+	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+		loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
+			vdata->s_start_offset))[block]);
+		goto translate;
+	}
+	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
 	if (block >= index) {
 		block -= index;
 		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
@@ -89,7 +93,7 @@
 
 	brelse(bh);
 
-	iinfo = UDF_I(sbi->s_vat_inode);
+translate:
 	if (iinfo->i_location.partitionReferenceNum == partition) {
 		udf_debug("recursive call to udf_get_pblock!\n");
 		return 0xFFFFFFFF;
@@ -263,3 +267,58 @@
 
 	return 0;
 }
+
+static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
+					uint16_t partition, uint32_t offset)
+{
+	struct super_block *sb = inode->i_sb;
+	struct udf_part_map *map;
+	kernel_lb_addr eloc;
+	uint32_t elen;
+	sector_t ext_offset;
+	struct extent_position epos = {};
+	uint32_t phyblock;
+
+	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
+						(EXT_RECORDED_ALLOCATED >> 30))
+		phyblock = 0xFFFFFFFF;
+	else {
+		map = &UDF_SB(sb)->s_partmaps[partition];
+		/* map to sparable/physical partition desc */
+		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
+			map->s_partition_num, ext_offset + offset);
+	}
+
+	brelse(epos.bh);
+	return phyblock;
+}
+
+uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
+				uint16_t partition, uint32_t offset)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map;
+	struct udf_meta_data *mdata;
+	uint32_t retblk;
+	struct inode *inode;
+
+	udf_debug("READING from METADATA\n");
+
+	map = &sbi->s_partmaps[partition];
+	mdata = &map->s_type_specific.s_metadata;
+	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
+
+	/* We shouldn't mount such media... */
+	BUG_ON(!inode);
+	retblk = udf_try_read_meta(inode, block, partition, offset);
+	if (retblk == 0xFFFFFFFF) {
+		udf_warning(sb, __func__, "error reading from METADATA, "
+			"trying to read from MIRROR");
+		inode = mdata->s_mirror_fe;
+		if (!inode)
+			return 0xFFFFFFFF;
+		retblk = udf_try_read_meta(inode, block, partition, offset);
+	}
+
+	return retblk;
+}
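Note: udf_get_pblock_meta25() above prefers the metadata file entry and falls back to the mirror file entry when the primary lookup returns the 0xFFFFFFFF sentinel. A self-contained sketch of that fallback pattern (the read_from() helper and the block arithmetic are invented, not the kernel API):

#include <stdint.h>
#include <stdio.h>

#define INVALID_BLOCK 0xFFFFFFFFu

/* Invented stand-in for udf_try_read_meta(): pretend the primary copy
 * cannot map odd-numbered blocks. */
static uint32_t read_from(int use_mirror, uint32_t block)
{
	if (!use_mirror && (block & 1))
		return INVALID_BLOCK;
	return block + 100;	/* fake physical block number */
}

static uint32_t map_meta_block(uint32_t block)
{
	uint32_t phys = read_from(0, block);	/* primary metadata file */

	if (phys == INVALID_BLOCK) {
		fprintf(stderr, "primary read failed, trying mirror\n");
		phys = read_from(1, block);	/* mirror metadata file */
	}
	return phys;	/* still INVALID_BLOCK if both copies failed */
}

int main(void)
{
	printf("%u\n", map_meta_block(2));	/* served by the primary */
	printf("%u\n", map_meta_block(3));	/* served by the mirror */
	return 0;
}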
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f3ac4ab..b564fc1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -55,9 +55,10 @@
 #include <linux/errno.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/bitmap.h>
+#include <linux/crc-itu-t.h>
 #include <asm/byteorder.h>
 
-#include <linux/udf_fs.h>
 #include "udf_sb.h"
 #include "udf_i.h"
 
@@ -84,22 +85,19 @@
 static int udf_remount_fs(struct super_block *, int *, char *);
 static int udf_check_valid(struct super_block *, int, int);
 static int udf_vrs(struct super_block *sb, int silent);
-static int udf_load_partition(struct super_block *, kernel_lb_addr *);
-static int udf_load_logicalvol(struct super_block *, struct buffer_head *,
-			       kernel_lb_addr *);
 static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
 static void udf_find_anchor(struct super_block *);
 static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
 			    kernel_lb_addr *);
-static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
 static void udf_load_fileset(struct super_block *, struct buffer_head *,
 			     kernel_lb_addr *);
-static int udf_load_partdesc(struct super_block *, struct buffer_head *);
 static void udf_open_lvid(struct super_block *);
 static void udf_close_lvid(struct super_block *);
 static unsigned int udf_count_free(struct super_block *);
 static int udf_statfs(struct dentry *, struct kstatfs *);
 static int udf_show_options(struct seq_file *, struct vfsmount *);
+static void udf_error(struct super_block *sb, const char *function,
+		      const char *fmt, ...);
 
 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
 {
@@ -587,48 +585,10 @@
 	return 0;
 }
 
-/*
- * udf_set_blocksize
- *
- * PURPOSE
- *	Set the block size to be used in all transfers.
- *
- * DESCRIPTION
- *	To allow room for a DMA transfer, it is best to guess big when unsure.
- *	This routine picks 2048 bytes as the blocksize when guessing. This
- *	should be adequate until devices with larger block sizes become common.
- *
- *	Note that the Linux kernel can currently only deal with blocksizes of
- *	512, 1024, 2048, 4096, and 8192 bytes.
- *
- * PRE-CONDITIONS
- *	sb			Pointer to _locked_ superblock.
- *
- * POST-CONDITIONS
- *	sb->s_blocksize		Blocksize.
- *	sb->s_blocksize_bits	log2 of blocksize.
- *	<return>	0	Blocksize is valid.
- *	<return>	1	Blocksize is invalid.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
-static int udf_set_blocksize(struct super_block *sb, int bsize)
-{
-	if (!sb_min_blocksize(sb, bsize)) {
-		udf_debug("Bad block size (%d)\n", bsize);
-		printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
-		return 0;
-	}
-
-	return sb->s_blocksize;
-}
-
 static int udf_vrs(struct super_block *sb, int silent)
 {
 	struct volStructDesc *vsd = NULL;
-	int sector = 32768;
+	loff_t sector = 32768;
 	int sectorsize;
 	struct buffer_head *bh = NULL;
 	int iso9660 = 0;
@@ -649,7 +609,8 @@
 	sector += (sbi->s_session << sb->s_blocksize_bits);
 
 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
-		  (sector >> sb->s_blocksize_bits), sb->s_blocksize);
+		  (unsigned int)(sector >> sb->s_blocksize_bits),
+		  sb->s_blocksize);
 	/* Process the sequence (if applicable) */
 	for (; !nsr02 && !nsr03; sector += sectorsize) {
 		/* Read a block */
@@ -719,162 +680,140 @@
 }
 
 /*
- * udf_find_anchor
+ * Check whether there is an anchor block in the given block
+ */
+static int udf_check_anchor_block(struct super_block *sb, sector_t block,
+					bool varconv)
+{
+	struct buffer_head *bh = NULL;
+	tag *t;
+	uint16_t ident;
+	uint32_t location;
+
+	if (varconv) {
+		if (udf_fixed_to_variable(block) >=
+		    sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
+			return 0;
+		bh = sb_bread(sb, udf_fixed_to_variable(block));
+	} else {
+		bh = sb_bread(sb, block);
+	}
+
+	if (!bh)
+		return 0;
+
+	t = (tag *)bh->b_data;
+	ident = le16_to_cpu(t->tagIdent);
+	location = le32_to_cpu(t->tagLocation);
+	brelse(bh);
+	if (ident != TAG_IDENT_AVDP)
+		return 0;
+	return location == block;
+}
+
+/* Search for an anchor volume descriptor pointer */
+static sector_t udf_scan_anchors(struct super_block *sb, bool varconv,
+					sector_t lastblock)
+{
+	sector_t last[6];
+	int i;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+
+	last[0] = lastblock;
+	last[1] = last[0] - 1;
+	last[2] = last[0] + 1;
+	last[3] = last[0] - 2;
+	last[4] = last[0] - 150;
+	last[5] = last[0] - 152;
+
+	/*  according to spec, anchor is in either:
+	 *     block 256
+	 *     lastblock-256
+	 *     lastblock
+	 *  however, if the disc isn't closed, it could be 512 */
+
+	for (i = 0; i < ARRAY_SIZE(last); i++) {
+		if (last[i] < 0)
+			continue;
+		if (last[i] >= sb->s_bdev->bd_inode->i_size >>
+				sb->s_blocksize_bits)
+			continue;
+
+		if (udf_check_anchor_block(sb, last[i], varconv)) {
+			sbi->s_anchor[0] = last[i];
+			sbi->s_anchor[1] = last[i] - 256;
+			return last[i];
+		}
+
+		if (last[i] < 256)
+			continue;
+
+		if (udf_check_anchor_block(sb, last[i] - 256, varconv)) {
+			sbi->s_anchor[1] = last[i] - 256;
+			return last[i];
+		}
+	}
+
+	if (udf_check_anchor_block(sb, sbi->s_session + 256, varconv)) {
+		sbi->s_anchor[0] = sbi->s_session + 256;
+		return last[0];
+	}
+	if (udf_check_anchor_block(sb, sbi->s_session + 512, varconv)) {
+		sbi->s_anchor[0] = sbi->s_session + 512;
+		return last[0];
+	}
+	return 0;
+}
+
+/*
+ * Find an anchor volume descriptor. The function expects sbi->s_last_block to
+ * be the last block on the media.
  *
- * PURPOSE
- *	Find an anchor volume descriptor.
+ * Located anchor block numbers are recorded in sbi->s_anchor[].
  *
- * PRE-CONDITIONS
- *	sb			Pointer to _locked_ superblock.
- *	lastblock		Last block on media.
- *
- * POST-CONDITIONS
- *	<return>		1 if not found, 0 if ok
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
  */
 static void udf_find_anchor(struct super_block *sb)
 {
-	int lastblock;
+	sector_t lastblock;
 	struct buffer_head *bh = NULL;
 	uint16_t ident;
-	uint32_t location;
 	int i;
-	struct udf_sb_info *sbi;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 
-	sbi = UDF_SB(sb);
-	lastblock = sbi->s_last_block;
+	lastblock = udf_scan_anchors(sb, 0, sbi->s_last_block);
+	if (lastblock)
+		goto check_anchor;
 
+	/* No anchor found? Try VARCONV conversion of block numbers */
+	/* First, try without converting the number of the last block */
+	lastblock = udf_scan_anchors(sb, 1,
+				udf_variable_to_fixed(sbi->s_last_block));
 	if (lastblock) {
-		int varlastblock = udf_variable_to_fixed(lastblock);
-		int last[] =  { lastblock, lastblock - 2,
-				lastblock - 150, lastblock - 152,
-				varlastblock, varlastblock - 2,
-				varlastblock - 150, varlastblock - 152 };
-
-		lastblock = 0;
-
-		/* Search for an anchor volume descriptor pointer */
-
-		/*  according to spec, anchor is in either:
-		 *     block 256
-		 *     lastblock-256
-		 *     lastblock
-		 *  however, if the disc isn't closed, it could be 512 */
-
-		for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
-			ident = location = 0;
-			if (last[i] >= 0) {
-				bh = sb_bread(sb, last[i]);
-				if (bh) {
-					tag *t = (tag *)bh->b_data;
-					ident = le16_to_cpu(t->tagIdent);
-					location = le32_to_cpu(t->tagLocation);
-					brelse(bh);
-				}
-			}
-
-			if (ident == TAG_IDENT_AVDP) {
-				if (location == last[i] - sbi->s_session) {
-					lastblock = last[i] - sbi->s_session;
-					sbi->s_anchor[0] = lastblock;
-					sbi->s_anchor[1] = lastblock - 256;
-				} else if (location ==
-						udf_variable_to_fixed(last[i]) -
-							sbi->s_session) {
-					UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
-					lastblock =
-						udf_variable_to_fixed(last[i]) -
-							sbi->s_session;
-					sbi->s_anchor[0] = lastblock;
-					sbi->s_anchor[1] = lastblock - 256 -
-								sbi->s_session;
-				} else {
-					udf_debug("Anchor found at block %d, "
-						  "location mismatch %d.\n",
-						  last[i], location);
-				}
-			} else if (ident == TAG_IDENT_FE ||
-					ident == TAG_IDENT_EFE) {
-				lastblock = last[i];
-				sbi->s_anchor[3] = 512;
-			} else {
-				ident = location = 0;
-				if (last[i] >= 256) {
-					bh = sb_bread(sb, last[i] - 256);
-					if (bh) {
-						tag *t = (tag *)bh->b_data;
-						ident = le16_to_cpu(
-								t->tagIdent);
-						location = le32_to_cpu(
-								t->tagLocation);
-						brelse(bh);
-					}
-				}
-
-				if (ident == TAG_IDENT_AVDP &&
-				    location == last[i] - 256 -
-						sbi->s_session) {
-					lastblock = last[i];
-					sbi->s_anchor[1] = last[i] - 256;
-				} else {
-					ident = location = 0;
-					if (last[i] >= 312 + sbi->s_session) {
-						bh = sb_bread(sb,
-								last[i] - 312 -
-								sbi->s_session);
-						if (bh) {
-							tag *t = (tag *)
-								 bh->b_data;
-							ident = le16_to_cpu(
-								t->tagIdent);
-							location = le32_to_cpu(
-								t->tagLocation);
-							brelse(bh);
-						}
-					}
-
-					if (ident == TAG_IDENT_AVDP &&
-					    location == udf_variable_to_fixed(last[i]) - 256) {
-						UDF_SET_FLAG(sb,
-							     UDF_FLAG_VARCONV);
-						lastblock = udf_variable_to_fixed(last[i]);
-						sbi->s_anchor[1] = lastblock - 256;
-					}
-				}
-			}
-		}
+		UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+		goto check_anchor;
 	}
 
-	if (!lastblock) {
-		/* We haven't found the lastblock. check 312 */
-		bh = sb_bread(sb, 312 + sbi->s_session);
-		if (bh) {
-			tag *t = (tag *)bh->b_data;
-			ident = le16_to_cpu(t->tagIdent);
-			location = le32_to_cpu(t->tagLocation);
-			brelse(bh);
+	/* Second, try with the converted number of the last block */
+	lastblock = udf_scan_anchors(sb, 1, sbi->s_last_block);
+	if (lastblock)
+		UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
 
-			if (ident == TAG_IDENT_AVDP && location == 256)
-				UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
-		}
-	}
-
+check_anchor:
+	/*
+	 * Check located anchors and the anchor block supplied via
+	 * mount options
+	 */
 	for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
-		if (sbi->s_anchor[i]) {
-			bh = udf_read_tagged(sb, sbi->s_anchor[i],
-					     sbi->s_anchor[i], &ident);
-			if (!bh)
+		if (!sbi->s_anchor[i])
+			continue;
+		bh = udf_read_tagged(sb, sbi->s_anchor[i],
+					sbi->s_anchor[i], &ident);
+		if (!bh)
+			sbi->s_anchor[i] = 0;
+		else {
+			brelse(bh);
+			if (ident != TAG_IDENT_AVDP)
 				sbi->s_anchor[i] = 0;
-			else {
-				brelse(bh);
-				if ((ident != TAG_IDENT_AVDP) &&
-				    (i || (ident != TAG_IDENT_FE &&
-					   ident != TAG_IDENT_EFE)))
-					sbi->s_anchor[i] = 0;
-			}
 		}
 	}
 
@@ -971,27 +910,30 @@
 	return 1;
 }
 
-static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 {
 	struct primaryVolDesc *pvoldesc;
-	time_t recording;
-	long recording_usec;
 	struct ustr instr;
 	struct ustr outstr;
+	struct buffer_head *bh;
+	uint16_t ident;
+
+	bh = udf_read_tagged(sb, block, block, &ident);
+	if (!bh)
+		return 1;
+	BUG_ON(ident != TAG_IDENT_PVD);
 
 	pvoldesc = (struct primaryVolDesc *)bh->b_data;
 
-	if (udf_stamp_to_time(&recording, &recording_usec,
-			      lets_to_cpu(pvoldesc->recordingDateAndTime))) {
-		kernel_timestamp ts;
-		ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
-		udf_debug("recording time %ld/%ld, %04u/%02u/%02u"
+	if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
+			      pvoldesc->recordingDateAndTime)) {
+#ifdef UDFFS_DEBUG
+		timestamp *ts = &pvoldesc->recordingDateAndTime;
+		udf_debug("recording time %04u/%02u/%02u"
 			  " %02u:%02u (%x)\n",
-			  recording, recording_usec,
-			  ts.year, ts.month, ts.day, ts.hour,
-			  ts.minute, ts.typeAndTimezone);
-		UDF_SB(sb)->s_record_time.tv_sec = recording;
-		UDF_SB(sb)->s_record_time.tv_nsec = recording_usec * 1000;
+			  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
+			  ts->minute, le16_to_cpu(ts->typeAndTimezone));
+#endif
 	}
 
 	if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32))
@@ -1005,6 +947,104 @@
 	if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128))
 		if (udf_CS0toUTF8(&outstr, &instr))
 			udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
+
+	brelse(bh);
+	return 0;
+}
+
+static int udf_load_metadata_files(struct super_block *sb, int partition)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map;
+	struct udf_meta_data *mdata;
+	kernel_lb_addr addr;
+	int fe_error = 0;
+
+	map = &sbi->s_partmaps[partition];
+	mdata = &map->s_type_specific.s_metadata;
+
+	/* metadata address */
+	addr.logicalBlockNum =  mdata->s_meta_file_loc;
+	addr.partitionReferenceNum = map->s_partition_num;
+
+	udf_debug("Metadata file location: block = %d part = %d\n",
+			  addr.logicalBlockNum, addr.partitionReferenceNum);
+
+	mdata->s_metadata_fe = udf_iget(sb, addr);
+
+	if (mdata->s_metadata_fe == NULL) {
+		udf_warning(sb, __func__, "metadata inode efe not found, "
+				"will try mirror inode.");
+		fe_error = 1;
+	} else if (UDF_I(mdata->s_metadata_fe)->i_alloc_type !=
+		 ICBTAG_FLAG_AD_SHORT) {
+		udf_warning(sb, __func__, "metadata inode efe does not have "
+			"short allocation descriptors!");
+		fe_error = 1;
+		iput(mdata->s_metadata_fe);
+		mdata->s_metadata_fe = NULL;
+	}
+
+	/* mirror file entry */
+	addr.logicalBlockNum = mdata->s_mirror_file_loc;
+	addr.partitionReferenceNum = map->s_partition_num;
+
+	udf_debug("Mirror metadata file location: block = %d part = %d\n",
+			  addr.logicalBlockNum, addr.partitionReferenceNum);
+
+	mdata->s_mirror_fe = udf_iget(sb, addr);
+
+	if (mdata->s_mirror_fe == NULL) {
+		if (fe_error) {
+			udf_error(sb, __func__, "mirror inode efe not found "
+			"and metadata inode is missing too, exiting...");
+			goto error_exit;
+		} else
+			udf_warning(sb, __func__, "mirror inode efe not found,"
+					" but metadata inode is OK");
+	} else if (UDF_I(mdata->s_mirror_fe)->i_alloc_type !=
+		 ICBTAG_FLAG_AD_SHORT) {
+		udf_warning(sb, __func__, "mirror inode efe does not have "
+			"short allocation descriptors!");
+		iput(mdata->s_mirror_fe);
+		mdata->s_mirror_fe = NULL;
+		if (fe_error)
+			goto error_exit;
+	}
+
+	/*
+	 * bitmap file entry
+	 * Note:
+	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
+	 */
+	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
+		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
+		addr.partitionReferenceNum = map->s_partition_num;
+
+		udf_debug("Bitmap file location: block = %d part = %d\n",
+			addr.logicalBlockNum, addr.partitionReferenceNum);
+
+		mdata->s_bitmap_fe = udf_iget(sb, addr);
+
+		if (mdata->s_bitmap_fe == NULL) {
+			if (sb->s_flags & MS_RDONLY)
+				udf_warning(sb, __func__, "bitmap inode efe "
+					"not found but it's ok since the disc"
+					" is mounted read-only");
+			else {
+				udf_error(sb, __func__, "bitmap inode efe not "
+					"found and attempted read-write mount");
+				goto error_exit;
+			}
+		}
+	}
+
+	udf_debug("udf_load_metadata_files Ok\n");
+
+	return 0;
+
+error_exit:
+	return 1;
 }
 
 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1025,10 +1065,9 @@
 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
 {
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
-	return (map->s_partition_len +
-		(sizeof(struct spaceBitmapDesc) << 3) +
-		(sb->s_blocksize * 8) - 1) /
-		(sb->s_blocksize * 8);
+	return DIV_ROUND_UP(map->s_partition_len +
+			    (sizeof(struct spaceBitmapDesc) << 3),
+			    sb->s_blocksize * 8);
 }
 
 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
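Note: the udf_compute_nr_groups() change just above is purely a readability conversion; the open-coded "(x + y - 1) / y" rounding becomes DIV_ROUND_UP(x, y). A quick standalone check of the equivalence (the macro is reproduced here so the example builds outside the kernel, and the operand values are arbitrary placeholders):

#include <assert.h>
#include <stdio.h>

/* Same definition as the kernel macro, reproduced for a userspace build. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long part_len = 123456;	/* arbitrary partition length */
	unsigned long bmd_bits = 24 << 3;	/* placeholder for sizeof(spaceBitmapDesc) << 3 */
	unsigned long blocksize = 2048;

	unsigned long nr_old = (part_len + bmd_bits + blocksize * 8 - 1) /
			       (blocksize * 8);
	unsigned long nr_new = DIV_ROUND_UP(part_len + bmd_bits, blocksize * 8);

	assert(nr_old == nr_new);
	printf("groups = %lu\n", nr_new);
	return 0;
}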
@@ -1059,134 +1098,241 @@
 	return bitmap;
 }
 
-static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_fill_partdesc_info(struct super_block *sb,
+		struct partitionDesc *p, int p_index)
 {
-	struct partitionDesc *p;
-	int i;
 	struct udf_part_map *map;
-	struct udf_sb_info *sbi;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct partitionHeaderDesc *phd;
 
-	p = (struct partitionDesc *)bh->b_data;
-	sbi = UDF_SB(sb);
+	map = &sbi->s_partmaps[p_index];
 
-	for (i = 0; i < sbi->s_partitions; i++) {
-		map = &sbi->s_partmaps[i];
-		udf_debug("Searching map: (%d == %d)\n",
-			  map->s_partition_num,
-			  le16_to_cpu(p->partitionNumber));
-		if (map->s_partition_num ==
-				le16_to_cpu(p->partitionNumber)) {
-			map->s_partition_len =
-				le32_to_cpu(p->partitionLength); /* blocks */
-			map->s_partition_root =
-				le32_to_cpu(p->partitionStartingLocation);
-			if (p->accessType ==
-					cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
-				map->s_partition_flags |=
-						UDF_PART_FLAG_READ_ONLY;
-			if (p->accessType ==
-					cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
-				map->s_partition_flags |=
-						UDF_PART_FLAG_WRITE_ONCE;
-			if (p->accessType ==
-					cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
-				map->s_partition_flags |=
-						UDF_PART_FLAG_REWRITABLE;
-			if (p->accessType ==
-				    cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
-				map->s_partition_flags |=
-						UDF_PART_FLAG_OVERWRITABLE;
+	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
 
-			if (!strcmp(p->partitionContents.ident,
-				    PD_PARTITION_CONTENTS_NSR02) ||
-			    !strcmp(p->partitionContents.ident,
-				    PD_PARTITION_CONTENTS_NSR03)) {
-				struct partitionHeaderDesc *phd;
+	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
+		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
+	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
+		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
+	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
+		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
 
-				phd = (struct partitionHeaderDesc *)
-						(p->partitionContentsUse);
-				if (phd->unallocSpaceTable.extLength) {
-					kernel_lb_addr loc = {
-						.logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
-						.partitionReferenceNum = i,
-					};
+	udf_debug("Partition (%d type %x) starts at physical %d, "
+		  "block length %d\n", p_index,
+		  map->s_partition_type, map->s_partition_root,
+		  map->s_partition_len);
 
-					map->s_uspace.s_table =
-						udf_iget(sb, loc);
-					if (!map->s_uspace.s_table) {
-						udf_debug("cannot load unallocSpaceTable (part %d)\n", i);
-						return 1;
-					}
-					map->s_partition_flags |=
-						UDF_PART_FLAG_UNALLOC_TABLE;
-					udf_debug("unallocSpaceTable (part %d) @ %ld\n",
-						  i, map->s_uspace.s_table->i_ino);
-				}
-				if (phd->unallocSpaceBitmap.extLength) {
-					struct udf_bitmap *bitmap =
-						udf_sb_alloc_bitmap(sb, i);
-					map->s_uspace.s_bitmap = bitmap;
-					if (bitmap != NULL) {
-						bitmap->s_extLength =
-							le32_to_cpu(phd->unallocSpaceBitmap.extLength);
-						bitmap->s_extPosition =
-							le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
-						map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
-						udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
-							  i, bitmap->s_extPosition);
-					}
-				}
-				if (phd->partitionIntegrityTable.extLength)
-					udf_debug("partitionIntegrityTable (part %d)\n", i);
-				if (phd->freedSpaceTable.extLength) {
-					kernel_lb_addr loc = {
-						.logicalBlockNum = le32_to_cpu(phd->freedSpaceTable.extPosition),
-						.partitionReferenceNum = i,
-					};
+	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
+	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
+		return 0;
 
-					map->s_fspace.s_table =
-						udf_iget(sb, loc);
-					if (!map->s_fspace.s_table) {
-						udf_debug("cannot load freedSpaceTable (part %d)\n", i);
-						return 1;
-					}
-					map->s_partition_flags |=
-						UDF_PART_FLAG_FREED_TABLE;
-					udf_debug("freedSpaceTable (part %d) @ %ld\n",
-						  i, map->s_fspace.s_table->i_ino);
-				}
-				if (phd->freedSpaceBitmap.extLength) {
-					struct udf_bitmap *bitmap =
-						udf_sb_alloc_bitmap(sb, i);
-					map->s_fspace.s_bitmap = bitmap;
-					if (bitmap != NULL) {
-						bitmap->s_extLength =
-							le32_to_cpu(phd->freedSpaceBitmap.extLength);
-						bitmap->s_extPosition =
-							le32_to_cpu(phd->freedSpaceBitmap.extPosition);
-						map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
-						udf_debug("freedSpaceBitmap (part %d) @ %d\n",
-							  i, bitmap->s_extPosition);
-					}
-				}
-			}
-			break;
+	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
+	if (phd->unallocSpaceTable.extLength) {
+		kernel_lb_addr loc = {
+			.logicalBlockNum = le32_to_cpu(
+				phd->unallocSpaceTable.extPosition),
+			.partitionReferenceNum = p_index,
+		};
+
+		map->s_uspace.s_table = udf_iget(sb, loc);
+		if (!map->s_uspace.s_table) {
+			udf_debug("cannot load unallocSpaceTable (part %d)\n",
+					p_index);
+			return 1;
 		}
+		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
+		udf_debug("unallocSpaceTable (part %d) @ %ld\n",
+				p_index, map->s_uspace.s_table->i_ino);
 	}
-	if (i == sbi->s_partitions)
-		udf_debug("Partition (%d) not found in partition map\n",
-			  le16_to_cpu(p->partitionNumber));
-	else
-		udf_debug("Partition (%d:%d type %x) starts at physical %d, "
-			  "block length %d\n",
-			  le16_to_cpu(p->partitionNumber), i,
-			  map->s_partition_type,
-			  map->s_partition_root,
-			  map->s_partition_len);
+
+	if (phd->unallocSpaceBitmap.extLength) {
+		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
+		if (!bitmap)
+			return 1;
+		map->s_uspace.s_bitmap = bitmap;
+		bitmap->s_extLength = le32_to_cpu(
+				phd->unallocSpaceBitmap.extLength);
+		bitmap->s_extPosition = le32_to_cpu(
+				phd->unallocSpaceBitmap.extPosition);
+		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+		udf_debug("unallocSpaceBitmap (part %d) @ %d\n", p_index,
+						bitmap->s_extPosition);
+	}
+
+	if (phd->partitionIntegrityTable.extLength)
+		udf_debug("partitionIntegrityTable (part %d)\n", p_index);
+
+	if (phd->freedSpaceTable.extLength) {
+		kernel_lb_addr loc = {
+			.logicalBlockNum = le32_to_cpu(
+				phd->freedSpaceTable.extPosition),
+			.partitionReferenceNum = p_index,
+		};
+
+		map->s_fspace.s_table = udf_iget(sb, loc);
+		if (!map->s_fspace.s_table) {
+			udf_debug("cannot load freedSpaceTable (part %d)\n",
+				p_index);
+			return 1;
+		}
+
+		map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
+		udf_debug("freedSpaceTable (part %d) @ %ld\n",
+				p_index, map->s_fspace.s_table->i_ino);
+	}
+
+	if (phd->freedSpaceBitmap.extLength) {
+		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
+		if (!bitmap)
+			return 1;
+		map->s_fspace.s_bitmap = bitmap;
+		bitmap->s_extLength = le32_to_cpu(
+				phd->freedSpaceBitmap.extLength);
+		bitmap->s_extPosition = le32_to_cpu(
+				phd->freedSpaceBitmap.extPosition);
+		map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
+		udf_debug("freedSpaceBitmap (part %d) @ %d\n", p_index,
+					bitmap->s_extPosition);
+	}
 	return 0;
 }
 
-static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
+static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+{
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct udf_part_map *map = &sbi->s_partmaps[p_index];
+	kernel_lb_addr ino;
+	struct buffer_head *bh = NULL;
+	struct udf_inode_info *vati;
+	uint32_t pos;
+	struct virtualAllocationTable20 *vat20;
+
+	/* VAT file entry is in the last recorded block */
+	ino.partitionReferenceNum = type1_index;
+	ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+	sbi->s_vat_inode = udf_iget(sb, ino);
+	if (!sbi->s_vat_inode)
+		return 1;
+
+	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
+		map->s_type_specific.s_virtual.s_start_offset = 0;
+		map->s_type_specific.s_virtual.s_num_entries =
+			(sbi->s_vat_inode->i_size - 36) >> 2;
+	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
+		vati = UDF_I(sbi->s_vat_inode);
+		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+			pos = udf_block_map(sbi->s_vat_inode, 0);
+			bh = sb_bread(sb, pos);
+			if (!bh)
+				return 1;
+			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
+		} else {
+			vat20 = (struct virtualAllocationTable20 *)
+							vati->i_ext.i_data;
+		}
+
+		map->s_type_specific.s_virtual.s_start_offset =
+			le16_to_cpu(vat20->lengthHeader);
+		map->s_type_specific.s_virtual.s_num_entries =
+			(sbi->s_vat_inode->i_size -
+				map->s_type_specific.s_virtual.
+					s_start_offset) >> 2;
+		brelse(bh);
+	}
+	return 0;
+}
+
+static int udf_load_partdesc(struct super_block *sb, sector_t block)
+{
+	struct buffer_head *bh;
+	struct partitionDesc *p;
+	struct udf_part_map *map;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	int i, type1_idx;
+	uint16_t partitionNumber;
+	uint16_t ident;
+	int ret = 0;
+
+	bh = udf_read_tagged(sb, block, block, &ident);
+	if (!bh)
+		return 1;
+	if (ident != TAG_IDENT_PD)
+		goto out_bh;
+
+	p = (struct partitionDesc *)bh->b_data;
+	partitionNumber = le16_to_cpu(p->partitionNumber);
+
+	/* First scan for TYPE1 and SPARABLE partitions */
+	for (i = 0; i < sbi->s_partitions; i++) {
+		map = &sbi->s_partmaps[i];
+		udf_debug("Searching map: (%d == %d)\n",
+			  map->s_partition_num, partitionNumber);
+		if (map->s_partition_num == partitionNumber &&
+		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
+		     map->s_partition_type == UDF_SPARABLE_MAP15))
+			break;
+	}
+
+	if (i >= sbi->s_partitions) {
+		udf_debug("Partition (%d) not found in partition map\n",
+			  partitionNumber);
+		goto out_bh;
+	}
+
+	ret = udf_fill_partdesc_info(sb, p, i);
+
+	/*
+	 * Now rescan for VIRTUAL or METADATA partitions once the SPARABLE and
+	 * PHYSICAL partitions have been set up
+	 */
+	type1_idx = i;
+	for (i = 0; i < sbi->s_partitions; i++) {
+		map = &sbi->s_partmaps[i];
+
+		if (map->s_partition_num == partitionNumber &&
+		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
+		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
+		     map->s_partition_type == UDF_METADATA_MAP25))
+			break;
+	}
+
+	if (i >= sbi->s_partitions)
+		goto out_bh;
+
+	ret = udf_fill_partdesc_info(sb, p, i);
+	if (ret)
+		goto out_bh;
+
+	if (map->s_partition_type == UDF_METADATA_MAP25) {
+		ret = udf_load_metadata_files(sb, i);
+		if (ret) {
+			printk(KERN_ERR "UDF-fs: error loading MetaData "
+			"partition map %d\n", i);
+			goto out_bh;
+		}
+	} else {
+		ret = udf_load_vat(sb, i, type1_idx);
+		if (ret)
+			goto out_bh;
+		/*
+		 * Mark filesystem read-only if we have a partition with
+		 * virtual map since we don't handle writing to it (we
+		 * overwrite blocks instead of relocating them).
+		 */
+		sb->s_flags |= MS_RDONLY;
+		printk(KERN_NOTICE "UDF-fs: Filesystem marked read-only "
+			"because writing to pseudooverwrite partition is "
+			"not implemented.\n");
+	}
+out_bh:
+	/* In case loading failed, we handle cleanup in udf_fill_super */
+	brelse(bh);
+	return ret;
+}
+
+static int udf_load_logicalvol(struct super_block *sb, sector_t block,
 			       kernel_lb_addr *fileset)
 {
 	struct logicalVolDesc *lvd;
@@ -1194,12 +1340,21 @@
 	uint8_t type;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct genericPartitionMap *gpm;
+	uint16_t ident;
+	struct buffer_head *bh;
+	int ret = 0;
 
+	bh = udf_read_tagged(sb, block, block, &ident);
+	if (!bh)
+		return 1;
+	BUG_ON(ident != TAG_IDENT_LVD);
 	lvd = (struct logicalVolDesc *)bh->b_data;
 
 	i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-	if (i != 0)
-		return i;
+	if (i != 0) {
+		ret = i;
+		goto out_bh;
+	}
 
 	for (i = 0, offset = 0;
 	     i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
@@ -1223,12 +1378,12 @@
 				u16 suf =
 					le16_to_cpu(((__le16 *)upm2->partIdent.
 							identSuffix)[0]);
-				if (suf == 0x0150) {
+				if (suf < 0x0200) {
 					map->s_partition_type =
 							UDF_VIRTUAL_MAP15;
 					map->s_partition_func =
 							udf_get_pblock_virt15;
-				} else if (suf == 0x0200) {
+				} else {
 					map->s_partition_type =
 							UDF_VIRTUAL_MAP20;
 					map->s_partition_func =
@@ -1238,7 +1393,6 @@
 						UDF_ID_SPARABLE,
 						strlen(UDF_ID_SPARABLE))) {
 				uint32_t loc;
-				uint16_t ident;
 				struct sparingTable *st;
 				struct sparablePartitionMap *spm =
 					(struct sparablePartitionMap *)gpm;
@@ -1256,22 +1410,64 @@
 					map->s_type_specific.s_sparing.
 							s_spar_map[j] = bh2;
 
-					if (bh2 != NULL) {
-						st = (struct sparingTable *)
-								bh2->b_data;
-						if (ident != 0 || strncmp(
-							st->sparingIdent.ident,
-							UDF_ID_SPARING,
-							strlen(UDF_ID_SPARING))) {
-							brelse(bh2);
-							map->s_type_specific.
-								s_sparing.
-								s_spar_map[j] =
-									NULL;
-						}
+					if (bh2 == NULL)
+						continue;
+
+					st = (struct sparingTable *)bh2->b_data;
+					if (ident != 0 || strncmp(
+						st->sparingIdent.ident,
+						UDF_ID_SPARING,
+						strlen(UDF_ID_SPARING))) {
+						brelse(bh2);
+						map->s_type_specific.s_sparing.
+							s_spar_map[j] = NULL;
 					}
 				}
 				map->s_partition_func = udf_get_pblock_spar15;
+			} else if (!strncmp(upm2->partIdent.ident,
+						UDF_ID_METADATA,
+						strlen(UDF_ID_METADATA))) {
+				struct udf_meta_data *mdata =
+					&map->s_type_specific.s_metadata;
+				struct metadataPartitionMap *mdm =
+						(struct metadataPartitionMap *)
+						&(lvd->partitionMaps[offset]);
+				udf_debug("Parsing Logical vol part %d "
+					"type %d  id=%s\n", i, type,
+					UDF_ID_METADATA);
+
+				map->s_partition_type = UDF_METADATA_MAP25;
+				map->s_partition_func = udf_get_pblock_meta25;
+
+				mdata->s_meta_file_loc   =
+					le32_to_cpu(mdm->metadataFileLoc);
+				mdata->s_mirror_file_loc =
+					le32_to_cpu(mdm->metadataMirrorFileLoc);
+				mdata->s_bitmap_file_loc =
+					le32_to_cpu(mdm->metadataBitmapFileLoc);
+				mdata->s_alloc_unit_size =
+					le32_to_cpu(mdm->allocUnitSize);
+				mdata->s_align_unit_size =
+					le16_to_cpu(mdm->alignUnitSize);
+				mdata->s_dup_md_flag 	 =
+					mdm->flags & 0x01;
+
+				udf_debug("Metadata Ident suffix=0x%x\n",
+					(le16_to_cpu(
+					 ((__le16 *)
+					      mdm->partIdent.identSuffix)[0])));
+				udf_debug("Metadata part num=%d\n",
+					le16_to_cpu(mdm->partitionNum));
+				udf_debug("Metadata part alloc unit size=%d\n",
+					le32_to_cpu(mdm->allocUnitSize));
+				udf_debug("Metadata file loc=%d\n",
+					le32_to_cpu(mdm->metadataFileLoc));
+				udf_debug("Mirror file loc=%d\n",
+				       le32_to_cpu(mdm->metadataMirrorFileLoc));
+				udf_debug("Bitmap file loc=%d\n",
+				       le32_to_cpu(mdm->metadataBitmapFileLoc));
+				udf_debug("Duplicate Flag: %d %d\n",
+					mdata->s_dup_md_flag, mdm->flags);
 			} else {
 				udf_debug("Unknown ident: %s\n",
 					  upm2->partIdent.ident);
@@ -1296,7 +1492,9 @@
 	if (lvd->integritySeqExt.extLength)
 		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
 
-	return 0;
+out_bh:
+	brelse(bh);
+	return ret;
 }
 
 /*
@@ -1345,7 +1543,7 @@
  *	July 1, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-static int udf_process_sequence(struct super_block *sb, long block,
+static noinline int udf_process_sequence(struct super_block *sb, long block,
 				long lastblock, kernel_lb_addr *fileset)
 {
 	struct buffer_head *bh = NULL;
@@ -1354,19 +1552,25 @@
 	struct generic_desc *gd;
 	struct volDescPtr *vdp;
 	int done = 0;
-	int i, j;
 	uint32_t vdsn;
 	uint16_t ident;
 	long next_s = 0, next_e = 0;
 
 	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 
-	/* Read the main descriptor sequence */
+	/*
+	 * Read the main descriptor sequence and find which descriptors
+	 * are in it.
+	 */
 	for (; (!done && block <= lastblock); block++) {
 
 		bh = udf_read_tagged(sb, block, block, &ident);
-		if (!bh)
-			break;
+		if (!bh) {
+			printk(KERN_ERR "udf: Block %llu of volume descriptor "
+			       "sequence is corrupted or could not be "
+			       "read.\n", (unsigned long long)block);
+			return 1;
+		}
 
 		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
 		gd = (struct generic_desc *)bh->b_data;
@@ -1432,41 +1636,31 @@
 		}
 		brelse(bh);
 	}
-	for (i = 0; i < VDS_POS_LENGTH; i++) {
-		if (vds[i].block) {
-			bh = udf_read_tagged(sb, vds[i].block, vds[i].block,
-					     &ident);
+	/*
+	 * Now read interesting descriptors again and process them
+	 * in a suitable order
+	 */
+	if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
+		printk(KERN_ERR "udf: Primary Volume Descriptor not found!\n");
+		return 1;
+	}
+	if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
+		return 1;
 
-			if (i == VDS_POS_PRIMARY_VOL_DESC) {
-				udf_load_pvoldesc(sb, bh);
-			} else if (i == VDS_POS_LOGICAL_VOL_DESC) {
-				if (udf_load_logicalvol(sb, bh, fileset)) {
-					brelse(bh);
-					return 1;
-				}
-			} else if (i == VDS_POS_PARTITION_DESC) {
-				struct buffer_head *bh2 = NULL;
-				if (udf_load_partdesc(sb, bh)) {
-					brelse(bh);
-					return 1;
-				}
-				for (j = vds[i].block + 1;
-				     j <  vds[VDS_POS_TERMINATING_DESC].block;
-				     j++) {
-					bh2 = udf_read_tagged(sb, j, j, &ident);
-					gd = (struct generic_desc *)bh2->b_data;
-					if (ident == TAG_IDENT_PD)
-						if (udf_load_partdesc(sb,
-								      bh2)) {
-							brelse(bh);
-							brelse(bh2);
-							return 1;
-						}
-					brelse(bh2);
-				}
-			}
-			brelse(bh);
-		}
+	if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
+	    vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
+		return 1;
+
+	if (vds[VDS_POS_PARTITION_DESC].block) {
+		/*
+		 * We rescan the whole descriptor sequence to find
+		 * partition descriptor blocks and process them.
+		 */
+		for (block = vds[VDS_POS_PARTITION_DESC].block;
+		     block < vds[VDS_POS_TERMINATING_DESC].block;
+		     block++)
+			if (udf_load_partdesc(sb, block))
+				return 1;
 	}
 
 	return 0;
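Note: the reworked udf_process_sequence() above first walks the sequence once, only recording where each descriptor type lives, and then loads the primary volume descriptor, logical volume descriptor and partition descriptors in a fixed order. A compact sketch of that record-then-process idea (the descriptor kinds and the scanned input are invented):

#include <stdio.h>

enum { POS_PVD, POS_LVD, POS_PD, POS_TD, POS_MAX };

int main(void)
{
	/* Pretend this is the tag found in each block of the sequence. */
	const int seq[] = { POS_PVD, POS_PD, POS_PD, POS_LVD, POS_TD };
	const int nblocks = sizeof(seq) / sizeof(seq[0]);
	long where[POS_MAX] = { 0 };	/* first block holding each kind; 0 = not seen */
	long block;

	/* Pass 1: remember positions only. */
	for (block = 0; block < nblocks; block++)
		if (!where[seq[block]])
			where[seq[block]] = block + 1;

	/* Pass 2: process in the order the rest of the mount needs. */
	if (!where[POS_PVD]) {
		fprintf(stderr, "no primary volume descriptor\n");
		return 1;
	}
	printf("load PVD from block %ld\n", where[POS_PVD] - 1);
	if (where[POS_LVD])
		printf("load LVD from block %ld\n", where[POS_LVD] - 1);
	if (where[POS_PD]) {
		for (block = where[POS_PD] - 1; block < where[POS_TD] - 1; block++)
			printf("load PD candidate at block %ld\n", block);
	}
	return 0;
}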
@@ -1478,6 +1672,7 @@
 static int udf_check_valid(struct super_block *sb, int novrs, int silent)
 {
 	long block;
+	struct udf_sb_info *sbi = UDF_SB(sb);
 
 	if (novrs) {
 		udf_debug("Validity check skipped because of novrs option\n");
@@ -1485,27 +1680,22 @@
 	}
 	/* Check that it is NSR02 compliant */
 	/* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
-	else {
-		block = udf_vrs(sb, silent);
-		if (block == -1) {
-			struct udf_sb_info *sbi = UDF_SB(sb);
-			udf_debug("Failed to read byte 32768. Assuming open "
-				  "disc. Skipping validity check\n");
-			if (!sbi->s_last_block)
-				sbi->s_last_block = udf_get_last_block(sb);
-			return 0;
-		} else
-			return !block;
-	}
+	block = udf_vrs(sb, silent);
+	if (block == -1)
+		udf_debug("Failed to read byte 32768. Assuming open "
+			  "disc. Skipping validity check\n");
+	if (block && !sbi->s_last_block)
+		sbi->s_last_block = udf_get_last_block(sb);
+	return !block;
 }
 
-static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
+static int udf_load_sequence(struct super_block *sb, kernel_lb_addr *fileset)
 {
 	struct anchorVolDescPtr *anchor;
 	uint16_t ident;
 	struct buffer_head *bh;
 	long main_s, main_e, reserve_s, reserve_e;
-	int i, j;
+	int i;
 	struct udf_sb_info *sbi;
 
 	if (!sb)
@@ -1515,6 +1705,7 @@
 	for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
 		if (!sbi->s_anchor[i])
 			continue;
+
 		bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i],
 				     &ident);
 		if (!bh)
@@ -1553,76 +1744,6 @@
 	}
 	udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]);
 
-	for (i = 0; i < sbi->s_partitions; i++) {
-		kernel_lb_addr uninitialized_var(ino);
-		struct udf_part_map *map = &sbi->s_partmaps[i];
-		switch (map->s_partition_type) {
-		case UDF_VIRTUAL_MAP15:
-		case UDF_VIRTUAL_MAP20:
-			if (!sbi->s_last_block) {
-				sbi->s_last_block = udf_get_last_block(sb);
-				udf_find_anchor(sb);
-			}
-
-			if (!sbi->s_last_block) {
-				udf_debug("Unable to determine Lastblock (For "
-					  "Virtual Partition)\n");
-				return 1;
-			}
-
-			for (j = 0; j < sbi->s_partitions; j++) {
-				struct udf_part_map *map2 = &sbi->s_partmaps[j];
-				if (j != i &&
-				    map->s_volumeseqnum ==
-						map2->s_volumeseqnum &&
-				    map->s_partition_num ==
-						map2->s_partition_num) {
-					ino.partitionReferenceNum = j;
-					ino.logicalBlockNum =
-						sbi->s_last_block -
-							map2->s_partition_root;
-					break;
-				}
-			}
-
-			if (j == sbi->s_partitions)
-				return 1;
-
-			sbi->s_vat_inode = udf_iget(sb, ino);
-			if (!sbi->s_vat_inode)
-				return 1;
-
-			if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
-				map->s_type_specific.s_virtual.s_start_offset =
-					udf_ext0_offset(sbi->s_vat_inode);
-				map->s_type_specific.s_virtual.s_num_entries =
-					(sbi->s_vat_inode->i_size - 36) >> 2;
-			} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
-				uint32_t pos;
-				struct virtualAllocationTable20 *vat20;
-
-				pos = udf_block_map(sbi->s_vat_inode, 0);
-				bh = sb_bread(sb, pos);
-				if (!bh)
-					return 1;
-				vat20 = (struct virtualAllocationTable20 *)
-					bh->b_data +
-					udf_ext0_offset(sbi->s_vat_inode);
-				map->s_type_specific.s_virtual.s_start_offset =
-					le16_to_cpu(vat20->lengthHeader) +
-					udf_ext0_offset(sbi->s_vat_inode);
-				map->s_type_specific.s_virtual.s_num_entries =
-					(sbi->s_vat_inode->i_size -
-					 map->s_type_specific.s_virtual.
-							s_start_offset) >> 2;
-				brelse(bh);
-			}
-			map->s_partition_root = udf_get_pblock(sb, 0, i, 0);
-			map->s_partition_len =
-				sbi->s_partmaps[ino.partitionReferenceNum].
-								s_partition_len;
-		}
-	}
 	return 0;
 }
 
@@ -1630,65 +1751,61 @@
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct buffer_head *bh = sbi->s_lvid_bh;
-	if (bh) {
-		kernel_timestamp cpu_time;
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-							udf_sb_lvidiu(sbi);
+	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolIntegrityDescImpUse *lvidiu;
+	if (!bh)
+		return;
 
-		lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
-		lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
-		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
-			lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
-		lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
+	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	lvidiu = udf_sb_lvidiu(sbi);
 
-		lvid->descTag.descCRC = cpu_to_le16(
-			udf_crc((char *)lvid + sizeof(tag),
-				le16_to_cpu(lvid->descTag.descCRCLength),
-				0));
+	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+	udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
+				CURRENT_TIME);
+	lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
 
-		lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
-		mark_buffer_dirty(bh);
-	}
+	lvid->descTag.descCRC = cpu_to_le16(
+		crc_itu_t(0, (char *)lvid + sizeof(tag),
+			le16_to_cpu(lvid->descTag.descCRCLength)));
+
+	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+	mark_buffer_dirty(bh);
 }
 
 static void udf_close_lvid(struct super_block *sb)
 {
-	kernel_timestamp cpu_time;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct buffer_head *bh = sbi->s_lvid_bh;
 	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolIntegrityDescImpUse *lvidiu;
 
 	if (!bh)
 		return;
 
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 
-	if (lvid->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-							udf_sb_lvidiu(sbi);
-		lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
-		lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
-		if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
-			lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
-		if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
-			lvidiu->maxUDFWriteRev =
-					cpu_to_le16(UDF_MAX_WRITE_VERSION);
-		if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
-			lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
-		if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
-			lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
-		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
+	if (lvid->integrityType != LVID_INTEGRITY_TYPE_OPEN)
+		return;
 
-		lvid->descTag.descCRC = cpu_to_le16(
-			udf_crc((char *)lvid + sizeof(tag),
-				le16_to_cpu(lvid->descTag.descCRCLength),
-				0));
+	lvidiu = udf_sb_lvidiu(sbi);
+	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
+	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
+	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
+	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
+		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
+	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
+		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
+	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
+		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
+	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
 
-		lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
-		mark_buffer_dirty(bh);
-	}
+	lvid->descTag.descCRC = cpu_to_le16(
+			crc_itu_t(0, (char *)lvid + sizeof(tag),
+				le16_to_cpu(lvid->descTag.descCRCLength)));
+
+	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+	mark_buffer_dirty(bh);
 }
 
 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
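Note: udf_open_lvid()/udf_close_lvid() now stamp the descriptor CRC with the generic crc_itu_t() helper rather than the private udf_crc(); the patch relies on both computing the same CRC-16 with the ITU-T/CCITT polynomial 0x1021, MSB first, starting from 0. A bit-at-a-time reference for comparison, assuming that parameterization (the kernel helper itself is table-driven in lib/crc-itu-t.c):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-at-a-time CRC-16, polynomial x^16 + x^12 + x^5 + 1 (0x1021),
 * MSB first, no reflection, no final XOR. */
static uint16_t crc16_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		int bit;

		crc ^= (uint16_t)(*buf++) << 8;
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x8000)
				crc = (uint16_t)((crc << 1) ^ 0x1021);
			else
				crc = (uint16_t)(crc << 1);
		}
	}
	return crc;
}

int main(void)
{
	const char msg[] = "123456789";

	/* Well-known check value for this variant with init 0 is 0x31C3. */
	printf("0x%04X\n", crc16_ccitt(0, (const uint8_t *)msg, strlen(msg)));
	return 0;
}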
@@ -1708,22 +1825,35 @@
 		vfree(bitmap);
 }
 
-/*
- * udf_read_super
- *
- * PURPOSE
- *	Complete the specified super block.
- *
- * PRE-CONDITIONS
- *	sb			Pointer to superblock to complete - never NULL.
- *	sb->s_dev		Device to read suberblock from.
- *	options			Pointer to mount options.
- *	silent			Silent flag.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
+static void udf_free_partition(struct udf_part_map *map)
+{
+	int i;
+	struct udf_meta_data *mdata;
+
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
+		iput(map->s_uspace.s_table);
+	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
+		iput(map->s_fspace.s_table);
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
+		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
+	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
+		udf_sb_free_bitmap(map->s_fspace.s_bitmap);
+	if (map->s_partition_type == UDF_SPARABLE_MAP15)
+		for (i = 0; i < 4; i++)
+			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
+	else if (map->s_partition_type == UDF_METADATA_MAP25) {
+		mdata = &map->s_type_specific.s_metadata;
+		iput(mdata->s_metadata_fe);
+		mdata->s_metadata_fe = NULL;
+
+		iput(mdata->s_mirror_fe);
+		mdata->s_mirror_fe = NULL;
+
+		iput(mdata->s_bitmap_fe);
+		mdata->s_bitmap_fe = NULL;
+	}
+}
+
 static int udf_fill_super(struct super_block *sb, void *options, int silent)
 {
 	int i;
@@ -1776,8 +1906,11 @@
 	sbi->s_nls_map = uopt.nls_map;
 
 	/* Set the block size for all transfers */
-	if (!udf_set_blocksize(sb, uopt.blocksize))
+	if (!sb_min_blocksize(sb, uopt.blocksize)) {
+		udf_debug("Bad block size (%d)\n", uopt.blocksize);
+		printk(KERN_ERR "udf: bad block size (%d)\n", uopt.blocksize);
 		goto error_out;
+	}
 
 	if (uopt.session == 0xFFFFFFFF)
 		sbi->s_session = udf_get_last_session(sb);
@@ -1789,7 +1922,6 @@
 	sbi->s_last_block = uopt.lastblock;
 	sbi->s_anchor[0] = sbi->s_anchor[1] = 0;
 	sbi->s_anchor[2] = uopt.anchor;
-	sbi->s_anchor[3] = 256;
 
 	if (udf_check_valid(sb, uopt.novrs, silent)) {
 		/* read volume recognition sequences */
@@ -1806,7 +1938,7 @@
 	sb->s_magic = UDF_SUPER_MAGIC;
 	sb->s_time_gran = 1000;
 
-	if (udf_load_partition(sb, &fileset)) {
+	if (udf_load_sequence(sb, &fileset)) {
 		printk(KERN_WARNING "UDF-fs: No partition found (1)\n");
 		goto error_out;
 	}
@@ -1856,12 +1988,12 @@
 	}
 
 	if (!silent) {
-		kernel_timestamp ts;
-		udf_time_to_stamp(&ts, sbi->s_record_time);
+		timestamp ts;
+		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
 		udf_info("UDF: Mounting volume '%s', "
 			 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
-			 sbi->s_volume_ident, ts.year, ts.month, ts.day,
-			 ts.hour, ts.minute, ts.typeAndTimezone);
+			 sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day,
+			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
 	}
 	if (!(sb->s_flags & MS_RDONLY))
 		udf_open_lvid(sb);
@@ -1890,21 +2022,9 @@
 error_out:
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
-	if (sbi->s_partitions) {
-		struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
-		if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
-			iput(map->s_uspace.s_table);
-		if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
-			iput(map->s_fspace.s_table);
-		if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
-			udf_sb_free_bitmap(map->s_uspace.s_bitmap);
-		if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
-			udf_sb_free_bitmap(map->s_fspace.s_bitmap);
-		if (map->s_partition_type == UDF_SPARABLE_MAP15)
-			for (i = 0; i < 4; i++)
-				brelse(map->s_type_specific.s_sparing.
-						s_spar_map[i]);
-	}
+	if (sbi->s_partitions)
+		for (i = 0; i < sbi->s_partitions; i++)
+			udf_free_partition(&sbi->s_partmaps[i]);
 #ifdef CONFIG_UDF_NLS
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
 		unload_nls(sbi->s_nls_map);
@@ -1920,8 +2040,8 @@
 	return -EINVAL;
 }
 
-void udf_error(struct super_block *sb, const char *function,
-	       const char *fmt, ...)
+static void udf_error(struct super_block *sb, const char *function,
+		      const char *fmt, ...)
 {
 	va_list args;
 
@@ -1948,19 +2068,6 @@
 	       sb->s_id, function, error_buf);
 }
 
-/*
- * udf_put_super
- *
- * PURPOSE
- *	Prepare for destruction of the superblock.
- *
- * DESCRIPTION
- *	Called before the filesystem is unmounted.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
 static void udf_put_super(struct super_block *sb)
 {
 	int i;
@@ -1969,21 +2076,9 @@
 	sbi = UDF_SB(sb);
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
-	if (sbi->s_partitions) {
-		struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition];
-		if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
-			iput(map->s_uspace.s_table);
-		if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
-			iput(map->s_fspace.s_table);
-		if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
-			udf_sb_free_bitmap(map->s_uspace.s_bitmap);
-		if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
-			udf_sb_free_bitmap(map->s_fspace.s_bitmap);
-		if (map->s_partition_type == UDF_SPARABLE_MAP15)
-			for (i = 0; i < 4; i++)
-				brelse(map->s_type_specific.s_sparing.
-						s_spar_map[i]);
-	}
+	if (sbi->s_partitions)
+		for (i = 0; i < sbi->s_partitions; i++)
+			udf_free_partition(&sbi->s_partmaps[i]);
 #ifdef CONFIG_UDF_NLS
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
 		unload_nls(sbi->s_nls_map);
@@ -1996,19 +2091,6 @@
 	sb->s_fs_info = NULL;
 }
 
-/*
- * udf_stat_fs
- *
- * PURPOSE
- *	Return info about the filesystem.
- *
- * DESCRIPTION
- *	Called by sys_statfs()
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
- */
 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
@@ -2035,10 +2117,6 @@
 	return 0;
 }
 
-static unsigned char udf_bitmap_lookup[16] = {
-	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
-};
-
 static unsigned int udf_count_free_bitmap(struct super_block *sb,
 					  struct udf_bitmap *bitmap)
 {
@@ -2048,7 +2126,6 @@
 	int block = 0, newblock;
 	kernel_lb_addr loc;
 	uint32_t bytes;
-	uint8_t value;
 	uint8_t *ptr;
 	uint16_t ident;
 	struct spaceBitmapDesc *bm;
@@ -2074,13 +2151,10 @@
 	ptr = (uint8_t *)bh->b_data;
 
 	while (bytes > 0) {
-		while ((bytes > 0) && (index < sb->s_blocksize)) {
-			value = ptr[index];
-			accum += udf_bitmap_lookup[value & 0x0f];
-			accum += udf_bitmap_lookup[value >> 4];
-			index++;
-			bytes--;
-		}
+		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
+		accum += bitmap_weight((const unsigned long *)(ptr + index),
+					cur_bytes * 8);
+		bytes -= cur_bytes;
 		if (bytes) {
 			brelse(bh);
 			newblock = udf_get_lb_pblock(sb, loc, ++block);
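Note: the free-space counter above now hands whole runs of the bitmap to bitmap_weight() instead of looking up two nibbles per byte. Both approaches count the same set bits (in the UDF space bitmap a set bit marks a free block); a userspace comparison using the original nibble table and the GCC/Clang popcount builtins as a stand-in for bitmap_weight():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const unsigned char nibble_weight[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

/* Old style: per-nibble table lookup, one byte at a time. */
static unsigned int count_bytes(const uint8_t *p, size_t n)
{
	unsigned int acc = 0;

	while (n--) {
		acc += nibble_weight[*p & 0x0f];
		acc += nibble_weight[*p >> 4];
		p++;
	}
	return acc;
}

/* New style: whole-word population count. */
static unsigned int count_words(const uint8_t *p, size_t n)
{
	unsigned int acc = 0;
	unsigned long w;

	while (n >= sizeof(w)) {
		memcpy(&w, p, sizeof(w));
		acc += (unsigned int)__builtin_popcountl(w);
		p += sizeof(w);
		n -= sizeof(w);
	}
	while (n--)
		acc += (unsigned int)__builtin_popcount(*p++);
	return acc;
}

int main(void)
{
	uint8_t bitmap[64];
	size_t i;

	for (i = 0; i < sizeof(bitmap); i++)
		bitmap[i] = (uint8_t)(i * 37);	/* arbitrary pattern */
	assert(count_bytes(bitmap, sizeof(bitmap)) ==
	       count_words(bitmap, sizeof(bitmap)));
	printf("free blocks: %u\n", count_words(bitmap, sizeof(bitmap)));
	return 0;
}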
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6ec9922..c3265e1 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -23,7 +23,6 @@
 #include <asm/uaccess.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/udf_fs.h>
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index fe61be1..65e19b4 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -22,7 +22,6 @@
 #include "udfdecl.h"
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/udf_fs.h>
 #include <linux/buffer_head.h>
 
 #include "udf_i.h"
@@ -180,6 +179,24 @@
 	brelse(epos.bh);
 }
 
+static void udf_update_alloc_ext_desc(struct inode *inode,
+				      struct extent_position *epos,
+				      u32 lenalloc)
+{
+	struct super_block *sb = inode->i_sb;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+
+	struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data);
+	int len = sizeof(struct allocExtDesc);
+
+	aed->lengthAllocDescs =	cpu_to_le32(lenalloc);
+	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
+		len += lenalloc;
+
+	udf_update_tag(epos->bh->b_data, len);
+	mark_buffer_dirty_inode(epos->bh, inode);
+}
+
 void udf_truncate_extents(struct inode *inode)
 {
 	struct extent_position epos;
@@ -187,7 +204,6 @@
 	uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
 	int8_t etype;
 	struct super_block *sb = inode->i_sb;
-	struct udf_sb_info *sbi = UDF_SB(sb);
 	sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
 	loff_t byte_offset;
 	int adsize;
@@ -224,35 +240,15 @@
 				if (indirect_ext_len) {
 					/* We managed to free all extents in the
 					 * indirect extent - free it too */
-					if (!epos.bh)
-						BUG();
+					BUG_ON(!epos.bh);
 					udf_free_blocks(sb, inode, epos.block,
 							0, indirect_ext_len);
-				} else {
-					if (!epos.bh) {
-						iinfo->i_lenAlloc =
-								lenalloc;
-						mark_inode_dirty(inode);
-					} else {
-						struct allocExtDesc *aed =
-							(struct allocExtDesc *)
-							(epos.bh->b_data);
-						int len =
-						    sizeof(struct allocExtDesc);
-
-						aed->lengthAllocDescs =
-						    cpu_to_le32(lenalloc);
-						if (!UDF_QUERY_FLAG(sb,
-							UDF_FLAG_STRICT) ||
-						    sbi->s_udfrev >= 0x0201)
-							len += lenalloc;
-
-						udf_update_tag(epos.bh->b_data,
-								len);
-						mark_buffer_dirty_inode(
-								epos.bh, inode);
-					}
-				}
+				} else if (!epos.bh) {
+					iinfo->i_lenAlloc = lenalloc;
+					mark_inode_dirty(inode);
+				} else
+					udf_update_alloc_ext_desc(inode,
+							&epos, lenalloc);
 				brelse(epos.bh);
 				epos.offset = sizeof(struct allocExtDesc);
 				epos.block = eloc;
@@ -272,29 +268,14 @@
 		}
 
 		if (indirect_ext_len) {
-			if (!epos.bh)
-				BUG();
+			BUG_ON(!epos.bh);
 			udf_free_blocks(sb, inode, epos.block, 0,
 					indirect_ext_len);
-		} else {
-			if (!epos.bh) {
-				iinfo->i_lenAlloc = lenalloc;
-				mark_inode_dirty(inode);
-			} else {
-				struct allocExtDesc *aed =
-				    (struct allocExtDesc *)(epos.bh->b_data);
-				aed->lengthAllocDescs = cpu_to_le32(lenalloc);
-				if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
-				    sbi->s_udfrev >= 0x0201)
-					udf_update_tag(epos.bh->b_data,
-						lenalloc +
-						sizeof(struct allocExtDesc));
-				else
-					udf_update_tag(epos.bh->b_data,
-						sizeof(struct allocExtDesc));
-				mark_buffer_dirty_inode(epos.bh, inode);
-			}
-		}
+		} else if (!epos.bh) {
+			iinfo->i_lenAlloc = lenalloc;
+			mark_inode_dirty(inode);
+		} else
+			udf_update_alloc_ext_desc(inode, &epos, lenalloc);
 	} else if (inode->i_size) {
 		if (byte_offset) {
 			kernel_long_ad extent;
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index ccc52f1..4f86b1d 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,10 +1,32 @@
-#ifndef __LINUX_UDF_I_H
-#define __LINUX_UDF_I_H
+#ifndef _UDF_I_H
+#define _UDF_I_H
 
-#include <linux/udf_fs_i.h>
+struct udf_inode_info {
+	struct timespec		i_crtime;
+	/* Physical address of inode */
+	kernel_lb_addr		i_location;
+	__u64			i_unique;
+	__u32			i_lenEAttr;
+	__u32			i_lenAlloc;
+	__u64			i_lenExtents;
+	__u32			i_next_alloc_block;
+	__u32			i_next_alloc_goal;
+	unsigned		i_alloc_type : 3;
+	unsigned		i_efe : 1;	/* extendedFileEntry */
+	unsigned		i_use : 1;	/* unallocSpaceEntry */
+	unsigned		i_strat4096 : 1;
+	unsigned		reserved : 26;
+	union {
+		short_ad	*i_sad;
+		long_ad		*i_lad;
+		__u8		*i_data;
+	} i_ext;
+	struct inode vfs_inode;
+};
+
 static inline struct udf_inode_info *UDF_I(struct inode *inode)
 {
 	return list_entry(inode, struct udf_inode_info, vfs_inode);
 }
 
-#endif /* !defined(_LINUX_UDF_I_H) */
+#endif /* _UDF_I_H */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 737d1c6..1c1c514 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -1,10 +1,12 @@
 #ifndef __LINUX_UDF_SB_H
 #define __LINUX_UDF_SB_H
 
+#include <linux/mutex.h>
+
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC			0x15013346
 
-#define UDF_MAX_READ_VERSION		0x0201
+#define UDF_MAX_READ_VERSION		0x0250
 #define UDF_MAX_WRITE_VERSION		0x0201
 
 #define UDF_FLAG_USE_EXTENDED_FE	0
@@ -38,6 +40,111 @@
 #define UDF_PART_FLAG_REWRITABLE	0x0040
 #define UDF_PART_FLAG_OVERWRITABLE	0x0080
 
+#define UDF_MAX_BLOCK_LOADED	8
+
+#define UDF_TYPE1_MAP15			0x1511U
+#define UDF_VIRTUAL_MAP15		0x1512U
+#define UDF_VIRTUAL_MAP20		0x2012U
+#define UDF_SPARABLE_MAP15		0x1522U
+#define UDF_METADATA_MAP25		0x2511U
+
+#pragma pack(1) /* XXX(hch): Why?  This file just defines in-core structures */
+
+struct udf_meta_data {
+	__u32	s_meta_file_loc;
+	__u32	s_mirror_file_loc;
+	__u32	s_bitmap_file_loc;
+	__u32	s_alloc_unit_size;
+	__u16	s_align_unit_size;
+	__u8 	s_dup_md_flag;
+	struct inode *s_metadata_fe;
+	struct inode *s_mirror_fe;
+	struct inode *s_bitmap_fe;
+};
+
+struct udf_sparing_data {
+	__u16	s_packet_len;
+	struct buffer_head *s_spar_map[4];
+};
+
+struct udf_virtual_data {
+	__u32	s_num_entries;
+	__u16	s_start_offset;
+};
+
+struct udf_bitmap {
+	__u32			s_extLength;
+	__u32			s_extPosition;
+	__u16			s_nr_groups;
+	struct buffer_head 	**s_block_bitmap;
+};
+
+struct udf_part_map {
+	union {
+		struct udf_bitmap	*s_bitmap;
+		struct inode		*s_table;
+	} s_uspace;
+	union {
+		struct udf_bitmap	*s_bitmap;
+		struct inode		*s_table;
+	} s_fspace;
+	__u32	s_partition_root;
+	__u32	s_partition_len;
+	__u16	s_partition_type;
+	__u16	s_partition_num;
+	union {
+		struct udf_sparing_data s_sparing;
+		struct udf_virtual_data s_virtual;
+		struct udf_meta_data s_metadata;
+	} s_type_specific;
+	__u32	(*s_partition_func)(struct super_block *, __u32, __u16, __u32);
+	__u16	s_volumeseqnum;
+	__u16	s_partition_flags;
+};
+
+#pragma pack()
+
+struct udf_sb_info {
+	struct udf_part_map	*s_partmaps;
+	__u8			s_volume_ident[32];
+
+	/* Overall info */
+	__u16			s_partitions;
+	__u16			s_partition;
+
+	/* Sector headers */
+	__s32			s_session;
+	__u32			s_anchor[3];
+	__u32			s_last_block;
+
+	struct buffer_head	*s_lvid_bh;
+
+	/* Default permissions */
+	mode_t			s_umask;
+	gid_t			s_gid;
+	uid_t			s_uid;
+
+	/* Root Info */
+	struct timespec		s_record_time;
+
+	/* Fileset Info */
+	__u16			s_serial_number;
+
+	/* highest UDF revision we have recorded to this media */
+	__u16			s_udfrev;
+
+	/* Miscellaneous flags */
+	__u32			s_flags;
+
+	/* Encoding info */
+	struct nls_table	*s_nls_map;
+
+	/* VAT inode */
+	struct inode		*s_vat_inode;
+
+	struct mutex		s_alloc_mutex;
+};
+
 static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
 {
 	return sb->s_fs_info;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 681dc2b..f3f45d0 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -1,17 +1,37 @@
 #ifndef __UDF_DECL_H
 #define __UDF_DECL_H
 
-#include <linux/udf_fs.h>
 #include "ecma_167.h"
 #include "osta_udf.h"
 
 #include <linux/fs.h>
 #include <linux/types.h>
-#include <linux/udf_fs_i.h>
-#include <linux/udf_fs_sb.h>
 #include <linux/buffer_head.h>
+#include <linux/udf_fs_i.h>
 
+#include "udf_sb.h"
 #include "udfend.h"
+#include "udf_i.h"
+
+#define UDF_PREALLOCATE
+#define UDF_DEFAULT_PREALLOC_BLOCKS	8
+
+#define UDFFS_DEBUG
+
+#ifdef UDFFS_DEBUG
+#define udf_debug(f, a...) \
+do { \
+	printk(KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
+		__FILE__, __LINE__, __func__); \
+	printk(f, ##a); \
+} while (0)
+#else
+#define udf_debug(f, a...) /**/
+#endif
+
+#define udf_info(f, a...) \
+	printk(KERN_INFO "UDF-fs INFO " f, ##a);
+
 
 #define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) )
 #define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) )
@@ -23,16 +43,24 @@
 #define UDF_NAME_LEN		256
 #define UDF_PATH_LEN		1023
 
-#define udf_file_entry_alloc_offset(inode)\
-	(UDF_I(inode)->i_use ?\
-		sizeof(struct unallocSpaceEntry) :\
-		((UDF_I(inode)->i_efe ?\
-			sizeof(struct extendedFileEntry) :\
-			sizeof(struct fileEntry)) + UDF_I(inode)->i_lenEAttr))
+static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
+{
+	struct udf_inode_info *iinfo = UDF_I(inode);
+	if (iinfo->i_use)
+		return sizeof(struct unallocSpaceEntry);
+	else if (iinfo->i_efe)
+		return sizeof(struct extendedFileEntry) + iinfo->i_lenEAttr;
+	else
+		return sizeof(struct fileEntry) + iinfo->i_lenEAttr;
+}
 
-#define udf_ext0_offset(inode)\
-	(UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ?\
-		udf_file_entry_alloc_offset(inode) : 0)
+static inline size_t udf_ext0_offset(struct inode *inode)
+{
+	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+		return udf_file_entry_alloc_offset(inode);
+	else
+		return 0;
+}
 
 #define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
 
@@ -83,7 +111,6 @@
 };
 
 /* super.c */
-extern void udf_error(struct super_block *, const char *, const char *, ...);
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 
 /* namei.c */
@@ -150,6 +177,8 @@
 				      uint32_t);
 extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t,
 				      uint32_t);
+extern uint32_t udf_get_pblock_meta25(struct super_block *, uint32_t, uint16_t,
+					  uint32_t);
 extern int udf_relocate_blocks(struct super_block *, long, long *);
 
 /* unicode.c */
@@ -157,7 +186,7 @@
 extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
 			    int);
 extern int udf_build_ustr(struct ustr *, dstring *, int);
-extern int udf_CS0toUTF8(struct ustr *, struct ustr *);
+extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
@@ -191,11 +220,9 @@
 extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
 extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
 
-/* crc.c */
-extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
-
 /* udftime.c */
-extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp);
-extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec);
+extern struct timespec *udf_disk_stamp_to_time(struct timespec *dest,
+						timestamp src);
+extern timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec src);
 
 #endif				/* __UDF_DECL_H */
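
The udfdecl.h hunk above turns udf_file_entry_alloc_offset() and udf_ext0_offset() from macros into static inline functions, so UDF_I(inode) is evaluated once and the argument gets normal type checking. A generic userspace sketch of why that conversion matters; the names and the side-effecting helper here are invented, not from the UDF code.

#include <stdio.h>

static int calls;

static int next_val(void)
{
	calls++;
	return 3;
}

/* Macro version: the argument text is pasted in twice, so any side
 * effect in it runs twice. */
#define SQUARE_MACRO(x)	((x) * (x))

/* static inline version: the argument is evaluated exactly once and is
 * type-checked like any other function parameter. */
static inline int square_inline(int x)
{
	return x * x;
}

int main(void)
{
	int v;

	calls = 0;
	v = SQUARE_MACRO(next_val());
	printf("macro : %d, next_val() ran %d times\n", v, calls);

	calls = 0;
	v = square_inline(next_val());
	printf("inline: %d, next_val() ran %d times\n", v, calls);
	return 0;
}
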
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index c4bd120..489f52f 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -24,17 +24,6 @@
 	return out;
 }
 
-static inline kernel_timestamp lets_to_cpu(timestamp in)
-{
-	kernel_timestamp out;
-
-	memcpy(&out, &in, sizeof(timestamp));
-	out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
-	out.year = le16_to_cpu(in.year);
-
-	return out;
-}
-
 static inline short_ad lesa_to_cpu(short_ad in)
 {
 	short_ad out;
@@ -85,15 +74,4 @@
 	return out;
 }
 
-static inline timestamp cpu_to_lets(kernel_timestamp in)
-{
-	timestamp out;
-
-	memcpy(&out, &in, sizeof(timestamp));
-	out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
-	out.year = cpu_to_le16(in.year);
-
-	return out;
-}
-
 #endif /* __UDF_ENDIAN_H */
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index ce59573..5f811655 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -85,39 +85,38 @@
 #define SECS_PER_HOUR	(60 * 60)
 #define SECS_PER_DAY	(SECS_PER_HOUR * 24)
 
-time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
+struct timespec *udf_disk_stamp_to_time(struct timespec *dest, timestamp src)
 {
 	int yday;
-	uint8_t type = src.typeAndTimezone >> 12;
+	u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
+	u16 year = le16_to_cpu(src.year);
+	uint8_t type = typeAndTimezone >> 12;
 	int16_t offset;
 
 	if (type == 1) {
-		offset = src.typeAndTimezone << 4;
+		offset = typeAndTimezone << 4;
 		/* sign extent offset */
 		offset = (offset >> 4);
 		if (offset == -2047) /* unspecified offset */
 			offset = 0;
-	} else {
+	} else
 		offset = 0;
-	}
 
-	if ((src.year < EPOCH_YEAR) ||
-	    (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
-		*dest = -1;
-		*dest_usec = -1;
+	if ((year < EPOCH_YEAR) ||
+	    (year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
 		return NULL;
 	}
-	*dest = year_seconds[src.year - EPOCH_YEAR];
-	*dest -= offset * 60;
+	dest->tv_sec = year_seconds[year - EPOCH_YEAR];
+	dest->tv_sec -= offset * 60;
 
-	yday = ((__mon_yday[__isleap(src.year)][src.month - 1]) + src.day - 1);
-	*dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
-	*dest_usec = src.centiseconds * 10000 +
-			src.hundredsOfMicroseconds * 100 + src.microseconds;
+	yday = ((__mon_yday[__isleap(year)][src.month - 1]) + src.day - 1);
+	dest->tv_sec += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
+	dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+			src.hundredsOfMicroseconds * 100 + src.microseconds);
 	return dest;
 }
 
-kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
+timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec ts)
 {
 	long int days, rem, y;
 	const unsigned short int *ip;
@@ -128,7 +127,7 @@
 	if (!dest)
 		return NULL;
 
-	dest->typeAndTimezone = 0x1000 | (offset & 0x0FFF);
+	dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));
 
 	ts.tv_sec += offset * 60;
 	days = ts.tv_sec / SECS_PER_DAY;
@@ -151,7 +150,7 @@
 			 - LEAPS_THRU_END_OF(y - 1));
 		y = yg;
 	}
-	dest->year = y;
+	dest->year = cpu_to_le16(y);
 	ip = __mon_yday[__isleap(y)];
 	for (y = 11; days < (long int)ip[y]; --y)
 		continue;
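
The udf_disk_stamp_to_time() change above converts the on-disk timestamp straight into a struct timespec: the 12-bit timezone field is sign-extended by shifting left and back right through a 16-bit type, and the centisecond / hundreds-of-microseconds / microsecond fields are folded into nanoseconds. A small self-contained sketch of just those two steps (endianness conversion omitted, field values made up; this is not the kernel helper).

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low 12 bits of the UDF typeAndTimezone field into a
 * signed minutes-from-UTC offset; -2047 means "unspecified".  Relies on
 * arithmetic right shift of a negative value, as the kernel code does. */
static int16_t udf_tz_offset(uint16_t type_and_tz)
{
	int16_t offset = (int16_t)(type_and_tz << 4);

	offset >>= 4;
	if (offset == -2047)
		offset = 0;
	return offset;
}

/* Fold the sub-second fields of a UDF timestamp into nanoseconds. */
static long udf_subsecond_ns(unsigned centisec, unsigned hundreds_usec,
			     unsigned usec)
{
	return 1000L * (centisec * 10000 + hundreds_usec * 100 + usec);
}

int main(void)
{
	/* 0x1000 selects type 1 (local time); 0xFC4 encodes -60 minutes. */
	printf("offset  = %d minutes\n", udf_tz_offset(0x1000 | 0xFC4));
	printf("tv_nsec = %ld\n", udf_subsecond_ns(12, 34, 56));
	return 0;
}
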
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index e533b11..9fdf8c9 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -23,7 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>	/* for memset */
 #include <linux/nls.h>
-#include <linux/udf_fs.h>
+#include <linux/crc-itu-t.h>
 
 #include "udf_sb.h"
 
@@ -49,14 +49,16 @@
 {
 	int usesize;
 
-	if ((!dest) || (!ptr) || (!size))
+	if (!dest || !ptr || !size)
 		return -1;
+	BUG_ON(size < 2);
 
-	memset(dest, 0, sizeof(struct ustr));
-	usesize = (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size;
+	usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
+	usesize = min(usesize, size - 2);
 	dest->u_cmpID = ptr[0];
-	dest->u_len = ptr[size - 1];
-	memcpy(dest->u_name, ptr + 1, usesize - 1);
+	dest->u_len = usesize;
+	memcpy(dest->u_name, ptr + 1, usesize);
+	memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
 
 	return 0;
 }
@@ -83,9 +85,6 @@
  * PURPOSE
  *	Convert OSTA Compressed Unicode to the UTF-8 equivalent.
  *
- * DESCRIPTION
- *	This routine is only called by udf_filldir().
- *
  * PRE-CONDITIONS
  *	utf			Pointer to UTF-8 output buffer.
  *	ocu			Pointer to OSTA Compressed Unicode input buffer
@@ -99,43 +98,39 @@
  *	November 12, 1997 - Andrew E. Mileski
  *	Written, tested, and released.
  */
-int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
+int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
 {
-	uint8_t *ocu;
-	uint32_t c;
+	const uint8_t *ocu;
 	uint8_t cmp_id, ocu_len;
 	int i;
 
-	ocu = ocu_i->u_name;
-
 	ocu_len = ocu_i->u_len;
-	cmp_id = ocu_i->u_cmpID;
-	utf_o->u_len = 0;
-
 	if (ocu_len == 0) {
 		memset(utf_o, 0, sizeof(struct ustr));
-		utf_o->u_cmpID = 0;
-		utf_o->u_len = 0;
 		return 0;
 	}
 
-	if ((cmp_id != 8) && (cmp_id != 16)) {
+	cmp_id = ocu_i->u_cmpID;
+	if (cmp_id != 8 && cmp_id != 16) {
+		memset(utf_o, 0, sizeof(struct ustr));
 		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
 		       cmp_id, ocu_i->u_name);
 		return 0;
 	}
 
+	ocu = ocu_i->u_name;
+	utf_o->u_len = 0;
 	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
 
 		/* Expand OSTA compressed Unicode to Unicode */
-		c = ocu[i++];
+		uint32_t c = ocu[i++];
 		if (cmp_id == 16)
 			c = (c << 8) | ocu[i++];
 
 		/* Compress Unicode to UTF-8 */
-		if (c < 0x80U) {
+		if (c < 0x80U)
 			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
-		} else if (c < 0x800U) {
+		else if (c < 0x800U) {
 			utf_o->u_name[utf_o->u_len++] =
 						(uint8_t)(0xc0 | (c >> 6));
 			utf_o->u_name[utf_o->u_len++] =
@@ -255,35 +250,32 @@
 }
 
 static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
-			struct ustr *ocu_i)
+			const struct ustr *ocu_i)
 {
-	uint8_t *ocu;
-	uint32_t c;
+	const uint8_t *ocu;
 	uint8_t cmp_id, ocu_len;
 	int i;
 
-	ocu = ocu_i->u_name;
 
 	ocu_len = ocu_i->u_len;
-	cmp_id = ocu_i->u_cmpID;
-	utf_o->u_len = 0;
-
 	if (ocu_len == 0) {
 		memset(utf_o, 0, sizeof(struct ustr));
-		utf_o->u_cmpID = 0;
-		utf_o->u_len = 0;
 		return 0;
 	}
 
-	if ((cmp_id != 8) && (cmp_id != 16)) {
+	cmp_id = ocu_i->u_cmpID;
+	if (cmp_id != 8 && cmp_id != 16) {
+		memset(utf_o, 0, sizeof(struct ustr));
 		printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
 		       cmp_id, ocu_i->u_name);
 		return 0;
 	}
 
+	ocu = ocu_i->u_name;
+	utf_o->u_len = 0;
 	for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
 		/* Expand OSTA compressed Unicode to Unicode */
-		c = ocu[i++];
+		uint32_t c = ocu[i++];
 		if (cmp_id == 16)
 			c = (c << 8) | ocu[i++];
 
@@ -463,7 +455,7 @@
 		} else if (newIndex > 250)
 			newIndex = 250;
 		newName[newIndex++] = CRC_MARK;
-		valueCRC = udf_crc(fidName, fidNameLen, 0);
+		valueCRC = crc_itu_t(0, fidName, fidNameLen);
 		newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
 		newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
 		newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
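
udf_CS0toUTF8() above expands each OSTA compressed Unicode code point (8- or 16-bit wide, depending on u_cmpID) and emits it as 1-, 2- or 3-byte UTF-8. A self-contained sketch of just that encoding step; the function and buffer names are invented, and the 3-byte case follows the standard UTF-8 layout used by the kernel routine.

#include <stdint.h>
#include <stdio.h>

/* Encode one Unicode code point below 0x10000 as UTF-8 and return the
 * number of bytes written, mirroring the 1/2/3-byte cases above. */
static int encode_utf8(uint32_t c, uint8_t *out)
{
	if (c < 0x80U) {
		out[0] = (uint8_t)c;
		return 1;
	} else if (c < 0x800U) {
		out[0] = (uint8_t)(0xc0 | (c >> 6));
		out[1] = (uint8_t)(0x80 | (c & 0x3f));
		return 2;
	}
	out[0] = (uint8_t)(0xe0 | (c >> 12));
	out[1] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
	out[2] = (uint8_t)(0x80 | (c & 0x3f));
	return 3;
}

int main(void)
{
	uint8_t buf[4] = { 0 };
	int n = encode_utf8(0x00E9, buf);	/* U+00E9, e with acute */

	printf("%d bytes: %02x %02x\n", n, buf[0], buf[1]);
	return 0;
}
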
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index f1663aa..18a4321 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -157,6 +157,7 @@
 struct ia64_mca_notify_die {
 	struct ia64_sal_os_state *sos;
 	int *monarch_cpu;
+	int *data;
 };
 
 DECLARE_PER_CPU(u64, ia64_mca_pal_base);
diff --git a/include/asm-sh/i2c-sh7760.h b/include/asm-sh/i2c-sh7760.h
new file mode 100644
index 0000000..2418211
--- /dev/null
+++ b/include/asm-sh/i2c-sh7760.h
@@ -0,0 +1,22 @@
+/*
+ * MMIO/IRQ and platform data for SH7760 I2C channels
+ */
+
+#ifndef _I2C_SH7760_H_
+#define _I2C_SH7760_H_
+
+#define SH7760_I2C_DEVNAME	"sh7760-i2c"
+
+#define SH7760_I2C0_MMIO	0xFE140000
+#define SH7760_I2C0_MMIOEND	0xFE14003B
+#define SH7760_I2C0_IRQ		62
+
+#define SH7760_I2C1_MMIO	0xFE150000
+#define SH7760_I2C1_MMIOEND	0xFE15003B
+#define SH7760_I2C1_IRQ		63
+
+struct sh7760_i2c_platdata {
+	unsigned int speed_khz;
+};
+
+#endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b3d9ccd..cbb5ccb 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -100,7 +100,7 @@
 header-y += jffs2.h
 header-y += keyctl.h
 header-y += limits.h
-header-y += lock_dlm_plock.h
+header-y += dlm_plock.h
 header-y += magic.h
 header-y += major.h
 header-y += matroxfb.h
@@ -150,6 +150,7 @@
 header-y += tipc.h
 header-y += tipc_config.h
 header-y += toshiba.h
+header-y += udf_fs_i.h
 header-y += ultrasound.h
 header-y += un.h
 header-y += utime.h
@@ -210,7 +211,9 @@
 unifdef-y += hdlc.h
 unifdef-y += hdreg.h
 unifdef-y += hdsmart.h
+unifdef-y += hid.h
 unifdef-y += hiddev.h
+unifdef-y += hidraw.h
 unifdef-y += hpet.h
 unifdef-y += i2c.h
 unifdef-y += i2c-dev.h
@@ -334,7 +337,6 @@
 unifdef-y += timex.h
 unifdef-y += tty.h
 unifdef-y += types.h
-unifdef-y += udf_fs_i.h
 unifdef-y += udp.h
 unifdef-y += uinput.h
 unifdef-y += uio.h
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index c743fbc..203a025 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -21,10 +21,7 @@
 
 /* Lock levels and flags are here */
 #include <linux/dlmconstants.h>
-
-
-#define DLM_RESNAME_MAXLEN	64
-
+#include <linux/types.h>
 
 typedef void dlm_lockspace_t;
 
@@ -63,7 +60,7 @@
 
 struct dlm_lksb {
 	int 	 sb_status;
-	uint32_t sb_lkid;
+	__u32	 sb_lkid;
 	char 	 sb_flags;
 	char *	 sb_lvbptr;
 };
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index 9642277..c603450 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -11,10 +11,16 @@
 *******************************************************************************
 ******************************************************************************/
 
+#ifndef _LINUX_DLM_DEVICE_H
+#define _LINUX_DLM_DEVICE_H
+
 /* This is the device interface for dlm, most users will use a library
  * interface.
  */
 
+#include <linux/dlm.h>
+#include <linux/types.h>
+
 #define DLM_USER_LVB_LEN	32
 
 /* Version of the device interface */
@@ -94,10 +100,9 @@
 #define DLM_USER_PURGE        6
 #define DLM_USER_DEADLOCK     7
 
-/* Arbitrary length restriction */
-#define MAX_LS_NAME_LEN 64
-
 /* Lockspace flags */
 #define DLM_USER_LSFLG_AUTOFREE   1
 #define DLM_USER_LSFLG_FORCEFREE  2
 
+#endif
+
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
new file mode 100644
index 0000000..18d5fdb
--- /dev/null
+++ b/include/linux/dlm_plock.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ */
+
+#ifndef __DLM_PLOCK_DOT_H__
+#define __DLM_PLOCK_DOT_H__
+
+#define DLM_PLOCK_MISC_NAME		"dlm_plock"
+
+#define DLM_PLOCK_VERSION_MAJOR	1
+#define DLM_PLOCK_VERSION_MINOR	1
+#define DLM_PLOCK_VERSION_PATCH	0
+
+enum {
+	DLM_PLOCK_OP_LOCK = 1,
+	DLM_PLOCK_OP_UNLOCK,
+	DLM_PLOCK_OP_GET,
+};
+
+struct dlm_plock_info {
+	__u32 version[3];
+	__u8 optype;
+	__u8 ex;
+	__u8 wait;
+	__u8 pad;
+	__u32 pid;
+	__s32 nodeid;
+	__s32 rv;
+	__u32 fsid;
+	__u64 number;
+	__u64 start;
+	__u64 end;
+	__u64 owner;
+};
+
+#ifdef __KERNEL__
+int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		int cmd, struct file_lock *fl);
+int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		struct file_lock *fl);
+int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		struct file_lock *fl);
+#endif /* __KERNEL__ */
+
+#endif
+
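
The new dlm_plock.h above exposes struct dlm_plock_info and the DLM_PLOCK_VERSION_* constants to userspace. A hedged sketch of how a userspace helper might fill in and version-check such a request; only fields and constants defined in the header are used, the struct and typedefs are duplicated here purely so the sketch builds on its own, and check_version() is an invented illustration, not the real daemon's policy.

#include <stdio.h>

typedef unsigned char __u8;
typedef int __s32;
typedef unsigned int __u32;
typedef unsigned long long __u64;

#define DLM_PLOCK_VERSION_MAJOR	1
#define DLM_PLOCK_VERSION_MINOR	1
#define DLM_PLOCK_VERSION_PATCH	0

enum {
	DLM_PLOCK_OP_LOCK = 1,
	DLM_PLOCK_OP_UNLOCK,
	DLM_PLOCK_OP_GET,
};

struct dlm_plock_info {
	__u32 version[3];
	__u8 optype;
	__u8 ex;
	__u8 wait;
	__u8 pad;
	__u32 pid;
	__s32 nodeid;
	__s32 rv;
	__u32 fsid;
	__u64 number;
	__u64 start;
	__u64 end;
	__u64 owner;
};

/* Accept only requests whose major version matches ours (illustrative). */
static int check_version(const struct dlm_plock_info *info)
{
	return info->version[0] == DLM_PLOCK_VERSION_MAJOR;
}

int main(void)
{
	struct dlm_plock_info req = {
		.version = { DLM_PLOCK_VERSION_MAJOR,
			     DLM_PLOCK_VERSION_MINOR,
			     DLM_PLOCK_VERSION_PATCH },
		.optype	 = DLM_PLOCK_OP_LOCK,
		.ex	 = 1,		/* exclusive lock */
		.start	 = 0,
		.end	 = 4096,
	};

	printf("version ok: %d\n", check_version(&req));
	return 0;
}
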
diff --git a/include/linux/dlmconstants.h b/include/linux/dlmconstants.h
index fddb3d3..47bf08d 100644
--- a/include/linux/dlmconstants.h
+++ b/include/linux/dlmconstants.h
@@ -18,6 +18,10 @@
  * Constants used by DLM interface.
  */
 
+#define DLM_LOCKSPACE_LEN       64
+#define DLM_RESNAME_MAXLEN      64
+
+
 /*
  * Lock Modes
  */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 74ff575..d951ec4 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -284,6 +284,7 @@
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_B8		0x02000000
 #define HID_QUIRK_HWHEEL_WHEEL_INVERT		0x04000000
 #define HID_QUIRK_MICROSOFT_KEYS		0x08000000
+#define HID_QUIRK_FULLSPEED_INTERVAL		0x10000000
 
 /*
  * Separate quirks for runtime report descriptor fixup
@@ -296,6 +297,8 @@
 #define HID_QUIRK_RDESC_MACBOOK_JIS		0x00000010
 #define HID_QUIRK_RDESC_BUTTON_CONSUMER		0x00000020
 #define HID_QUIRK_RDESC_SAMSUNG_REMOTE		0x00000040
+#define HID_QUIRK_RDESC_MICROSOFT_RECV_1028	0x00000080
+#define HID_QUIRK_RDESC_SUNPLUS_WDESKTOP	0x00000100
 
 /*
  * This is the global environment of the parser. This information is
@@ -320,7 +323,7 @@
  * This is the local environment. It is persistent up the next main-item.
  */
 
-#define HID_MAX_USAGES			8192
+#define HID_MAX_USAGES			12288
 #define HID_DEFAULT_NUM_COLLECTIONS	16
 
 struct hid_local {
@@ -421,6 +424,7 @@
 #define HID_RESET_PENDING	4
 #define HID_SUSPENDED		5
 #define HID_CLEAR_HALT		6
+#define HID_DISCONNECTED	7
 
 struct hid_input {
 	struct list_head list;
@@ -452,8 +456,6 @@
 	void *hidraw;
 	int minor;							/* Hiddev minor number */
 
-	wait_queue_head_t wait;						/* For sleeping */
-
 	int open;							/* is the device open by anyone? */
 	char name[128];							/* Device name */
 	char phys[64];							/* Device physical location */
@@ -530,14 +532,12 @@
 int hidinput_mapping_quirks(struct hid_usage *, struct input_dev *, unsigned long **, int *);
 int hidinput_event_quirks(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
 int hidinput_apple_event(struct hid_device *, struct input_dev *, struct hid_usage *, __s32);
-void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt);
 void hid_output_report(struct hid_report *report, __u8 *data);
 void hid_free_device(struct hid_device *device);
 struct hid_device *hid_parse_report(__u8 *start, unsigned size);
 
 /* HID quirks API */
 u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, const u32 quirks);
 int usbhid_quirks_init(char **quirks_param);
 void usbhid_quirks_exit(void);
 void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char **);
@@ -546,6 +546,7 @@
 int hid_ff_init(struct hid_device *hid);
 
 int hid_lgff_init(struct hid_device *hid);
+int hid_lg2ff_init(struct hid_device *hid);
 int hid_plff_init(struct hid_device *hid);
 int hid_tmff_init(struct hid_device *hid);
 int hid_zpff_init(struct hid_device *hid);
@@ -566,7 +567,11 @@
 #define dbg_hid_line(format, arg...) if (hid_debug) \
 				printk(format, ## arg)
 #else
-#define dbg_hid(format, arg...) do {} while (0)
+static inline int __attribute__((format(printf, 1, 2)))
+dbg_hid(const char *fmt, ...)
+{
+	return 0;
+}
 #define dbg_hid_line dbg_hid
 #endif
 
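
The dbg_hid() change above swaps an empty do {} while (0) macro for a static inline function carrying a printf-style format attribute, so format strings and arguments keep being type-checked even when hid_debug output is compiled out. A userspace sketch of the same pattern; the macro and function names here are invented.

#include <stdio.h>

#define MY_DEBUG 0

#if MY_DEBUG
#define dbg_printf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* Disabled variant: still a real function, so GCC checks the format
 * string and arguments, but the call compiles to nothing useful. */
static inline int __attribute__((format(printf, 1, 2)))
dbg_printf(const char *fmt, ...)
{
	(void)fmt;
	return 0;
}
#endif

int main(void)
{
	/* With the empty-macro approach a format/argument mismatch here
	 * would go unnoticed in non-debug builds; the inline stub keeps
	 * the compiler checking it. */
	dbg_printf("value = %d\n", 42);
	return 0;
}
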
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index 0536f29..dbb5c8c 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -16,6 +16,7 @@
  */
 
 #include <linux/hid.h>
+#include <linux/types.h>
 
 struct hidraw_report_descriptor {
 	__u32 size;
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index fce47c0..adcb3dc 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -1,14 +1,41 @@
 #ifndef _LINUX_I2C_ALGO_PCA_H
 #define _LINUX_I2C_ALGO_PCA_H
 
+/* Clock speeds for the bus */
+#define I2C_PCA_CON_330kHz	0x00
+#define I2C_PCA_CON_288kHz	0x01
+#define I2C_PCA_CON_217kHz	0x02
+#define I2C_PCA_CON_146kHz	0x03
+#define I2C_PCA_CON_88kHz	0x04
+#define I2C_PCA_CON_59kHz	0x05
+#define I2C_PCA_CON_44kHz	0x06
+#define I2C_PCA_CON_36kHz	0x07
+
+/* PCA9564 registers */
+#define I2C_PCA_STA		0x00 /* STATUS  Read Only  */
+#define I2C_PCA_TO		0x00 /* TIMEOUT Write Only */
+#define I2C_PCA_DAT		0x01 /* DATA    Read/Write */
+#define I2C_PCA_ADR		0x02 /* OWN ADR Read/Write */
+#define I2C_PCA_CON		0x03 /* CONTROL Read/Write */
+
+#define I2C_PCA_CON_AA		0x80 /* Assert Acknowledge */
+#define I2C_PCA_CON_ENSIO	0x40 /* Enable */
+#define I2C_PCA_CON_STA		0x20 /* Start */
+#define I2C_PCA_CON_STO		0x10 /* Stop */
+#define I2C_PCA_CON_SI		0x08 /* Serial Interrupt */
+#define I2C_PCA_CON_CR		0x07 /* Clock Rate (MASK) */
+
 struct i2c_algo_pca_data {
-	int  (*get_own)			(struct i2c_algo_pca_data *adap); /* Obtain own address */
-	int  (*get_clock)		(struct i2c_algo_pca_data *adap);
-	void (*write_byte)		(struct i2c_algo_pca_data *adap, int reg, int val);
-	int  (*read_byte)		(struct i2c_algo_pca_data *adap, int reg);
-	int  (*wait_for_interrupt)	(struct i2c_algo_pca_data *adap);
+	void 				*data;	/* private low level data */
+	void (*write_byte)		(void *data, int reg, int val);
+	int  (*read_byte)		(void *data, int reg);
+	int  (*wait_for_completion)	(void *data);
+	void (*reset_chip)		(void *data);
+	/* i2c_clock values are defined in linux/i2c-algo-pca.h */
+	unsigned int			i2c_clock;
 };
 
 int i2c_pca_add_bus(struct i2c_adapter *);
+int i2c_pca_add_numbered_bus(struct i2c_adapter *);
 
 #endif /* _LINUX_I2C_ALGO_PCA_H */
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
new file mode 100644
index 0000000..3d19187
--- /dev/null
+++ b/include/linux/i2c-pca-platform.h
@@ -0,0 +1,12 @@
+#ifndef I2C_PCA9564_PLATFORM_H
+#define I2C_PCA9564_PLATFORM_H
+
+struct i2c_pca9564_pf_platform_data {
+	int gpio;		/* pin to reset the chip; the driver works
+				 * without it (negative value) but cannot
+				 * recover from some error conditions then */
+	int i2c_clock_speed;	/* values are defined in linux/i2c-algo-pca.h */
+	int timeout;		/* timeout = this value * 10us */
+};
+
+#endif /* I2C_PCA9564_PLATFORM_H */
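
struct i2c_pca9564_pf_platform_data above carries the board-specific wiring for the PCA9564 platform driver. A hedged sketch of how a board file might fill it in, using one of the I2C_PCA_CON_* clock codes added to linux/i2c-algo-pca.h in this series; the GPIO number, timeout and variable name are made-up values, and the struct and constant are duplicated only so the sketch compiles on its own.

#include <stdio.h>

#define I2C_PCA_CON_59kHz	0x05	/* as defined in the hunk above */

struct i2c_pca9564_pf_platform_data {
	int gpio;		/* reset pin, negative if not wired */
	int i2c_clock_speed;	/* one of the I2C_PCA_CON_* rates */
	int timeout;		/* in units of 10us */
};

/* A board description might look like this (values are invented). */
static struct i2c_pca9564_pf_platform_data example_board_pca9564 = {
	.gpio		 = -1,			/* no reset line wired up */
	.i2c_clock_speed = I2C_PCA_CON_59kHz,
	.timeout	 = 100,			/* 1 ms */
};

int main(void)
{
	printf("clock code %#x, timeout %d x 10us\n",
	       example_board_pca9564.i2c_clock_speed,
	       example_board_pca9564.timeout);
	return 0;
}
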
diff --git a/include/linux/lock_dlm_plock.h b/include/linux/lock_dlm_plock.h
deleted file mode 100644
index fc34151..0000000
--- a/include/linux/lock_dlm_plock.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
- */
-
-#ifndef __LOCK_DLM_PLOCK_DOT_H__
-#define __LOCK_DLM_PLOCK_DOT_H__
-
-#define GDLM_PLOCK_MISC_NAME		"lock_dlm_plock"
-
-#define GDLM_PLOCK_VERSION_MAJOR	1
-#define GDLM_PLOCK_VERSION_MINOR	1
-#define GDLM_PLOCK_VERSION_PATCH	0
-
-enum {
-	GDLM_PLOCK_OP_LOCK = 1,
-	GDLM_PLOCK_OP_UNLOCK,
-	GDLM_PLOCK_OP_GET,
-};
-
-struct gdlm_plock_info {
-	__u32 version[3];
-	__u8 optype;
-	__u8 ex;
-	__u8 wait;
-	__u8 pad;
-	__u32 pid;
-	__s32 nodeid;
-	__s32 rv;
-	__u32 fsid;
-	__u64 number;
-	__u64 start;
-	__u64 end;
-	__u64 owner;
-};
-
-#endif
-
diff --git a/include/linux/udf_fs.h b/include/linux/udf_fs.h
deleted file mode 100644
index aa88654..0000000
--- a/include/linux/udf_fs.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * udf_fs.h
- *
- * PURPOSE
- *  Included by fs/filesystems.c
- *
- * DESCRIPTION
- *  OSTA-UDF(tm) = Optical Storage Technology Association
- *  Universal Disk Format.
- *
- *  This code is based on version 2.50 of the UDF specification,
- *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
- *    http://www.osta.org/
- *    http://www.ecma.ch/
- *    http://www.iso.org/
- *
- * COPYRIGHT
- *	This file is distributed under the terms of the GNU General Public
- *	License (GPL). Copies of the GPL can be obtained from:
- *		ftp://prep.ai.mit.edu/pub/gnu/GPL
- *	Each contributing author retains all rights to their own work.
- *
- *  (C) 1999-2004 Ben Fennema
- *  (C) 1999-2000 Stelias Computing Inc
- *
- * HISTORY
- *
- */
-
-#ifndef _UDF_FS_H
-#define _UDF_FS_H 1
-
-#define UDF_PREALLOCATE
-#define UDF_DEFAULT_PREALLOC_BLOCKS	8
-
-#undef UDFFS_DEBUG
-
-#ifdef UDFFS_DEBUG
-#define udf_debug(f, a...) \
-	do { \
-		printk (KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
-			__FILE__, __LINE__, __FUNCTION__); \
-		printk (f, ##a); \
-	} while (0)
-#else
-#define udf_debug(f, a...) /**/
-#endif
-
-#define udf_info(f, a...) \
-		printk (KERN_INFO "UDF-fs INFO " f, ##a);
-
-#endif /* _UDF_FS_H */
diff --git a/include/linux/udf_fs_i.h b/include/linux/udf_fs_i.h
index ffaf056..3536965 100644
--- a/include/linux/udf_fs_i.h
+++ b/include/linux/udf_fs_i.h
@@ -9,41 +9,10 @@
  *		ftp://prep.ai.mit.edu/pub/gnu/GPL
  *	Each contributing author retains all rights to their own work.
  */
-
 #ifndef _UDF_FS_I_H
 #define _UDF_FS_I_H 1
 
-#ifdef __KERNEL__
-
-struct udf_inode_info
-{
-	struct timespec		i_crtime;
-	/* Physical address of inode */
-	kernel_lb_addr		i_location;
-	__u64			i_unique;
-	__u32			i_lenEAttr;
-	__u32			i_lenAlloc;
-	__u64			i_lenExtents;
-	__u32			i_next_alloc_block;
-	__u32			i_next_alloc_goal;
-	unsigned		i_alloc_type : 3;
-	unsigned		i_efe : 1;
-	unsigned		i_use : 1;
-	unsigned		i_strat4096 : 1;
-	unsigned		reserved : 26;
-	union
-	{
-		short_ad	*i_sad;
-		long_ad		*i_lad;
-		__u8		*i_data;
-	} i_ext;
-	struct inode vfs_inode;
-};
-
-#endif
-
 /* exported IOCTLs, we have 'l', 0x40-0x7f */
-
 #define UDF_GETEASIZE   _IOR('l', 0x40, int)
 #define UDF_GETEABLOCK  _IOR('l', 0x41, void *)
 #define UDF_GETVOLIDENT _IOR('l', 0x42, void *)
diff --git a/include/linux/udf_fs_sb.h b/include/linux/udf_fs_sb.h
deleted file mode 100644
index 9bc4735..0000000
--- a/include/linux/udf_fs_sb.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * udf_fs_sb.h
- * 
- * This include file is for the Linux kernel/module.
- *
- * COPYRIGHT
- *	This file is distributed under the terms of the GNU General Public
- *	License (GPL). Copies of the GPL can be obtained from:
- *		ftp://prep.ai.mit.edu/pub/gnu/GPL
- *	Each contributing author retains all rights to their own work.
- */
-
-#ifndef _UDF_FS_SB_H
-#define _UDF_FS_SB_H 1
-
-#include <linux/mutex.h>
-
-#pragma pack(1)
-
-#define UDF_MAX_BLOCK_LOADED	8
-
-#define UDF_TYPE1_MAP15			0x1511U
-#define UDF_VIRTUAL_MAP15		0x1512U
-#define UDF_VIRTUAL_MAP20		0x2012U
-#define UDF_SPARABLE_MAP15		0x1522U
-
-struct udf_sparing_data
-{
-	__u16	s_packet_len;
-	struct buffer_head *s_spar_map[4];
-};
-
-struct udf_virtual_data
-{
-	__u32	s_num_entries;
-	__u16	s_start_offset;
-};
-
-struct udf_bitmap
-{
-	__u32			s_extLength;
-	__u32			s_extPosition;
-	__u16			s_nr_groups;
-	struct buffer_head 	**s_block_bitmap;
-};
-
-struct udf_part_map
-{
-	union
-	{
-		struct udf_bitmap	*s_bitmap;
-		struct inode		*s_table;
-	} s_uspace;
-	union
-	{
-		struct udf_bitmap	*s_bitmap;
-		struct inode		*s_table;
-	} s_fspace;
-	__u32	s_partition_root;
-	__u32	s_partition_len;
-	__u16	s_partition_type;
-	__u16	s_partition_num;
-	union
-	{
-		struct udf_sparing_data s_sparing;
-		struct udf_virtual_data s_virtual;
-	} s_type_specific;
-	__u32	(*s_partition_func)(struct super_block *, __u32, __u16, __u32);
-	__u16	s_volumeseqnum;
-	__u16	s_partition_flags;
-};
-
-#pragma pack()
-
-struct udf_sb_info
-{
-	struct udf_part_map	*s_partmaps;
-	__u8			s_volume_ident[32];
-
-	/* Overall info */
-	__u16			s_partitions;
-	__u16			s_partition;
-
-	/* Sector headers */
-	__s32			s_session;
-	__u32			s_anchor[4];
-	__u32			s_last_block;
-
-	struct buffer_head	*s_lvid_bh;
-
-	/* Default permissions */
-	mode_t			s_umask;
-	gid_t			s_gid;
-	uid_t			s_uid;
-
-	/* Root Info */
-	struct timespec		s_record_time;
-
-	/* Fileset Info */
-	__u16			s_serial_number;
-
-	/* highest UDF revision we have recorded to this media */
-	__u16			s_udfrev;
-
-	/* Miscellaneous flags */
-	__u32			s_flags;
-
-	/* Encoding info */
-	struct nls_table	*s_nls_map;
-
-	/* VAT inode */
-	struct inode		*s_vat_inode;
-
-	struct mutex		s_alloc_mutex;
-};
-
-#endif /* _UDF_FS_SB_H */
diff --git a/kernel/sched.c b/kernel/sched.c
index 57ba7ea..0014b03 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7035,6 +7035,7 @@
 /**
  * sched_domain_node_span - get a cpumask for a node's sched_domain
  * @node: node whose cpumask we're constructing
+ * @span: resulting cpumask
  *
  * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 1d69f66..95a8ef4 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -312,6 +312,7 @@
 		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
 			continue;
 
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
 			if (atomic_dec_and_test(&node->ae.used)) {
 				/* Recently Unused */
@@ -319,11 +320,13 @@
 				avc_cache_stats_incr(reclaims);
 				ecx++;
 				if (ecx >= AVC_CACHE_RECLAIM) {
+					rcu_read_unlock();
 					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 					goto out;
 				}
 			}
 		}
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 	}
 out:
@@ -821,8 +824,14 @@
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		/*
+		 * With preemptable RCU, the outer spinlock does not
+		 * prevent RCU grace periods from ending.
+		 */
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[i], list)
 			avc_node_delete(node);
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
 	}
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 38fbb16..308e2cf 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -755,9 +755,18 @@
 	int set_context =	(oldsbsec->flags & CONTEXT_MNT);
 	int set_rootcontext =	(oldsbsec->flags & ROOTCONTEXT_MNT);
 
-	/* we can't error, we can't save the info, this shouldn't get called
-	 * this early in the boot process. */
-	BUG_ON(!ss_initialized);
+	/*
+	 * if the parent was able to be mounted it clearly had no special lsm
+	 * mount options.  thus we can safely put this sb on the list and deal
+	 * with it later
+	 */
+	if (!ss_initialized) {
+		spin_lock(&sb_security_lock);
+		if (list_empty(&newsbsec->list))
+			list_add(&newsbsec->list, &superblock_security_head);
+		spin_unlock(&sb_security_lock);
+		return;
+	}
 
 	/* how can we clone if the old one wasn't set up?? */
 	BUG_ON(!oldsbsec->initialized);
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index c658b84..b4e14bc 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -239,11 +239,13 @@
 {
 	struct sel_netif *netif;
 
+	rcu_read_lock();
 	spin_lock_bh(&sel_netif_lock);
 	netif = sel_netif_find(ifindex);
 	if (netif)
 		sel_netif_destroy(netif);
 	spin_unlock_bh(&sel_netif_lock);
+	rcu_read_unlock();
 }
 
 /**